Merge tag 'mtd/for-4.16' of git://git.infradead.org/linux-mtd
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 29 Jan 2018 19:11:56 +0000 (11:11 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 29 Jan 2018 19:11:56 +0000 (11:11 -0800)
Pull MTD updates from Boris Brezillon:
 "MTD core changes:
   - Rework core functions to avoid duplicating generic checks in
     NAND/OneNAND sub-layers
   - Update the MAINTAINERS entry to reflect the fact that MTD
     maintainers now use a single git tree

  MTD driver changes:
   - CFI: use macros instead of inline functions to limit stack usage
     and make KASAN happy

  NAND core changes:
   - Fix NAND_CMD_NONE handling in nand_command[_lp]() hooks
   - Introduce the ->exec_op() infrastructure
   - Rework NAND buffers handling
   - Fix ECC requirements for K9F4G08U0D
   - Fix nand_do_read_oob() to return the number of bitflips
   - Mark K9F1G08U0E as not supporting subpage writes

  NAND driver changes:
   - MTK: Rework the driver to support new IP versions
   - OMAP OneNAND: Full rework to use new APIs (libgpio, dmaengine) and
     fix DT support
   - Marvell: Add a new driver to replace the pxa3xx one

  SPI NOR core changes:
   - Add support for new ISSI and Cypress/Spansion memory parts.
   - Fix support for Micron memories by checking error bits in the FSR.
   - Fix update of block-protection bits by reading back the SR.
   - Restore the internal state of the SPI flash memory when removing
     the device.

  SPI NOR driver changes:
   - Maintenance for Freescale, Intel and Mediatek drivers.
   - Add support for the direct access mode of the Cadence QSPI
     controller"

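The "->exec_op()" infrastructure mentioned under the NAND core changes models a
NAND access as a flat sequence of controller-agnostic instructions (command,
address cycles, data transfer, wait-for-ready) that a controller driver replays
in a single hook, instead of every driver re-implementing nand_command()-style
state machines. The sketch below is a minimal, self-contained illustration of
that pattern; the type and function names (nand_instr, demo_exec_op, ...) are
hypothetical stand-ins for the idea, not the kernel's actual definitions.

/* Minimal, self-contained illustration of an exec_op-style interface.
 * All names below are hypothetical stand-ins, not the kernel's real
 * structures or hooks.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

enum instr_type { INSTR_CMD, INSTR_ADDR, INSTR_DATA_IN, INSTR_WAITRDY };

struct nand_instr {
    enum instr_type type;
    uint8_t cmd;             /* for INSTR_CMD */
    const uint8_t *addrs;    /* for INSTR_ADDR */
    unsigned int naddrs;
    void *buf;               /* for INSTR_DATA_IN */
    size_t len;
    unsigned int timeout_ms; /* for INSTR_WAITRDY */
};

/* A controller driver would translate each instruction into accesses to its
 * own command/address/data registers; here we only trace the sequence.
 */
static int demo_exec_op(const struct nand_instr *instrs, unsigned int ninstrs)
{
    unsigned int i;

    for (i = 0; i < ninstrs; i++) {
        switch (instrs[i].type) {
        case INSTR_CMD:
            printf("CMD   0x%02x\n", instrs[i].cmd);
            break;
        case INSTR_ADDR:
            printf("ADDR  %u cycle(s)\n", instrs[i].naddrs);
            break;
        case INSTR_DATA_IN:
            printf("READ  %zu byte(s)\n", instrs[i].len);
            break;
        case INSTR_WAITRDY:
            printf("WAIT  R/B# (<= %u ms)\n", instrs[i].timeout_ms);
            break;
        }
    }
    return 0;
}

int main(void)
{
    uint8_t page[2048];
    const uint8_t addrs[5] = { 0 };
    /* A PAGE READ expressed as one instruction sequence:
     * CMD 0x00, 5 address cycles, CMD 0x30, wait tR, read out the page.
     */
    const struct nand_instr read_page[] = {
        { .type = INSTR_CMD,     .cmd = 0x00 },
        { .type = INSTR_ADDR,    .addrs = addrs, .naddrs = 5 },
        { .type = INSTR_CMD,     .cmd = 0x30 },
        { .type = INSTR_WAITRDY, .timeout_ms = 200 },
        { .type = INSTR_DATA_IN, .buf = page, .len = sizeof(page) },
    };

    return demo_exec_op(read_page, sizeof(read_page) / sizeof(read_page[0]));
}

The appeal of describing operations this way is that the core decides what
sequence it wants and each controller only has to know how to replay generic
instructions against its own registers.
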
* tag 'mtd/for-4.16' of git://git.infradead.org/linux-mtd: (93 commits)
  mtd: nand: sunxi: Fix ECC strength choice
  mtd: nand: gpmi: Fix subpage reads
  mtd: nand: Fix build issues due to an anonymous union
  mtd: nand: marvell: Fix missing memory allocation modifier
  mtd: nand: marvell: remove redundant variable 'oob_len'
  mtd: nand: marvell: fix spelling mistake: "suceed"-> "succeed"
  mtd: onenand: omap2: Remove redundant dev_err call in omap2_onenand_probe()
  mtd: Remove duplicate checks on mtd_oob_ops parameter
  mtd: Fallback to ->_read/write_oob() when ->_read/write() is missing
  mtd: mtdpart: Make ECC stat handling consistent
  mtd: onenand: omap2: print resource using %pR format string
  mtd: mtk-nor: modify functions' name more generally
  mtd: onenand: samsung: remove incorrect __iomem annotation
  MAINTAINERS: Add entry for Marvell NAND controller driver
  ARM: OMAP2+: Remove gpmc-onenand
  mtd: onenand: omap2: Configure driver from DT
  mtd: onenand: omap2: Decouple DMA enabling from INT pin availability
  mtd: onenand: omap2: Do not make delay for GPIO OMAP3 specific
  mtd: onenand: omap2: Convert to use dmaengine for memcpy
  mtd: onenand: omap2: Unify OMAP2 and OMAP3 DMA implementation
  ...
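
Two of the SPI NOR core items in the summary above concern status-register
handling: treating the error bits in Micron's Flag Status Register (FSR) as a
failure after a program/erase, and reading the Status Register (SR) back after
updating the block-protection bits to confirm the update actually took effect.
The snippet below sketches both patterns against a tiny fake flash; the helper
names and bit masks are illustrative assumptions, not the driver's exact
symbols, and the fake SR write deliberately drops the BP bits so the read-back
check has something to catch.

/* Sketch of "check the FSR error bits" and "read the SR back after a
 * block-protection update".  All names and masks here are assumptions
 * made for the illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define SR_BP_MASK 0x1c  /* BP2..BP0 block-protection bits in the SR (assumed) */
#define FSR_E_ERR  0x20  /* erase failure flag in the FSR (assumed) */
#define FSR_P_ERR  0x10  /* program failure flag in the FSR (assumed) */

/* Stand-ins for real register accessors, backed by a fake flash whose SR
 * write silently ignores the block-protection bits.
 */
static uint8_t fake_sr, fake_fsr = FSR_P_ERR;

static uint8_t read_sr(void)        { return fake_sr; }
static uint8_t read_fsr(void)       { return fake_fsr; }
static void    write_sr(uint8_t sr) { fake_sr = sr & ~SR_BP_MASK; }

static int update_block_protection(uint8_t bp_bits)
{
    uint8_t sr = (read_sr() & ~SR_BP_MASK) | (bp_bits & SR_BP_MASK);

    write_sr(sr);

    /* Do not trust the cached value: read the SR back and make sure the
     * BP bits really changed.
     */
    if ((read_sr() & SR_BP_MASK) != (bp_bits & SR_BP_MASK))
        return -1;
    return 0;
}

static int check_flag_status(void)
{
    /* A set error bit means the last program/erase failed even though the
     * chip reports itself ready again.
     */
    return (read_fsr() & (FSR_E_ERR | FSR_P_ERR)) ? -1 : 0;
}

int main(void)
{
    printf("BP update: %s\n", update_block_protection(0x0c) ? "rejected" : "ok");
    printf("FSR check: %s\n", check_flag_status() ? "error flagged" : "clean");
    return 0;
}

The driver's rationale is presumably similar: the busy/ready indication alone
does not report program/erase failures, and a write to the SR is not guaranteed
to stick, so both paths verify rather than assume.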

3116 files changed:
.mailmap
Documentation/ABI/testing/sysfs-bus-iio-dfsdm-adc-stm32 [new file with mode: 0644]
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/kernel-parameters.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/thunderbolt.rst
Documentation/arm64/silicon-errata.txt
Documentation/cgroup-v2.txt
Documentation/core-api/genericirq.rst
Documentation/devicetree/bindings/arm/ccn.txt
Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
Documentation/devicetree/bindings/arm/omap/crossbar.txt
Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt
Documentation/devicetree/bindings/clock/axi-clkgen.txt
Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.txt
Documentation/devicetree/bindings/clock/exynos4-clock.txt
Documentation/devicetree/bindings/clock/exynos5250-clock.txt
Documentation/devicetree/bindings/clock/exynos5410-clock.txt
Documentation/devicetree/bindings/clock/exynos5420-clock.txt
Documentation/devicetree/bindings/clock/exynos5440-clock.txt
Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt
Documentation/devicetree/bindings/clock/zx296702-clk.txt
Documentation/devicetree/bindings/crypto/fsl-sec4.txt
Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt
Documentation/devicetree/bindings/display/atmel,lcdc.txt
Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
Documentation/devicetree/bindings/dma/zxdma.txt
Documentation/devicetree/bindings/eeprom/at25.txt
Documentation/devicetree/bindings/gpio/gpio-altera.txt
Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
Documentation/devicetree/bindings/hwmon/jc42.txt
Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt [new file with mode: 0644]
Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/iio/pressure/hp03.txt
Documentation/devicetree/bindings/input/touchscreen/bu21013.txt
Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
Documentation/devicetree/bindings/interrupt-controller/img,meta-intc.txt
Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.txt
Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
Documentation/devicetree/bindings/media/exynos5-gsc.txt
Documentation/devicetree/bindings/media/mediatek-vcodec.txt
Documentation/devicetree/bindings/media/rcar_vin.txt
Documentation/devicetree/bindings/media/samsung-fimc.txt
Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
Documentation/devicetree/bindings/media/video-interfaces.txt
Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
Documentation/devicetree/bindings/mfd/mc13xxx.txt
Documentation/devicetree/bindings/mfd/ti-keystone-devctrl.txt
Documentation/devicetree/bindings/misc/brcm,kona-smc.txt
Documentation/devicetree/bindings/mmc/brcm,kona-sdhci.txt
Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt
Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
Documentation/devicetree/bindings/mtd/gpmc-nor.txt
Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
Documentation/devicetree/bindings/mtd/mtk-nand.txt
Documentation/devicetree/bindings/net/altera_tse.txt
Documentation/devicetree/bindings/net/mdio.txt
Documentation/devicetree/bindings/net/socfpga-dwmac.txt
Documentation/devicetree/bindings/nios2/nios2.txt
Documentation/devicetree/bindings/opp/opp.txt
Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/altera-pcie.txt
Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
Documentation/devicetree/bindings/pinctrl/brcm,cygnus-pinmux.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-atlas7.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
Documentation/devicetree/bindings/power/power_domain.txt
Documentation/devicetree/bindings/regulator/regulator.txt
Documentation/devicetree/bindings/serial/efm32-uart.txt
Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt
Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
Documentation/devicetree/bindings/sound/adi,axi-i2s.txt
Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
Documentation/devicetree/bindings/sound/ak4613.txt
Documentation/devicetree/bindings/sound/ak4642.txt
Documentation/devicetree/bindings/sound/da7218.txt
Documentation/devicetree/bindings/sound/da7219.txt
Documentation/devicetree/bindings/sound/dmic.txt
Documentation/devicetree/bindings/sound/max98371.txt
Documentation/devicetree/bindings/sound/max98373.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/max9867.txt
Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
Documentation/devicetree/bindings/sound/mxs-audio-sgtl5000.txt
Documentation/devicetree/bindings/sound/nau8825.txt
Documentation/devicetree/bindings/sound/pcm186x.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/renesas,fsi.txt
Documentation/devicetree/bindings/sound/renesas,rsnd.txt
Documentation/devicetree/bindings/sound/rockchip-spdif.txt
Documentation/devicetree/bindings/sound/simple-card.txt
Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
Documentation/devicetree/bindings/sound/st,stm32-adfsdm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/st,stm32-sai.txt
Documentation/devicetree/bindings/sound/sun4i-i2s.txt
Documentation/devicetree/bindings/sound/tas5720.txt
Documentation/devicetree/bindings/sound/tfa9879.txt
Documentation/devicetree/bindings/sound/ti,tas6424.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
Documentation/devicetree/bindings/sound/tlv320aic3x.txt
Documentation/devicetree/bindings/sound/tscs42xx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/uniphier,evea.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/efm32-spi.txt
Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
Documentation/devicetree/bindings/thermal/thermal.txt
Documentation/devicetree/bindings/ufs/ufs-qcom.txt
Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
Documentation/devicetree/bindings/usb/am33xx-usb.txt
Documentation/devicetree/bindings/usb/ehci-st.txt
Documentation/devicetree/bindings/usb/ohci-st.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt
Documentation/devicetree/bindings/watchdog/zii,rave-sp-wdt.txt [new file with mode: 0644]
Documentation/driver-api/dmaengine/client.rst
Documentation/driver-api/iio/hw-consumer.rst [new file with mode: 0644]
Documentation/driver-api/iio/index.rst
Documentation/driver-api/pci.rst
Documentation/driver-api/pm/devices.rst
Documentation/driver-model/devres.txt
Documentation/filesystems/nilfs2.txt
Documentation/filesystems/overlayfs.txt
Documentation/gpu/i915.rst
Documentation/kbuild/kconfig-language.txt
Documentation/locking/crossrelease.txt [deleted file]
Documentation/media/dvb-drivers/frontends.rst [new file with mode: 0644]
Documentation/media/dvb-drivers/index.rst
Documentation/networking/index.rst
Documentation/networking/msg_zerocopy.rst
Documentation/power/pci.txt
Documentation/printk-formats.txt
Documentation/scsi/scsi_mid_low_api.txt
Documentation/sysctl/vm.txt
Documentation/thermal/cpu-cooling-api.txt
Documentation/usb/gadget-testing.txt
Documentation/virtual/kvm/api.txt
Documentation/vm/zswap.txt
Documentation/x86/pti.txt [new file with mode: 0644]
Documentation/x86/x86_64/mm.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/thread_info.h
arch/alpha/include/uapi/asm/Kbuild
arch/alpha/kernel/sys_sio.c
arch/alpha/lib/ev6-memset.S
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/hsdk_defconfig
arch/arc/include/asm/thread_info.h
arch/arc/include/asm/uaccess.h
arch/arc/include/uapi/asm/Kbuild
arch/arc/kernel/setup.c
arch/arc/kernel/stacktrace.c
arch/arc/kernel/traps.c
arch/arc/kernel/troubleshoot.c
arch/arc/plat-axs10x/axs10x.c
arch/arc/plat-hsdk/platform.c
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am437x-cm-t43.dts
arch/arm/boot/dts/armada-385-db-ap.dts
arch/arm/boot/dts/armada-385-linksys.dtsi
arch/arm/boot/dts/armada-385-synology-ds116.dts
arch/arm/boot/dts/armada-388-gp.dts
arch/arm/boot/dts/aspeed-g4.dtsi
arch/arm/boot/dts/at91-tse850-3.dts
arch/arm/boot/dts/bcm-nsp.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/bcm958623hr.dts
arch/arm/boot/dts/bcm958625hr.dts
arch/arm/boot/dts/da850-lcdk.dts
arch/arm/boot/dts/da850-lego-ev3.dts
arch/arm/boot/dts/dm814x.dtsi
arch/arm/boot/dts/exynos5800-peach-pi.dts
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/imx6ul.dtsi
arch/arm/boot/dts/kirkwood-openblocks_a7.dts
arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
arch/arm/boot/dts/logicpd-som-lv.dtsi
arch/arm/boot/dts/ls1021a-qds.dts
arch/arm/boot/dts/ls1021a-twr.dts
arch/arm/boot/dts/meson.dtsi
arch/arm/boot/dts/nspire.dtsi
arch/arm/boot/dts/omap3-beagle-xm.dts
arch/arm/boot/dts/omap3-beagle.dts
arch/arm/boot/dts/omap3-cm-t3x.dtsi
arch/arm/boot/dts/omap3-evm-common.dtsi
arch/arm/boot/dts/omap3-gta04.dtsi
arch/arm/boot/dts/omap3-igep0020-common.dtsi
arch/arm/boot/dts/omap3-igep0030-common.dtsi
arch/arm/boot/dts/omap3-lilly-a83x.dtsi
arch/arm/boot/dts/omap3-overo-base.dtsi
arch/arm/boot/dts/omap3-pandora-common.dtsi
arch/arm/boot/dts/omap3-tao3530.dtsi
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4-droid4-xt894.dts
arch/arm/boot/dts/omap4-duovero.dtsi
arch/arm/boot/dts/omap4-panda-common.dtsi
arch/arm/boot/dts/omap4-var-som-om44.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap5-cm-t54.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/r8a7790.dtsi
arch/arm/boot/dts/r8a7792.dtsi
arch/arm/boot/dts/r8a7793.dtsi
arch/arm/boot/dts/r8a7794.dtsi
arch/arm/boot/dts/rk3066a-marsboard.dts
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
arch/arm/boot/dts/tango4-common.dtsi
arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
arch/arm/configs/sunxi_defconfig
arch/arm/include/asm/kvm_arm.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/thread_info.h
arch/arm/include/uapi/asm/Kbuild
arch/arm/kernel/entry-header.S
arch/arm/kernel/traps.c
arch/arm/lib/csumpartialcopyuser.S
arch/arm/mach-davinci/dm365.c
arch/arm/mach-meson/platsmp.c
arch/arm/mach-omap2/cm_common.c
arch/arm/mach-omap2/omap-secure.c
arch/arm/mach-omap2/omap-secure.h
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/pm.h
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/prcm-common.h
arch/arm/mach-omap2/prm33xx.c
arch/arm/mach-omap2/sleep34xx.S
arch/arm/net/bpf_jit_32.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/Makefile
arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
arch/arm64/boot/dts/renesas/salvator-common.dtsi
arch/arm64/boot/dts/renesas/ulcb.dtsi
arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/module.h
arch/arm64/include/asm/perf_event.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/thread_info.h
arch/arm64/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
arch/arm64/kernel/Makefile
arch/arm64/kernel/cpu-reset.S
arch/arm64/kernel/cpu_ops.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/efi-entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ftrace-mod.S [deleted file]
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/head.S
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kernel/module-plts.c
arch/arm64/kernel/module.lds
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/process.c
arch/arm64/kernel/relocate_kernel.S
arch/arm64/kvm/debug.c
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp-init.S
arch/arm64/kvm/hyp/debug-sr.c
arch/arm64/kvm/hyp/switch.c
arch/arm64/mm/context.c
arch/arm64/mm/dump.c
arch/arm64/mm/fault.c
arch/arm64/mm/init.c
arch/arm64/mm/pgd.c
arch/arm64/net/bpf_jit_comp.c
arch/blackfin/include/asm/thread_info.h
arch/blackfin/include/uapi/asm/Kbuild
arch/c6x/include/asm/thread_info.h
arch/c6x/include/uapi/asm/Kbuild
arch/cris/include/asm/processor.h
arch/cris/include/asm/thread_info.h
arch/cris/include/uapi/asm/Kbuild
arch/cris/kernel/vmlinux.lds.S
arch/frv/include/asm/thread_info.h
arch/frv/include/uapi/asm/Kbuild
arch/h8300/include/asm/thread_info.h
arch/h8300/include/uapi/asm/Kbuild
arch/hexagon/include/asm/thread_info.h
arch/hexagon/include/uapi/asm/Kbuild
arch/hexagon/kernel/vmlinux.lds.S
arch/ia64/Kconfig
arch/ia64/Makefile
arch/ia64/include/asm/atomic.h
arch/ia64/include/asm/thread_info.h
arch/ia64/include/uapi/asm/Kbuild
arch/ia64/kernel/Makefile
arch/ia64/kernel/acpi.c
arch/ia64/kernel/init_task.c [deleted file]
arch/ia64/kernel/time.c
arch/ia64/kernel/vmlinux.lds.S
arch/m32r/include/asm/thread_info.h
arch/m32r/include/uapi/asm/Kbuild
arch/m32r/kernel/traps.c
arch/m68k/configs/stmark2_defconfig
arch/m68k/include/asm/thread_info.h
arch/m68k/include/uapi/asm/Kbuild
arch/m68k/kernel/vmlinux-nommu.lds
arch/m68k/kernel/vmlinux-std.lds
arch/m68k/kernel/vmlinux-sun3.lds
arch/metag/include/asm/thread_info.h
arch/metag/include/uapi/asm/Kbuild
arch/microblaze/include/asm/mmu_context_mm.h
arch/microblaze/include/asm/thread_info.h
arch/microblaze/include/uapi/asm/Kbuild
arch/mips/Kconfig
arch/mips/Kconfig.debug
arch/mips/ar7/platform.c
arch/mips/ath25/devices.c
arch/mips/include/asm/Kbuild
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/serial.h [new file with mode: 0644]
arch/mips/include/asm/thread_info.h
arch/mips/include/uapi/asm/Kbuild
arch/mips/kernel/cps-vec.S
arch/mips/kernel/mips-cm.c
arch/mips/kernel/process.c
arch/mips/kernel/ptrace.c
arch/mips/kvm/mips.c
arch/mips/lib/Makefile
arch/mips/lib/libgcc.h
arch/mips/lib/multi3.c [new file with mode: 0644]
arch/mips/mm/uasm-micromips.c
arch/mips/ralink/timer.c
arch/mips/rb532/Makefile
arch/mips/rb532/devices.c
arch/mn10300/include/asm/thread_info.h
arch/mn10300/include/uapi/asm/Kbuild
arch/nios2/include/asm/thread_info.h
arch/nios2/include/uapi/asm/Kbuild
arch/openrisc/include/asm/processor.h
arch/openrisc/include/asm/thread_info.h
arch/openrisc/include/uapi/asm/Kbuild
arch/openrisc/kernel/vmlinux.lds.S
arch/parisc/boot/compressed/misc.c
arch/parisc/include/asm/ldcw.h
arch/parisc/include/asm/thread_info.h
arch/parisc/include/uapi/asm/Kbuild
arch/parisc/kernel/drivers.c
arch/parisc/kernel/entry.S
arch/parisc/kernel/hpmc.S
arch/parisc/kernel/pacache.S
arch/parisc/kernel/process.c
arch/parisc/kernel/unwind.c
arch/parisc/lib/delay.c
arch/parisc/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/exception-64e.h
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/feature-fixups.h
arch/powerpc/include/asm/hvcall.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/mmu_context.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/plpar_wrappers.h
arch/powerpc/include/asm/setup.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/include/uapi/asm/Kbuild
arch/powerpc/include/uapi/asm/kvm.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/cpu_setup_power.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/fadump.c
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/kvm/book3s_64_mmu.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/book3s_rmhandlers.S
arch/powerpc/kvm/book3s_segment.S
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/lib/feature-fixups.c
arch/powerpc/mm/fault.c
arch/powerpc/mm/hash_native_64.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/imc-pmu.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/ps3/setup.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/pseries.h
arch/powerpc/platforms/pseries/ras.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/fsl_msi.c
arch/powerpc/xmon/xmon.c
arch/riscv/configs/defconfig
arch/riscv/include/asm/Kbuild
arch/riscv/include/asm/asm.h
arch/riscv/include/asm/atomic.h
arch/riscv/include/asm/barrier.h
arch/riscv/include/asm/bitops.h
arch/riscv/include/asm/bug.h
arch/riscv/include/asm/cacheflush.h
arch/riscv/include/asm/csr.h
arch/riscv/include/asm/io.h
arch/riscv/include/asm/irqflags.h
arch/riscv/include/asm/mmu.h
arch/riscv/include/asm/mmu_context.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/ptrace.h
arch/riscv/include/asm/spinlock.h
arch/riscv/include/asm/thread_info.h
arch/riscv/include/asm/timex.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/include/asm/uaccess.h
arch/riscv/include/asm/unistd.h
arch/riscv/include/asm/vdso.h
arch/riscv/include/uapi/asm/Kbuild
arch/riscv/include/uapi/asm/syscalls.h [new file with mode: 0644]
arch/riscv/kernel/entry.S
arch/riscv/kernel/head.S
arch/riscv/kernel/process.c
arch/riscv/kernel/riscv_ksyms.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/sys_riscv.c
arch/riscv/kernel/syscall_table.c
arch/riscv/kernel/vdso/Makefile
arch/riscv/kernel/vdso/clock_getres.S [new file with mode: 0644]
arch/riscv/kernel/vdso/clock_gettime.S [new file with mode: 0644]
arch/riscv/kernel/vdso/flush_icache.S [new file with mode: 0644]
arch/riscv/kernel/vdso/getcpu.S [new file with mode: 0644]
arch/riscv/kernel/vdso/gettimeofday.S [new file with mode: 0644]
arch/riscv/kernel/vdso/vdso.lds.S
arch/riscv/lib/delay.c
arch/riscv/mm/Makefile
arch/riscv/mm/cacheflush.c [new file with mode: 0644]
arch/riscv/mm/fault.c
arch/riscv/mm/ioremap.c
arch/s390/Kbuild
arch/s390/Makefile
arch/s390/appldata/Makefile
arch/s390/appldata/appldata_base.c
arch/s390/appldata/appldata_mem.c
arch/s390/appldata/appldata_net_sum.c
arch/s390/appldata/appldata_os.c
arch/s390/boot/compressed/vmlinux.scr
arch/s390/boot/install.sh
arch/s390/crypto/aes_s390.c
arch/s390/crypto/arch_random.c
arch/s390/crypto/crc32-vx.c
arch/s390/crypto/des_s390.c
arch/s390/crypto/ghash_s390.c
arch/s390/crypto/paes_s390.c
arch/s390/crypto/prng.c
arch/s390/crypto/sha.h
arch/s390/crypto/sha1_s390.c
arch/s390/crypto/sha256_s390.c
arch/s390/crypto/sha512_s390.c
arch/s390/crypto/sha_common.c
arch/s390/hypfs/Makefile
arch/s390/hypfs/inode.c
arch/s390/include/asm/Kbuild
arch/s390/include/asm/alternative.h
arch/s390/include/asm/ap.h
arch/s390/include/asm/bugs.h
arch/s390/include/asm/cpu_mf.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/kprobes.h
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/kvm_para.h
arch/s390/include/asm/livepatch.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/perf_event.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/ptrace.h
arch/s390/include/asm/segment.h
arch/s390/include/asm/switch_to.h
arch/s390/include/asm/syscall.h
arch/s390/include/asm/sysinfo.h
arch/s390/include/asm/thread_info.h
arch/s390/include/asm/topology.h
arch/s390/include/asm/vga.h
arch/s390/include/uapi/asm/Kbuild
arch/s390/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
arch/s390/include/uapi/asm/kvm.h
arch/s390/include/uapi/asm/kvm_para.h
arch/s390/include/uapi/asm/kvm_perf.h
arch/s390/include/uapi/asm/perf_regs.h
arch/s390/include/uapi/asm/ptrace.h
arch/s390/include/uapi/asm/sthyi.h
arch/s390/include/uapi/asm/virtio-ccw.h
arch/s390/include/uapi/asm/vmcp.h
arch/s390/include/uapi/asm/zcrypt.h
arch/s390/kernel/alternative.c
arch/s390/kernel/compat_linux.c
arch/s390/kernel/debug.c
arch/s390/kernel/dis.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/entry.S
arch/s390/kernel/ipl.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/lgr.c
arch/s390/kernel/module.c
arch/s390/kernel/nmi.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/perf_event.c
arch/s390/kernel/perf_regs.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/stacktrace.c
arch/s390/kernel/sthyi.c
arch/s390/kernel/syscalls.S
arch/s390/kernel/time.c
arch/s390/kernel/topology.c
arch/s390/kernel/vdso.c
arch/s390/kernel/vdso32/clock_getres.S
arch/s390/kernel/vdso32/clock_gettime.S
arch/s390/kernel/vdso32/gettimeofday.S
arch/s390/kernel/vdso64/clock_getres.S
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/kernel/vdso64/gettimeofday.S
arch/s390/kernel/vdso64/note.S
arch/s390/kernel/vtime.c
arch/s390/kvm/Makefile
arch/s390/kvm/diag.c
arch/s390/kvm/gaccess.h
arch/s390/kvm/guestdbg.c
arch/s390/kvm/intercept.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/irq.h
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c
arch/s390/kvm/sigp.c
arch/s390/kvm/vsie.c
arch/s390/lib/uaccess.c
arch/s390/mm/cmm.c
arch/s390/mm/gmap.c
arch/s390/mm/mmap.c
arch/s390/mm/pgalloc.c
arch/s390/mm/pgtable.c
arch/s390/net/Makefile
arch/s390/net/bpf_jit_comp.c
arch/s390/numa/Makefile
arch/s390/pci/Makefile
arch/s390/pci/pci.c
arch/s390/pci/pci_debug.c
arch/s390/pci/pci_dma.c
arch/s390/pci/pci_insn.c
arch/s390/tools/gen_opcode_table.c
arch/score/include/asm/thread_info.h
arch/score/include/uapi/asm/Kbuild
arch/sh/boards/mach-se/770x/setup.c
arch/sh/include/asm/thread_info.h
arch/sh/include/mach-se/mach/se.h
arch/sh/include/uapi/asm/Kbuild
arch/sparc/crypto/Makefile
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/thread_info_32.h
arch/sparc/include/asm/thread_info_64.h
arch/sparc/include/uapi/asm/Kbuild
arch/sparc/lib/Makefile
arch/sparc/lib/hweight.S
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/sparc/net/bpf_jit_comp_64.c
arch/tile/include/asm/pgtable.h
arch/tile/include/asm/thread_info.h
arch/tile/include/uapi/asm/Kbuild
arch/um/include/asm/Kbuild
arch/um/include/asm/mmu_context.h
arch/um/include/asm/processor-generic.h
arch/um/include/asm/thread_info.h
arch/um/include/asm/vmlinux.lds.h [new file with mode: 0644]
arch/um/kernel/dyn.lds.S
arch/um/kernel/trap.c
arch/um/kernel/um_arch.c
arch/um/kernel/uml.lds.S
arch/unicore32/include/asm/mmu_context.h
arch/unicore32/include/asm/thread_info.h
arch/unicore32/include/uapi/asm/Kbuild
arch/unicore32/kernel/traps.c
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/Makefile
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/head_64.S
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/pagetable.c
arch/x86/boot/compressed/pgtable_64.c [new file with mode: 0644]
arch/x86/boot/genimage.sh
arch/x86/crypto/aesni-intel_asm.S
arch/x86/crypto/camellia-aesni-avx-asm_64.S
arch/x86/crypto/camellia-aesni-avx2-asm_64.S
arch/x86/crypto/crc32c-pcl-intel-asm_64.S
arch/x86/crypto/salsa20_glue.c
arch/x86/entry/calling.h
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/entry/vsyscall/vsyscall_64.c
arch/x86/events/amd/power.c
arch/x86/events/intel/bts.c
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/rapl.c
arch/x86/events/perf_event.h
arch/x86/include/asm/acpi.h
arch/x86/include/asm/alternative.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/asm-prototypes.h
arch/x86/include/asm/asm.h
arch/x86/include/asm/cpu_entry_area.h [new file with mode: 0644]
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/desc.h
arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/espfix.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/hypervisor.h
arch/x86/include/asm/intel_ds.h [new file with mode: 0644]
arch/x86/include/asm/invpcid.h [new file with mode: 0644]
arch/x86/include/asm/irqdomain.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/kdebug.h
arch/x86/include/asm/kmemcheck.h [deleted file]
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mem_encrypt.h
arch/x86/include/asm/mmu.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/nospec-branch.h [new file with mode: 0644]
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/pci_x86.h
arch/x86/include/asm/pgalloc.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_32_types.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/pgtable_64_types.h
arch/x86/include/asm/processor-flags.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/pti.h [new file with mode: 0644]
arch/x86/include/asm/segment.h
arch/x86/include/asm/stacktrace.h
arch/x86/include/asm/suspend_32.h
arch/x86/include/asm/suspend_64.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/trace/irq_vectors.h
arch/x86/include/asm/traps.h
arch/x86/include/asm/unwind.h
arch/x86/include/asm/vsyscall.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/uapi/asm/Kbuild
arch/x86/include/uapi/asm/processor-flags.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/alternative.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/apic_flat_64.c
arch/x86/kernel/apic/apic_noop.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/msi.c
arch/x86/kernel/apic/probe_32.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel_rdt.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/doublefault.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/ftrace_32.S
arch/x86/kernel/ftrace_64.S
arch/x86/kernel/head64.c
arch/x86/kernel/head_64.S
arch/x86/kernel/idt.c
arch/x86/kernel/ioport.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/irqinit.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/ldt.c
arch/x86/kernel/machine_kexec_32.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/stacktrace.c
arch/x86/kernel/tboot.c
arch/x86/kernel/tls.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kernel/unwind_orc.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/cpuid.h
arch/x86/kvm/emulate.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/Makefile
arch/x86/lib/checksum_32.S
arch/x86/lib/delay.c
arch/x86/lib/retpoline.S [new file with mode: 0644]
arch/x86/lib/x86-opcode-map.txt
arch/x86/mm/Makefile
arch/x86/mm/cpu_entry_area.c [new file with mode: 0644]
arch/x86/mm/debug_pagetables.c
arch/x86/mm/dump_pagetables.c
arch/x86/mm/extable.c
arch/x86/mm/fault.c
arch/x86/mm/init.c
arch/x86/mm/init_32.c
arch/x86/mm/ioremap.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/kaslr.c
arch/x86/mm/kmemcheck/error.c [deleted file]
arch/x86/mm/kmemcheck/error.h [deleted file]
arch/x86/mm/kmemcheck/opcode.c [deleted file]
arch/x86/mm/kmemcheck/opcode.h [deleted file]
arch/x86/mm/kmemcheck/pte.c [deleted file]
arch/x86/mm/kmemcheck/pte.h [deleted file]
arch/x86/mm/kmemcheck/selftest.c [deleted file]
arch/x86/mm/kmemcheck/selftest.h [deleted file]
arch/x86/mm/kmemcheck/shadow.h [deleted file]
arch/x86/mm/kmmio.c
arch/x86/mm/mem_encrypt.c
arch/x86/mm/mem_encrypt_boot.S
arch/x86/mm/pgtable.c
arch/x86/mm/pgtable_32.c
arch/x86/mm/pti.c [new file with mode: 0644]
arch/x86/mm/tlb.c
arch/x86/pci/broadcom_bus.c
arch/x86/pci/common.c
arch/x86/pci/fixup.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
arch/x86/platform/intel-mid/device_libs/platform_bt.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/platform/uv/uv_irq.c
arch/x86/platform/uv/uv_nmi.c
arch/x86/power/cpu.c
arch/x86/xen/apic.c
arch/x86/xen/enlighten.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/setup.c
arch/x86/xen/xen-asm_64.S
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/thread_info.h
arch/xtensa/include/uapi/asm/Kbuild
block/bio.c
block/blk-core.c
block/blk-map.c
block/blk-mq.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk-wbt.c
block/blk.h
block/bounce.c
block/genhd.c
block/kyber-iosched.c
crypto/af_alg.c
crypto/algapi.c
crypto/algif_aead.c
crypto/algif_skcipher.c
crypto/asymmetric_keys/pkcs7_parser.c
crypto/asymmetric_keys/pkcs7_trust.c
crypto/asymmetric_keys/pkcs7_verify.c
crypto/asymmetric_keys/public_key.c
crypto/asymmetric_keys/x509_cert_parser.c
crypto/asymmetric_keys/x509_public_key.c
crypto/chacha20poly1305.c
crypto/hmac.c
crypto/mcryptd.c
crypto/pcrypt.c
crypto/rsa_helper.c
crypto/salsa20_generic.c
crypto/shash.c
crypto/skcipher.c
drivers/Makefile
drivers/acpi/acpi_lpss.c
drivers/acpi/acpi_video.c
drivers/acpi/acpica/acapps.h
drivers/acpi/acpica/acdebug.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acmacros.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/dbexec.c
drivers/acpi/acpica/dbfileio.c
drivers/acpi/acpica/dbinput.c
drivers/acpi/acpica/dscontrol.c
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/dsobject.c
drivers/acpi/acpica/dspkginit.c
drivers/acpi/acpica/dsutils.c
drivers/acpi/acpica/dswload.c
drivers/acpi/acpica/dswload2.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/exdump.c
drivers/acpi/acpica/hwtimer.c
drivers/acpi/acpica/hwvalid.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/acpica/nsconvert.c
drivers/acpi/acpica/nsnames.c
drivers/acpi/acpica/nssearch.c
drivers/acpi/acpica/nsxfeval.c
drivers/acpi/acpica/psargs.c
drivers/acpi/acpica/psobject.c
drivers/acpi/acpica/psutils.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utdecode.c
drivers/acpi/acpica/uterror.c
drivers/acpi/acpica/utinit.c
drivers/acpi/acpica/utmath.c
drivers/acpi/acpica/utmutex.c
drivers/acpi/acpica/utnonansi.c
drivers/acpi/acpica/utosi.c
drivers/acpi/acpica/utstrsuppt.c
drivers/acpi/acpica/uttrack.c
drivers/acpi/acpica/utxferror.c
drivers/acpi/apei/erst.c
drivers/acpi/apei/ghes.c
drivers/acpi/battery.c
drivers/acpi/button.c
drivers/acpi/cppc_acpi.c
drivers/acpi/device_pm.c
drivers/acpi/device_sysfs.c
drivers/acpi/ec.c
drivers/acpi/ec_sys.c
drivers/acpi/evged.c
drivers/acpi/internal.h
drivers/acpi/nfit/core.c
drivers/acpi/numa.c
drivers/acpi/pci_link.c
drivers/acpi/pmic/intel_pmic_bxtwc.c
drivers/acpi/pmic/intel_pmic_chtdc_ti.c
drivers/acpi/pmic/intel_pmic_chtwc.c
drivers/acpi/pmic/intel_pmic_crc.c
drivers/acpi/pmic/intel_pmic_xpower.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/acpi/sysfs.c
drivers/acpi/utils.c
drivers/android/binder.c
drivers/ata/ahci_mtk.c
drivers/ata/ahci_qoriq.c
drivers/ata/libata-core.c
drivers/ata/pata_pdc2027x.c
drivers/atm/ambassador.c
drivers/atm/fore200e.c
drivers/atm/lanai.c
drivers/atm/suni.c
drivers/auxdisplay/Kconfig
drivers/base/Kconfig
drivers/base/cacheinfo.c
drivers/base/cpu.c
drivers/base/isa.c
drivers/base/power/domain.c
drivers/base/power/main.c
drivers/base/power/power.h
drivers/base/power/runtime.c
drivers/base/power/sysfs.c
drivers/base/power/wakeirq.c
drivers/base/power/wakeup.c
drivers/bcma/Kconfig
drivers/block/loop.c
drivers/block/null_blk.c
drivers/block/rbd.c
drivers/bus/Kconfig
drivers/bus/arm-cci.c
drivers/bus/arm-ccn.c
drivers/bus/sunxi-rsb.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_si_parisc.c
drivers/char/ipmi/ipmi_si_pci.c
drivers/clk/clk.c
drivers/clk/sunxi/clk-sun9i-mmc.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/arm_big_little.c
drivers/cpufreq/armada-37xx-cpufreq.c [new file with mode: 0644]
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/mediatek-cpufreq.c
drivers/cpufreq/mvebu-cpufreq.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpufreq/qoriq-cpufreq.c
drivers/cpufreq/scpi-cpufreq.c
drivers/cpufreq/ti-cpufreq.c
drivers/cpuidle/governor.c
drivers/crypto/chelsio/Kconfig
drivers/crypto/inside-secure/safexcel.c
drivers/crypto/inside-secure/safexcel_cipher.c
drivers/crypto/inside-secure/safexcel_hash.c
drivers/crypto/n2_core.c
drivers/dax/device.c
drivers/devfreq/devfreq.c
drivers/dma/at_hdmac.c
drivers/dma/dma-jz4740.c
drivers/dma/dmatest.c
drivers/dma/fsl-edma.c
drivers/dma/ioat/init.c
drivers/dma/sh/rcar-dmac.c
drivers/extcon/extcon-axp288.c
drivers/extcon/extcon-usbc-cros-ec.c
drivers/firmware/arm_scpi.c
drivers/firmware/efi/capsule-loader.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/runtime-map.c
drivers/firmware/google/vpd.c
drivers/firmware/psci_checker.c
drivers/firmware/qemu_fw_cfg.c
drivers/gpio/gpio-74x164.c
drivers/gpio/gpio-bcm-kona.c
drivers/gpio/gpio-brcmstb.c
drivers/gpio/gpio-davinci.c
drivers/gpio/gpio-merrifield.c
drivers/gpio/gpio-mmio.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-reg.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpio-xgene-sb.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-devprop.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpio/gpiolib.h
drivers/gpu/drm/amd/acp/Makefile
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdkfd/Makefile
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/display/Makefile
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/dc/Makefile
drivers/gpu/drm/amd/display/dc/basics/Makefile
drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
drivers/gpu/drm/amd/display/dc/bios/Makefile
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
drivers/gpu/drm/amd/display/dc/calcs/Makefile
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_debug.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc_helper.c
drivers/gpu/drm/amd/display/dc/dce/Makefile
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dce100/Makefile
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
drivers/gpu/drm/amd/display/dc/dce110/Makefile
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
drivers/gpu/drm/amd/display/dc/dce112/Makefile
drivers/gpu/drm/amd/display/dc/dce120/Makefile
drivers/gpu/drm/amd/display/dc/dce80/Makefile
drivers/gpu/drm/amd/display/dc/dcn10/Makefile
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/gpio/Makefile
drivers/gpu/drm/amd/display/dc/i2caux/Makefile
drivers/gpu/drm/amd/display/dc/inc/core_status.h
drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
drivers/gpu/drm/amd/display/dc/irq/Makefile
drivers/gpu/drm/amd/display/dc/virtual/Makefile
drivers/gpu/drm/amd/display/modules/freesync/Makefile
drivers/gpu/drm/amd/lib/Makefile
drivers/gpu/drm/amd/powerplay/Makefile
drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
drivers/gpu/drm/amd/powerplay/inc/smu72.h
drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
drivers/gpu/drm/amd/powerplay/smumgr/Makefile
drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
drivers/gpu/drm/arm/hdlcd_crtc.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/malidp_crtc.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_hw.h
drivers/gpu/drm/arm/malidp_planes.c
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/armada/armada_crtc.h
drivers/gpu/drm/armada/armada_overlay.c
drivers/gpu/drm/bridge/adv7511/adv7511.h
drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/bridge/lvds-encoder.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_mode_config.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gemfs.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_sw_fence.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_cdclk.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lpe_audio.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_uncore.h
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nouveau_vmm.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
drivers/gpu/drm/omapdrm/displays/Kconfig
drivers/gpu/drm/omapdrm/dss/dpi.c
drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/rockchip/dw-mipi-dsi.c
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_irq.c
drivers/gpu/drm/vc4/vc4_v3d.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/hid/hid-core.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-holtekff.c
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/hv/vmbus_drv.c
drivers/hwmon/hwmon.c
drivers/hwmon/jc42.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwtracing/stm/ftrace.c
drivers/i2c/busses/i2c-cht-wc.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-stm32.h
drivers/i2c/busses/i2c-stm32f4.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/i2c/i2c-boardinfo.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-core-smbus.c
drivers/iio/adc/Kconfig
drivers/iio/adc/Makefile
drivers/iio/adc/cpcap-adc.c
drivers/iio/adc/meson_saradc.c
drivers/iio/adc/sd_adc_modulator.c [new file with mode: 0644]
drivers/iio/adc/stm32-dfsdm-adc.c [new file with mode: 0644]
drivers/iio/adc/stm32-dfsdm-core.c [new file with mode: 0644]
drivers/iio/adc/stm32-dfsdm.h [new file with mode: 0644]
drivers/iio/buffer/Kconfig
drivers/iio/buffer/Makefile
drivers/iio/buffer/industrialio-buffer-cb.c
drivers/iio/buffer/industrialio-hw-consumer.c [new file with mode: 0644]
drivers/iio/health/max30102.c
drivers/iio/industrialio-core.c
drivers/iio/inkern.c
drivers/iio/proximity/sx9500.c
drivers/infiniband/Kconfig
drivers/infiniband/core/cma.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/device.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/security.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hns/hns_roce_alloc.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hem.c
drivers/infiniband/hw/hns/hns_roce_hem.h
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_d.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/cmd.c
drivers/infiniband/hw/mlx5/cmd.h
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/input/joystick/analog.c
drivers/input/joystick/xpad.c
drivers/input/misc/ims-pcu.c
drivers/input/misc/twl4030-vibra.c
drivers/input/misc/twl6040-vibra.c
drivers/input/misc/xen-kbdfront.c
drivers/input/mouse/alps.c
drivers/input/mouse/alps.h
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/trackpoint.c
drivers/input/mouse/trackpoint.h
drivers/input/rmi4/rmi_driver.c
drivers/input/rmi4/rmi_f01.c
drivers/input/touchscreen/88pm860x-ts.c
drivers/input/touchscreen/elants_i2c.c
drivers/input/touchscreen/hideep.c
drivers/input/touchscreen/of_touchscreen.c
drivers/input/touchscreen/s6sy761.c
drivers/input/touchscreen/stmfts.c
drivers/iommu/amd_iommu.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-renesas-intc-irqpin.c
drivers/leds/led-core.c
drivers/leds/leds-pm8058.c
drivers/md/bcache/alloc.c
drivers/md/bcache/btree.c
drivers/md/bcache/extents.c
drivers/md/bcache/journal.c
drivers/md/bcache/request.c
drivers/md/dm-bufio.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-mpath.c
drivers/md/dm-snap.c
drivers/md/dm-table.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/persistent-data/dm-btree.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5.c
drivers/media/common/siano/smscoreapi.c
drivers/media/dvb-core/dvb_ca_en50221.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-core/dvb_net.c
drivers/media/dvb-frontends/af9013.h
drivers/media/dvb-frontends/ascot2e.h
drivers/media/dvb-frontends/cxd2820r.h
drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h
drivers/media/dvb-frontends/drx39xyj/drx_driver.h
drivers/media/dvb-frontends/drx39xyj/drxj.c
drivers/media/dvb-frontends/drx39xyj/drxj.h
drivers/media/dvb-frontends/drxk.h
drivers/media/dvb-frontends/drxk_hard.c
drivers/media/dvb-frontends/dvb-pll.h
drivers/media/dvb-frontends/helene.h
drivers/media/dvb-frontends/horus3a.h
drivers/media/dvb-frontends/ix2505v.c
drivers/media/dvb-frontends/ix2505v.h
drivers/media/dvb-frontends/l64781.c
drivers/media/dvb-frontends/m88ds3103.h
drivers/media/dvb-frontends/mb86a20s.h
drivers/media/dvb-frontends/mn88472.h
drivers/media/dvb-frontends/rtl2830.h
drivers/media/dvb-frontends/rtl2832.h
drivers/media/dvb-frontends/rtl2832_sdr.h
drivers/media/dvb-frontends/sp887x.c
drivers/media/dvb-frontends/stb6000.h
drivers/media/dvb-frontends/stv0299.c
drivers/media/dvb-frontends/tda10071.h
drivers/media/dvb-frontends/tda826x.h
drivers/media/dvb-frontends/tua6100.c
drivers/media/dvb-frontends/tua6100.h
drivers/media/dvb-frontends/zd1301_demod.h
drivers/media/dvb-frontends/zl10036.c
drivers/media/dvb-frontends/zl10036.h
drivers/media/i2c/Kconfig
drivers/media/i2c/et8ek8/Kconfig
drivers/media/i2c/imx274.c
drivers/media/i2c/lm3560.c
drivers/media/i2c/m5mols/m5mols_capture.c
drivers/media/i2c/m5mols/m5mols_controls.c
drivers/media/i2c/m5mols/m5mols_core.c
drivers/media/i2c/ov5647.c
drivers/media/i2c/s5k6a3.c
drivers/media/i2c/s5k6aa.c
drivers/media/i2c/tvp514x.c
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
drivers/media/pci/solo6x10/solo6x10-enc.c
drivers/media/pci/sta2x11/sta2x11_vip.c
drivers/media/pci/tw68/tw68-risc.c
drivers/media/platform/davinci/vpif.c
drivers/media/platform/davinci/vpif_capture.c
drivers/media/platform/davinci/vpif_display.c
drivers/media/platform/exynos4-is/fimc-capture.c
drivers/media/platform/exynos4-is/media-dev.c
drivers/media/platform/exynos4-is/mipi-csis.c
drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
drivers/media/platform/mtk-vpu/mtk_vpu.c
drivers/media/platform/pxa_camera.c
drivers/media/platform/rcar_fdp1.c
drivers/media/platform/rcar_jpu.c
drivers/media/platform/s3c-camif/camif-core.c
drivers/media/platform/sh_veu.c
drivers/media/platform/soc_camera/soc_scale_crop.c
drivers/media/platform/sti/hva/hva-h264.c
drivers/media/platform/ti-vpe/vpe.c
drivers/media/platform/vim2m.c
drivers/media/platform/vsp1/vsp1_dl.c
drivers/media/radio/radio-si476x.c
drivers/media/radio/radio-wl1273.c
drivers/media/rc/img-ir/img-ir-hw.c
drivers/media/rc/imon.c
drivers/media/rc/ir-jvc-decoder.c
drivers/media/rc/ir-lirc-codec.c
drivers/media/rc/ir-nec-decoder.c
drivers/media/rc/ir-sanyo-decoder.c
drivers/media/rc/ir-sharp-decoder.c
drivers/media/rc/ir-xmp-decoder.c
drivers/media/rc/rc-ir-raw.c
drivers/media/rc/rc-main.c
drivers/media/rc/sir_ir.c
drivers/media/rc/st_rc.c
drivers/media/rc/streamzap.c
drivers/media/tuners/mt2063.c
drivers/media/usb/dvb-usb/cinergyT2-fe.c
drivers/media/usb/dvb-usb/dib0700_devices.c
drivers/media/usb/dvb-usb/dibusb-common.c
drivers/media/usb/dvb-usb/friio-fe.c
drivers/media/usb/dvb-usb/friio.c
drivers/media/usb/gspca/ov519.c
drivers/media/usb/pwc/pwc-dec23.c
drivers/media/usb/siano/smsusb.c
drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
drivers/media/usb/usbtv/usbtv-core.c
drivers/media/v4l2-core/tuner-core.c
drivers/media/v4l2-core/v4l2-async.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/media/v4l2-core/v4l2-fwnode.c
drivers/media/v4l2-core/v4l2-mem2mem.c
drivers/media/v4l2-core/videobuf-core.c
drivers/media/v4l2-core/videobuf-dma-sg.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/v4l2-core/videobuf2-memops.c
drivers/media/v4l2-core/videobuf2-v4l2.c
drivers/memstick/host/Kconfig
drivers/memstick/host/rtsx_pci_ms.c
drivers/memstick/host/rtsx_usb_ms.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/ab8500-debugfs.c
drivers/mfd/arizona-irq.c
drivers/mfd/atmel-flexcom.c
drivers/mfd/axp20x.c
drivers/mfd/cros_ec.c
drivers/mfd/cros_ec_dev.c [new file with mode: 0644]
drivers/mfd/cros_ec_dev.h [new file with mode: 0644]
drivers/mfd/cros_ec_spi.c
drivers/mfd/intel-lpss.c
drivers/mfd/intel_soc_pmic_core.c
drivers/mfd/kempld-core.c
drivers/mfd/lpc_ich.c
drivers/mfd/max77843.c
drivers/mfd/palmas.c
drivers/mfd/pcf50633-core.c
drivers/mfd/rave-sp.c [new file with mode: 0644]
drivers/mfd/rtl8411.c [deleted file]
drivers/mfd/rts5209.c [deleted file]
drivers/mfd/rts5227.c [deleted file]
drivers/mfd/rts5229.c [deleted file]
drivers/mfd/rts5249.c [deleted file]
drivers/mfd/rtsx_pcr.c [deleted file]
drivers/mfd/rtsx_pcr.h [deleted file]
drivers/mfd/rtsx_usb.c [deleted file]
drivers/mfd/stm32-lptimer.c
drivers/mfd/stm32-timers.c
drivers/mfd/ti_am335x_tscadc.c
drivers/mfd/tmio_core.c
drivers/mfd/twl4030-audio.c
drivers/mfd/twl6040.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/cardreader/Kconfig [new file with mode: 0644]
drivers/misc/cardreader/Makefile [new file with mode: 0644]
drivers/misc/cardreader/rtl8411.c [new file with mode: 0644]
drivers/misc/cardreader/rts5209.c [new file with mode: 0644]
drivers/misc/cardreader/rts5227.c [new file with mode: 0644]
drivers/misc/cardreader/rts5229.c [new file with mode: 0644]
drivers/misc/cardreader/rts5249.c [new file with mode: 0644]
drivers/misc/cardreader/rts5260.c [new file with mode: 0644]
drivers/misc/cardreader/rts5260.h [new file with mode: 0644]
drivers/misc/cardreader/rtsx_pcr.c [new file with mode: 0644]
drivers/misc/cardreader/rtsx_pcr.h [new file with mode: 0644]
drivers/misc/cardreader/rtsx_usb.c [new file with mode: 0644]
drivers/misc/cxl/pci.c
drivers/misc/eeprom/at24.c
drivers/misc/pti.c
drivers/mmc/core/block.c
drivers/mmc/core/bus.c
drivers/mmc/core/card.h
drivers/mmc/core/debugfs.c
drivers/mmc/core/mmc.c
drivers/mmc/core/quirks.h
drivers/mmc/core/sd.c
drivers/mmc/host/Kconfig
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/rtsx_pci_sdmmc.c
drivers/mmc/host/rtsx_usb_sdmmc.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdsuper.c
drivers/mtd/nand/brcmnand/brcmnand.c
drivers/mtd/nand/gpio.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mux/core.c
drivers/net/bonding/bond_netlink.c
drivers/net/can/flexcan.c
drivers/net/can/peak_canfd/peak_canfd.c
drivers/net/can/peak_canfd/peak_pciefd_main.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/mcba_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/can/usb/usb_8dev.c
drivers/net/can/vxcan.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2_cfp.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
drivers/net/ethernet/aquantia/atlantic/ver.h
drivers/net/ethernet/arc/emac.h
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/arc/emac_rockchip.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/cirrus/cs89x0.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet.h
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/emac.h
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/e1000/e1000.h
drivers/net/ethernet/intel/e1000/e1000_hw.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/rl.c
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/bpf/main.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qed/qed_spq.c
drivers/net/ethernet/qualcomm/emac/emac-phy.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/xilinx/Kconfig
drivers/net/geneve.c
drivers/net/hippi/rrunner.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/macvlan.c
drivers/net/phy/at803x.c
drivers/net/phy/marvell.c
drivers/net/phy/marvell10g.c
drivers/net/phy/mdio-sun4i.c
drivers/net/phy/mdio-xgene.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/meson-gxl.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp-bus.c
drivers/net/phy/sfp.c
drivers/net/ppp/ppp_generic.c
drivers/net/ppp/pppoe.c
drivers/net/tap.c
drivers/net/thunderbolt.c
drivers/net/tun.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wan/lmc/lmc_main.c
drivers/net/wireless/ath/ath9k/channel.c
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wcn36xx/pmc.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/st/cw1200/sta.c
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netfront.c
drivers/nvdimm/btt.c
drivers/nvdimm/btt.h
drivers/nvdimm/pfn_devs.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/fc.c
drivers/nvme/target/fcloop.c
drivers/nvme/target/loop.c
drivers/nvmem/meson-mx-efuse.c
drivers/of/dynamic.c
drivers/of/of_mdio.c
drivers/of/overlay.c
drivers/of/unittest.c
drivers/opp/Makefile
drivers/opp/ti-opp-supply.c [new file with mode: 0644]
drivers/parisc/dino.c
drivers/parisc/eisa_eeprom.c
drivers/parisc/lba_pci.c
drivers/pci/host/pci-hyperv.c
drivers/pci/host/pcie-rcar.c
drivers/pci/pci-driver.c
drivers/pci/pcie/portdrv_pci.c
drivers/phy/motorola/phy-cpcap-usb.c
drivers/phy/phy-core.c
drivers/phy/renesas/Kconfig
drivers/phy/rockchip/phy-rockchip-typec.c
drivers/phy/tegra/xusb.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-denverton.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/pinctrl-gemini.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
drivers/platform/chrome/Kconfig
drivers/platform/chrome/Makefile
drivers/platform/chrome/cros_ec_debugfs.c
drivers/platform/chrome/cros_ec_debugfs.h [deleted file]
drivers/platform/chrome/cros_ec_dev.c [deleted file]
drivers/platform/chrome/cros_ec_dev.h [deleted file]
drivers/platform/chrome/cros_ec_lightbar.c
drivers/platform/chrome/cros_ec_sysfs.c
drivers/platform/chrome/cros_ec_vbc.c
drivers/platform/x86/asus-wireless.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/dell-wmi.c
drivers/platform/x86/surfacepro3_button.c
drivers/platform/x86/wmi.c
drivers/pnp/pnpbios/core.c
drivers/pnp/quirks.c
drivers/power/avs/rockchip-io-domain.c
drivers/powercap/intel_rapl.c
drivers/powercap/powercap_sys.c
drivers/s390/Makefile
drivers/s390/block/Kconfig
drivers/s390/block/dasd.c
drivers/s390/block/dasd_3990_erp.c
drivers/s390/block/dasd_devmap.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk.c
drivers/s390/block/xpram.c
drivers/s390/char/Kconfig
drivers/s390/char/Makefile
drivers/s390/char/defkeymap.map
drivers/s390/char/fs3270.c
drivers/s390/char/hmcdrv_mod.c
drivers/s390/char/monreader.c
drivers/s390/char/monwriter.c
drivers/s390/char/raw3270.c
drivers/s390/char/sclp_async.c
drivers/s390/char/tape_34xx.c
drivers/s390/char/tape_3590.c
drivers/s390/char/tape_class.c
drivers/s390/char/tape_core.c
drivers/s390/char/tty3270.c
drivers/s390/char/vmlogrdr.c
drivers/s390/char/vmur.c
drivers/s390/char/zcore.c
drivers/s390/cio/blacklist.h
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/chp.c
drivers/s390/cio/chsc.c
drivers/s390/cio/chsc_sch.c
drivers/s390/cio/cio.c
drivers/s390/cio/cmf.c
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/device_ops.c
drivers/s390/cio/eadm_sch.c
drivers/s390/cio/isc.c
drivers/s390/cio/qdio_main.c
drivers/s390/cio/qdio_setup.c
drivers/s390/cio/scm.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/pkey_api.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_api.h
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_cca_key.h
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex2a.h
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/crypto/zcrypt_error.h
drivers/s390/crypto/zcrypt_msgtype50.c
drivers/s390/crypto/zcrypt_msgtype50.h
drivers/s390/crypto/zcrypt_msgtype6.c
drivers/s390/crypto/zcrypt_msgtype6.h
drivers/s390/crypto/zcrypt_pcixcc.c
drivers/s390/crypto/zcrypt_pcixcc.h
drivers/s390/crypto/zcrypt_queue.c
drivers/s390/net/Kconfig
drivers/s390/net/ctcm_main.c
drivers/s390/net/fsm.c
drivers/s390/net/lcs.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3.h
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/s390/net/smsgiucv.c
drivers/s390/net/smsgiucv_app.c
drivers/s390/scsi/Makefile
drivers/s390/scsi/zfcp_aux.c
drivers/s390/virtio/Makefile
drivers/s390/virtio/virtio_ccw.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commsup.c
drivers/scsi/aacraid/linit.c
drivers/scsi/aacraid/rx.c
drivers/scsi/aacraid/src.c
drivers/scsi/bfa/bfad_bsg.c
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bfa/bfad_im.h
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/scsi_debugfs.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_spi.c
drivers/scsi/sd.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/ufshcd.c
drivers/soc/amlogic/meson-gx-socinfo.c
drivers/spi/spi-armada-3700.c
drivers/spi/spi-atmel.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sun4i.c
drivers/spi/spi-xilinx.c
drivers/ssb/Kconfig
drivers/staging/android/ashmem.c
drivers/staging/android/ion/Kconfig
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_cma_heap.c
drivers/staging/ccree/ssi_hash.c
drivers/staging/comedi/drivers/ni_atmio.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
drivers/staging/lustre/lnet/lnet/lib-socket.c
drivers/staging/lustre/lustre/llite/file.c
drivers/staging/lustre/lustre/llite/llite_lib.c
drivers/staging/media/atomisp/include/linux/atomisp.h
drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h
drivers/staging/octeon-usb/octeon-hcd.c
drivers/staging/pi433/rf69.c
drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
drivers/target/target_core_pscsi.c
drivers/tee/optee/core.c
drivers/thermal/cpu_cooling.c
drivers/thunderbolt/nhi.c
drivers/tty/n_tty.c
drivers/tty/serdev/core.c
drivers/tty/serdev/serdev-ttyport.c
drivers/tty/serial/8250/8250_early.c
drivers/tty/serial/8250/8250_pci.c
drivers/usb/chipidea/ci_hdrc_msm.c
drivers/usb/common/ulpi.c
drivers/usb/core/config.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/params.c
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/legacy/Kconfig
drivers/usb/gadget/udc/bdc/bdc_core.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/ehci-dbg.c
drivers/usb/host/xhci-debugfs.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/misc/usb3503.c
drivers/usb/mon/mon_bin.c
drivers/usb/musb/da8xx.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/usb_debug.c
drivers/usb/storage/uas-detect.h
drivers/usb/storage/unusual_devs.h
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/Kconfig
drivers/usb/typec/ucsi/Kconfig
drivers/usb/usbip/stub_dev.c
drivers/usb/usbip/stub_main.c
drivers/usb/usbip/stub_rx.c
drivers/usb/usbip/stub_tx.c
drivers/usb/usbip/usbip_common.c
drivers/usb/usbip/usbip_common.h
drivers/usb/usbip/vhci_hcd.c
drivers/usb/usbip/vhci_rx.c
drivers/usb/usbip/vhci_sysfs.c
drivers/usb/usbip/vhci_tx.c
drivers/usb/usbip/vudc_rx.c
drivers/usb/usbip/vudc_tx.c
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/video/backlight/apple_bl.c
drivers/video/backlight/corgi_lcd.c
drivers/video/backlight/tdo24m.c
drivers/video/backlight/tosa_lcd.c
drivers/virtio/virtio.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_mmio.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/rave-sp-wdt.c [new file with mode: 0644]
drivers/xen/Kconfig
drivers/xen/balloon.c
drivers/xen/gntdev.c
drivers/xen/pvcalls-front.c
fs/9p/vfs_super.c
fs/adfs/super.c
fs/affs/amigaffs.c
fs/affs/bitmap.c
fs/affs/super.c
fs/afs/dir.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/afs/security.c
fs/afs/super.c
fs/afs/write.c
fs/autofs4/root.c
fs/autofs4/waitq.c
fs/befs/ChangeLog
fs/befs/linuxvfs.c
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/tree-checker.c
fs/btrfs/tree-checker.h
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/mds_client.c
fs/ceph/super.c
fs/cifs/cifs_fs_sb.h
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/inode.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/xattr.c
fs/coda/inode.c
fs/cramfs/Kconfig
fs/cramfs/inode.c
fs/ecryptfs/main.c
fs/efs/super.c
fs/exec.c
fs/ext2/balloc.c
fs/ext2/ialloc.c
fs/ext2/super.c
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/super.c
fs/f2fs/checkpoint.c
fs/f2fs/f2fs.h
fs/f2fs/gc.c
fs/f2fs/recovery.c
fs/f2fs/super.c
fs/fat/fatent.c
fs/fat/inode.c
fs/fat/misc.c
fs/fat/namei_msdos.c
fs/freevxfs/vxfs_super.c
fs/fs-writeback.c
fs/fuse/inode.c
fs/gfs2/ops_fstype.c
fs/gfs2/super.c
fs/gfs2/trans.c
fs/hfs/mdb.c
fs/hfs/super.c
fs/hfsplus/super.c
fs/hpfs/dir.c
fs/hpfs/dnode.c
fs/hpfs/map.c
fs/hpfs/super.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/isofs/inode.c
fs/jffs2/fs.c
fs/jffs2/os-linux.h
fs/jffs2/super.c
fs/jfs/super.c
fs/kernfs/mount.c
fs/libfs.c
fs/lockd/host.c
fs/lockd/mon.c
fs/lockd/svc.c
fs/lockd/svcsubs.c
fs/locks.c
fs/mbcache.c
fs/minix/inode.c
fs/namei.c
fs/namespace.c
fs/ncpfs/inode.c
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4client.c
fs/nfs/nfs4state.c
fs/nfs/super.c
fs/nfs/write.c
fs/nfs_common/grace.c
fs/nfsd/auth.c
fs/nfsd/export.c
fs/nfsd/netns.h
fs/nfsd/nfs4idmap.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsctl.c
fs/nfsd/nfssvc.c
fs/nilfs2/segment.c
fs/nilfs2/super.c
fs/nilfs2/the_nilfs.c
fs/notify/fsnotify.c
fs/nsfs.c
fs/ntfs/super.c
fs/ocfs2/file.c
fs/ocfs2/super.c
fs/ocfs2/xattr.c
fs/openpromfs/inode.c
fs/orangefs/devorangefs-req.c
fs/orangefs/file.c
fs/orangefs/orangefs-kernel.h
fs/orangefs/super.c
fs/orangefs/waitqueue.c
fs/overlayfs/Kconfig
fs/overlayfs/dir.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/proc/array.c
fs/proc/base.c
fs/proc/inode.c
fs/proc/root.c
fs/proc_namespace.c
fs/qnx4/inode.c
fs/qnx6/inode.c
fs/quota/dquot.c
fs/reiserfs/inode.c
fs/reiserfs/journal.c
fs/reiserfs/prints.c
fs/reiserfs/super.c
fs/reiserfs/xattr.c
fs/romfs/super.c
fs/squashfs/super.c
fs/statfs.c
fs/super.c
fs/sysfs/mount.c
fs/sysv/inode.c
fs/sysv/super.c
fs/ubifs/file.c
fs/ubifs/io.c
fs/ubifs/super.c
fs/ubifs/ubifs.h
fs/udf/super.c
fs/ufs/balloc.c
fs/ufs/ialloc.c
fs/ufs/super.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/libxfs/xfs_attr_leaf.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_defer.c
fs/xfs/libxfs/xfs_defer.h
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/libxfs/xfs_ialloc.h
fs/xfs/libxfs/xfs_iext_tree.c
fs/xfs/libxfs/xfs_refcount.c
fs/xfs/libxfs/xfs_rmap.c
fs/xfs/libxfs/xfs_rmap.h
fs/xfs/scrub/inode.c
fs/xfs/scrub/quota.c
fs/xfs/scrub/scrub.c
fs/xfs/scrub/trace.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_bmap_item.c
fs/xfs/xfs_bmap_item.h
fs/xfs/xfs_buf.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_dquot_item.c
fs/xfs/xfs_extfree_item.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_icache.h
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_iomap.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_refcount_item.c
fs/xfs/xfs_refcount_item.h
fs/xfs/xfs_reflink.c
fs/xfs/xfs_super.c
fs/xfs/xfs_super.h
fs/xfs/xfs_symlink.c
fs/xfs/xfs_trace.c
include/acpi/acconfig.h
include/acpi/acexcep.h
include/acpi/acpi_bus.h
include/acpi/acpi_drivers.h
include/acpi/acpixf.h
include/acpi/actbl1.h
include/acpi/actbl2.h
include/acpi/actypes.h
include/asm-generic/mm_hooks.h
include/asm-generic/pgtable.h
include/asm-generic/vmlinux.lds.h
include/crypto/if_alg.h
include/crypto/internal/hash.h
include/crypto/mcryptd.h
include/drm/drm_connector.h
include/drm/drm_edid.h
include/drm/drm_mode_config.h
include/drm/ttm/ttm_page_alloc.h
include/kvm/arm_arch_timer.h
include/lib/libgcc.h [deleted file]
include/linux/acpi.h
include/linux/bio.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/bpf_verifier.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/completion.h
include/linux/cpu.h
include/linux/cpu_cooling.h
include/linux/cpuhotplug.h
include/linux/crash_core.h
include/linux/crc-ccitt.h
include/linux/cred.h
include/linux/debugfs.h
include/linux/delayacct.h
include/linux/dma-mapping.h
include/linux/efi.h
include/linux/fs.h
include/linux/fscache.h
include/linux/ftrace.h
include/linux/gpio/driver.h
include/linux/hugetlb.h
include/linux/hyperv.h
include/linux/idr.h
include/linux/iio/adc/stm32-dfsdm-adc.h [new file with mode: 0644]
include/linux/iio/consumer.h
include/linux/iio/hw-consumer.h [new file with mode: 0644]
include/linux/iio/iio.h
include/linux/iio/timer/stm32-lptim-trigger.h
include/linux/iio/types.h
include/linux/init_task.h
include/linux/intel-pti.h [new file with mode: 0644]
include/linux/ipv6.h
include/linux/irq.h
include/linux/irqdesc.h
include/linux/irqdomain.h
include/linux/irqflags.h
include/linux/jump_label.h
include/linux/kallsyms.h
include/linux/kmemcheck.h [deleted file]
include/linux/kvm_host.h
include/linux/libgcc.h [new file with mode: 0644]
include/linux/lockdep.h
include/linux/mfd/axp20x.h
include/linux/mfd/cros_ec.h
include/linux/mfd/cros_ec_commands.h
include/linux/mfd/palmas.h
include/linux/mfd/rave-sp.h [new file with mode: 0644]
include/linux/mfd/rtsx_common.h [deleted file]
include/linux/mfd/rtsx_pci.h [deleted file]
include/linux/mfd/rtsx_usb.h [deleted file]
include/linux/mfd/stm32-lptimer.h
include/linux/mfd/stm32-timers.h
include/linux/mfd/tmio.h
include/linux/migrate.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/netlink.h
include/linux/oom.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/pm.h
include/linux/pm_wakeup.h
include/linux/pti.h
include/linux/ptr_ring.h
include/linux/rbtree.h
include/linux/rculist_nulls.h
include/linux/rtsx_common.h [new file with mode: 0644]
include/linux/rtsx_pci.h [new file with mode: 0644]
include/linux/rtsx_usb.h [new file with mode: 0644]
include/linux/rwlock_types.h
include/linux/sched.h
include/linux/sched/coredump.h
include/linux/serdev.h
include/linux/sh_eth.h
include/linux/skbuff.h
include/linux/sound.h
include/linux/spi/spi.h
include/linux/spinlock.h
include/linux/spinlock_types.h
include/linux/string.h
include/linux/sunrpc/cache.h
include/linux/suspend.h
include/linux/swapops.h
include/linux/sysfs.h
include/linux/tcp.h
include/linux/tick.h
include/linux/timer.h
include/linux/trace.h
include/linux/usb/usbnet.h
include/net/arp.h
include/net/cfg80211.h
include/net/dst.h
include/net/gue.h
include/net/ip.h
include/net/ipv6.h
include/net/mac80211.h
include/net/net_namespace.h
include/net/pkt_cls.h
include/net/red.h
include/net/sch_generic.h
include/net/sctp/checksum.h
include/net/sctp/sctp.h
include/net/sctp/stream_sched.h
include/net/sctp/structs.h
include/net/sock.h
include/net/tc_act/tc_sample.h
include/net/tcp.h
include/net/tls.h
include/net/vxlan.h
include/net/xfrm.h
include/scsi/libsas.h
include/sound/hdaudio_ext.h
include/sound/pcm.h
include/sound/rt5514.h
include/sound/rt5645.h
include/sound/soc-acpi-intel-match.h
include/sound/soc-acpi.h
include/sound/soc-dai.h
include/sound/soc.h
include/trace/events/clk.h
include/trace/events/kvm.h
include/trace/events/preemptirq.h
include/trace/events/rxrpc.h
include/trace/events/tcp.h
include/trace/events/thermal.h
include/trace/events/xdp.h
include/uapi/asm-generic/bpf_perf_event.h [new file with mode: 0644]
include/uapi/linux/bcache.h
include/uapi/linux/bfs_fs.h
include/uapi/linux/bpf_perf_event.h
include/uapi/linux/if_ether.h
include/uapi/linux/kfd_ioctl.h
include/uapi/linux/kvm.h
include/uapi/linux/libc-compat.h
include/uapi/linux/netfilter/nf_conntrack_common.h
include/uapi/linux/openvswitch.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/rxrpc.h
include/uapi/linux/usb/ch9.h
include/uapi/linux/vm_sockets_diag.h
include/uapi/sound/asound.h
include/uapi/sound/snd_sst_tokens.h
include/xen/balloon.h
init/Kconfig
init/Makefile
init/init_task.c
init/main.c
ipc/mqueue.c
kernel/acct.c
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/bpf/hashtab.c
kernel/bpf/inode.c
kernel/bpf/offload.c
kernel/bpf/sockmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/cgroup/debug.c
kernel/cgroup/stat.c
kernel/configs/nopm.config [new file with mode: 0644]
kernel/cpu.c
kernel/crash_core.c
kernel/debug/kdb/kdb_io.c
kernel/delayacct.c
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/groups.c
kernel/irq/debug.h
kernel/irq/debugfs.c
kernel/irq/generic-chip.c
kernel/irq/internals.h
kernel/irq/irqdomain.c
kernel/irq/matrix.c
kernel/irq/msi.c
kernel/jump_label.c
kernel/kallsyms.c
kernel/kcov.c
kernel/locking/lockdep.c
kernel/locking/rtmutex.c
kernel/locking/rtmutex_common.h
kernel/locking/spinlock.c
kernel/module.c
kernel/pid.c
kernel/power/main.c
kernel/power/snapshot.c
kernel/power/swap.c
kernel/printk/printk.c
kernel/sched/completion.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/sched/membarrier.c
kernel/sched/rt.c
kernel/sched/wait.c
kernel/time/Kconfig
kernel/time/hrtimer.c
kernel/time/posix-timers.c
kernel/time/tick-sched.c
kernel/time/timer.c
kernel/trace/Kconfig
kernel/trace/blktrace.c
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_functions.c
kernel/trace/trace_stack.c
kernel/uid16.c
kernel/workqueue.c
lib/Kconfig.debug
lib/ashldi3.c
lib/ashrdi3.c
lib/asn1_decoder.c
lib/cmpdi2.c
lib/crc-ccitt.c
lib/kobject_uevent.c
lib/lshrdi3.c
lib/mpi/longlong.h
lib/muldi3.c
lib/nlattr.c
lib/oid_registry.c
lib/rbtree.c
lib/test_bpf.c
lib/test_printf.c
lib/timerqueue.c
lib/ucmpdi2.c
lib/vsprintf.c
mm/backing-dev.c
mm/debug.c
mm/early_ioremap.c
mm/frame_vector.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/kasan/report.c
mm/kmemcheck.c [deleted file]
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory.c
mm/mmap.c
mm/mprotect.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/page_owner.c
mm/page_vma_mapped.c
mm/percpu.c
mm/shmem.c
mm/slab.c
mm/sparse.c
mm/vmscan.c
mm/zsmalloc.c
net/8021q/vlan.c
net/9p/trans_fd.c
net/9p/trans_xen.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v.c
net/batman-adv/fragmentation.c
net/batman-adv/tp_meter.c
net/bluetooth/l2cap_core.c
net/bridge/br_netlink.c
net/caif/caif_dev.c
net/caif/caif_usb.c
net/caif/cfcnfg.c
net/caif/cfctrl.c
net/can/af_can.c
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/netprio_cgroup.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock_diag.c
net/core/sysctl_net_core.c
net/dccp/ccids/ccid2.c
net/dccp/minisocks.c
net/dccp/proto.c
net/dsa/dsa2.c
net/dsa/slave.c
net/ipv4/arp.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/igmp.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_offload.c
net/ipv4/tcp_rate.c
net/ipv4/tcp_recovery.c
net/ipv4/tcp_timer.c
net/ipv4/udp_offload.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv6/af_inet6.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c
net/ipv6/exthdrs.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_MASQUERADE.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/tcpv6_offload.c
net/ipv6/udp_offload.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_mode_tunnel.c
net/kcm/kcmsock.c
net/key/af_key.c
net/mac80211/ht.c
net/mac80211/mesh_hwmp.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/tx.c
net/netfilter/nf_conntrack_h323_asn1.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_exthdr.c
net/netfilter/x_tables.c
net/netfilter/xt_bpf.c
net/netfilter/xt_osf.c
net/netlink/af_netlink.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/packet/internal.h
net/rds/rdma.c
net/rds/send.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_send.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_event.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_event.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/misc.c
net/rxrpc/net_ns.c
net/rxrpc/output.c
net/rxrpc/recvmsg.c
net/rxrpc/sendmsg.c
net/rxrpc/sysctl.c
net/sched/act_gact.c
net/sched/act_meta_mark.c
net/sched/act_meta_skbtcindex.c
net/sched/act_mirred.c
net/sched/act_sample.c
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/cls_u32.c
net/sched/em_nbyte.c
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sched/sch_choke.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_ingress.c
net/sched/sch_red.c
net/sched/sch_sfq.c
net/sctp/chunk.c
net/sctp/debug.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/offload.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/stream.c
net/sctp/stream_sched.c
net/sctp/stream_sched_prio.c
net/sctp/stream_sched_rr.c
net/sctp/transport.c
net/sctp/ulpqueue.c
net/socket.c
net/strparser/strparser.c
net/sunrpc/auth_gss/gss_rpc_xdr.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/cache.c
net/sunrpc/clnt.c
net/sunrpc/svcauth_unix.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/sunrpc/xprtsock.c
net/tipc/bearer.c
net/tipc/group.c
net/tipc/monitor.c
net/tipc/node.c
net/tipc/server.c
net/tipc/socket.c
net/tipc/udp_media.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/hyperv_transport.c
net/vmw_vsock/vmci_transport.c
net/wireless/Kconfig
net/wireless/Makefile
net/wireless/certs/sforshee.hex [new file with mode: 0644]
net/wireless/certs/sforshee.x509 [deleted file]
net/wireless/core.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/wext-compat.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
samples/bpf/bpf_load.c
scripts/Makefile.build
scripts/bloat-o-meter
scripts/checkpatch.pl
scripts/decodecode
scripts/faddr2line
scripts/gdb/linux/tasks.py
scripts/genksyms/.gitignore
scripts/kconfig/expr.c
scripts/kernel-doc
security/Kconfig
security/apparmor/apparmorfs.c
security/apparmor/domain.c
security/apparmor/include/audit.h
security/apparmor/include/lib.h
security/apparmor/include/perms.h
security/apparmor/ipc.c
security/apparmor/mount.c
security/commoncap.c
security/keys/key.c
security/keys/keyctl.c
security/keys/request_key.c
sound/core/oss/pcm_oss.c
sound/core/oss/pcm_plugin.c
sound/core/pcm.c
sound/core/pcm_lib.c
sound/core/pcm_misc.c
sound/core/pcm_native.c
sound/core/rawmidi.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_clientmgr.h
sound/core/seq/seq_queue.c
sound/core/seq/seq_timer.c
sound/core/seq/seq_timer.h
sound/drivers/aloop.c
sound/drivers/dummy.c
sound/hda/ext/hdac_ext_bus.c
sound/hda/hdac_i915.c
sound/isa/gus/gus_dma.c
sound/mips/hal2.c
sound/mips/sgio2audio.c
sound/pci/hda/Kconfig
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/prodigy_hifi.c
sound/pci/korg1212/korg1212.c
sound/soc/Kconfig
sound/soc/Makefile
sound/soc/amd/acp-pcm-dma.c
sound/soc/atmel/Kconfig
sound/soc/atmel/atmel-classd.c
sound/soc/au1x/ac97c.c
sound/soc/bcm/bcm2835-i2s.c
sound/soc/cirrus/ep93xx-ac97.c
sound/soc/codecs/88pm860x-codec.c
sound/soc/codecs/Kconfig
sound/soc/codecs/Makefile
sound/soc/codecs/cq93vc.c
sound/soc/codecs/cs35l32.c
sound/soc/codecs/cs35l34.c
sound/soc/codecs/cs42l52.c
sound/soc/codecs/cs42l56.c
sound/soc/codecs/cs42l73.c
sound/soc/codecs/cs47l24.c
sound/soc/codecs/cx20442.c
sound/soc/codecs/da7213.c
sound/soc/codecs/da7218.c
sound/soc/codecs/dmic.c
sound/soc/codecs/hdac_hdmi.c
sound/soc/codecs/max98373.c [new file with mode: 0644]
sound/soc/codecs/max98373.h [new file with mode: 0644]
sound/soc/codecs/max98926.c
sound/soc/codecs/max98927.c
sound/soc/codecs/mc13783.c
sound/soc/codecs/msm8916-wcd-analog.c
sound/soc/codecs/msm8916-wcd-digital.c
sound/soc/codecs/nau8540.c
sound/soc/codecs/nau8540.h
sound/soc/codecs/nau8824.c
sound/soc/codecs/nau8825.c
sound/soc/codecs/nau8825.h
sound/soc/codecs/pcm186x-i2c.c [new file with mode: 0644]
sound/soc/codecs/pcm186x-spi.c [new file with mode: 0644]
sound/soc/codecs/pcm186x.c [new file with mode: 0644]
sound/soc/codecs/pcm186x.h [new file with mode: 0644]
sound/soc/codecs/pcm512x-spi.c
sound/soc/codecs/rl6231.c
sound/soc/codecs/rt5514-spi.c
sound/soc/codecs/rt5514.c
sound/soc/codecs/rt5514.h
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5645.h
sound/soc/codecs/rt5663.c
sound/soc/codecs/rt5663.h
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/si476x.c
sound/soc/codecs/sn95031.c [deleted file]
sound/soc/codecs/sn95031.h [deleted file]
sound/soc/codecs/spdif_receiver.c
sound/soc/codecs/spdif_transmitter.c
sound/soc/codecs/tas5720.c
sound/soc/codecs/tas5720.h
sound/soc/codecs/tas6424.c [new file with mode: 0644]
sound/soc/codecs/tas6424.h [new file with mode: 0644]
sound/soc/codecs/tfa9879.c
sound/soc/codecs/tlv320aic31xx.c
sound/soc/codecs/tlv320aic31xx.h
sound/soc/codecs/tlv320aic32x4.c
sound/soc/codecs/tlv320aic32x4.h
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/tlv320dac33.c
sound/soc/codecs/ts3a227e.c
sound/soc/codecs/tscs42xx.c [new file with mode: 0644]
sound/soc/codecs/tscs42xx.h [new file with mode: 0644]
sound/soc/codecs/twl4030.c
sound/soc/codecs/twl6040.c
sound/soc/codecs/uda1380.c
sound/soc/codecs/wm0010.c
sound/soc/codecs/wm2000.c
sound/soc/codecs/wm2200.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8350.c
sound/soc/codecs/wm8400.c
sound/soc/codecs/wm8903.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8997.c
sound/soc/codecs/wm8998.c
sound/soc/codecs/wm_adsp.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/fsl/eukrea-tlv320.c
sound/soc/fsl/fsl-asoc-card.c
sound/soc/fsl/fsl_asrc.h
sound/soc/fsl/fsl_dma.c
sound/soc/fsl/fsl_ssi.c
sound/soc/fsl/fsl_ssi.h
sound/soc/fsl/fsl_ssi_dbg.c
sound/soc/hisilicon/hi6210-i2s.c
sound/soc/intel/Kconfig
sound/soc/intel/Makefile
sound/soc/intel/atom/sst/sst_acpi.c
sound/soc/intel/atom/sst/sst_stream.c
sound/soc/intel/boards/Kconfig
sound/soc/intel/boards/bytcht_da7213.c
sound/soc/intel/boards/bytcht_es8316.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/boards/bytcr_rt5651.c
sound/soc/intel/boards/cht_bsw_rt5645.c
sound/soc/intel/boards/cht_bsw_rt5672.c
sound/soc/intel/boards/haswell.c
sound/soc/intel/boards/kbl_rt5663_max98927.c
sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
sound/soc/intel/boards/mfld_machine.c [deleted file]
sound/soc/intel/common/sst-dsp.c
sound/soc/intel/skylake/bxt-sst.c
sound/soc/intel/skylake/cnl-sst.c
sound/soc/intel/skylake/skl-i2s.h [new file with mode: 0644]
sound/soc/intel/skylake/skl-messages.c
sound/soc/intel/skylake/skl-nhlt.c
sound/soc/intel/skylake/skl-pcm.c
sound/soc/intel/skylake/skl-ssp-clk.h [new file with mode: 0644]
sound/soc/intel/skylake/skl-sst-dsp.c
sound/soc/intel/skylake/skl-sst-dsp.h
sound/soc/intel/skylake/skl-sst-utils.c
sound/soc/intel/skylake/skl-sst.c
sound/soc/intel/skylake/skl-topology.c
sound/soc/intel/skylake/skl.c
sound/soc/intel/skylake/skl.h
sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
sound/soc/mediatek/mt2701/mt2701-afe-common.h
sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
sound/soc/mediatek/mt2701/mt2701-reg.h
sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
sound/soc/mediatek/mt8173/mt8173-rt5650.c
sound/soc/mxs/mxs-sgtl5000.c
sound/soc/nuc900/nuc900-ac97.c
sound/soc/omap/ams-delta.c
sound/soc/qcom/apq8016_sbc.c
sound/soc/rockchip/rk3399_gru_sound.c
sound/soc/rockchip/rockchip_i2s.c
sound/soc/rockchip/rockchip_spdif.c
sound/soc/samsung/bells.c
sound/soc/sh/rcar/adg.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/dma.c
sound/soc/sh/rcar/rsnd.h
sound/soc/sh/rcar/ssi.c
sound/soc/sh/rcar/ssiu.c
sound/soc/soc-acpi.c
sound/soc/soc-compress.c
sound/soc/soc-core.c
sound/soc/soc-io.c
sound/soc/soc-ops.c
sound/soc/soc-utils.c
sound/soc/stm/Kconfig
sound/soc/stm/Makefile
sound/soc/stm/stm32_adfsdm.c [new file with mode: 0644]
sound/soc/stm/stm32_sai.c
sound/soc/sunxi/sun4i-codec.c
sound/soc/sunxi/sun4i-i2s.c
sound/soc/uniphier/Kconfig [new file with mode: 0644]
sound/soc/uniphier/Makefile [new file with mode: 0644]
sound/soc/uniphier/evea.c [new file with mode: 0644]
sound/soc/ux500/mop500.c
sound/soc/ux500/ux500_pcm.c
sound/sound_core.c
sound/usb/card.c
sound/usb/mixer.c
sound/usb/mixer_quirks.c
sound/usb/quirks-table.h
sound/usb/quirks.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm_perf.h
tools/arch/s390/include/uapi/asm/perf_regs.h [new file with mode: 0644]
tools/arch/s390/include/uapi/asm/ptrace.h [new file with mode: 0644]
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/bpf/bpftool/Documentation/Makefile
tools/bpf/bpftool/Makefile
tools/bpf/bpftool/main.c
tools/bpf/bpftool/main.h
tools/bpf/bpftool/map.c
tools/bpf/bpftool/prog.c
tools/hv/hv_kvp_daemon.c
tools/include/linux/compiler.h
tools/include/linux/kmemcheck.h [deleted file]
tools/include/linux/lockdep.h
tools/include/uapi/asm-generic/bpf_perf_event.h [new file with mode: 0644]
tools/include/uapi/asm-generic/mman.h
tools/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
tools/include/uapi/drm/drm.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/bpf_perf_event.h
tools/include/uapi/linux/kcmp.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/prctl.h
tools/kvm/kvm_stat/kvm_stat
tools/kvm/kvm_stat/kvm_stat.txt
tools/objtool/Makefile
tools/objtool/arch/x86/decode.c
tools/objtool/arch/x86/lib/x86-opcode-map.txt
tools/objtool/builtin-orc.c
tools/objtool/check.c
tools/objtool/check.h
tools/objtool/elf.c
tools/objtool/orc_dump.c
tools/objtool/orc_gen.c
tools/perf/Makefile.config
tools/perf/arch/s390/Makefile
tools/perf/arch/s390/include/perf_regs.h
tools/perf/arch/s390/util/dwarf-regs.c
tools/perf/bench/numa.c
tools/perf/builtin-help.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/check-headers.sh
tools/perf/jvmti/jvmti_agent.c
tools/perf/jvmti/jvmti_agent.h
tools/perf/jvmti/libjvmti.c
tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
tools/perf/tests/shell/trace+probe_vfs_getname.sh
tools/perf/tests/task-exit.c
tools/perf/trace/beauty/mmap.c
tools/perf/util/annotate.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/intel-pt-decoder/inat.h
tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
tools/perf/util/machine.c
tools/perf/util/mmap.h
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/pmu.c
tools/power/acpi/tools/acpidump/apmain.c
tools/power/cpupower/bench/system.c
tools/power/cpupower/lib/cpufreq.h
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/test_align.c
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/test_verifier_log.c
tools/testing/selftests/net/config
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/ldt_gdt.c
tools/testing/selftests/x86/test_vsyscall.c [new file with mode: 0644]
tools/usb/usbip/libsrc/vhci_driver.c
tools/usb/usbip/src/utils.c
tools/virtio/ringtest/ptr_ring.c
tools/vm/slabinfo-gnuplot.sh
virt/kvm/arm/arch_timer.c
virt/kvm/arm/arm.c
virt/kvm/arm/hyp/timer-sr.c
virt/kvm/arm/hyp/vgic-v2-sr.c
virt/kvm/arm/mmio.c
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-irqfd.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic-v4.c
virt/kvm/arm/vgic/vgic.c
virt/kvm/kvm_main.c

index 1469ff0..e18cab7 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -107,6 +107,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
 Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
 Mark Brown <broonie@sirena.org.uk>
+Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Matthieu CASTET <castet.matthieu@free.fr>
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dfsdm-adc-stm32 b/Documentation/ABI/testing/sysfs-bus-iio-dfsdm-adc-stm32
new file mode 100644 (file)
index 0000000..da98223
--- /dev/null
@@ -0,0 +1,16 @@
+What:          /sys/bus/iio/devices/iio:deviceX/in_voltage_spi_clk_freq
+KernelVersion: 4.14
+Contact:       arnaud.pouliquen@st.com
+Description:
+               For audio purposes only.
+               Used by the audio driver to set/get the SPI input frequency.
+               This is mandatory if the DFSDM is a slave on the SPI bus, to
+               provide information on the SPI clock frequency at runtime.
+               Note that the SPI frequency should be a multiple of the sample
+               frequency to ensure precision.
+               If the DFSDM input is SPI master:
+                       Reading returns the SPI clkout frequency;
+                       writing returns an error.
+               If the DFSDM input is SPI slave:
+                       Reading returns the value previously set.
+                       The value must be written before starting conversions.
\ No newline at end of file
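
A minimal userspace sketch of how the attribute documented in the hunk above could be exercised; the iio:device0 index and the 2048000 Hz value are illustrative assumptions, not taken from the patch:

/*
 * Hypothetical example: query (and, when the DFSDM is SPI slave, set)
 * the in_voltage_spi_clk_freq attribute described above.
 * "iio:device0" and 2048000 Hz are illustrative assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

#define ATTR "/sys/bus/iio/devices/iio:device0/in_voltage_spi_clk_freq"

int main(void)
{
	unsigned long freq;
	FILE *f = fopen(ATTR, "r");

	if (!f || fscanf(f, "%lu", &freq) != 1) {
		perror(ATTR);
		return EXIT_FAILURE;
	}
	fclose(f);
	printf("SPI clock frequency: %lu Hz\n", freq);

	/* Writing only makes sense in SPI slave mode; in master mode
	 * the kernel is expected to reject it with an error. */
	f = fopen(ATTR, "w");
	if (f) {
		fprintf(f, "%lu\n", 2048000UL);
		fclose(f);
	}
	return EXIT_SUCCESS;
}
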
index d6d862d..bfd29bc 100644 (file)
@@ -375,3 +375,19 @@ Contact:   Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   information about CPUs heterogeneity.
 
                cpu_capacity: capacity of cpu#.
+
+What:          /sys/devices/system/cpu/vulnerabilities
+               /sys/devices/system/cpu/vulnerabilities/meltdown
+               /sys/devices/system/cpu/vulnerabilities/spectre_v1
+               /sys/devices/system/cpu/vulnerabilities/spectre_v2
+Date:          January 2018
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:   Information about CPU vulnerabilities
+
+               The files are named after the code names of CPU
+               vulnerabilities. The output of those files reflects the
+               state of the CPUs in the system. Possible output values:
+
+               "Not affected"    CPU is not affected by the vulnerability
+               "Vulnerable"      CPU is affected and no mitigation in effect
+               "Mitigation: $M"  CPU is affected and mitigation $M is in effect
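For context only (not part of this patch): these files are consumed by simply reading them from user space. A minimal C sketch that walks the vulnerabilities directory documented above and prints each reported state could look like the following; the directory path comes from the entry above, everything else is illustrative.

/* Illustrative only: print the CPU vulnerability status files, whose contents
 * are one of "Not affected", "Vulnerable" or "Mitigation: ...".
 */
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/devices/system/cpu/vulnerabilities";
	DIR *d = opendir(dir);
	struct dirent *e;
	char path[512], line[256];

	if (!d) {
		perror(dir);
		return 1;
	}
	while ((e = readdir(d))) {
		FILE *f;

		if (e->d_name[0] == '.')
			continue;
		snprintf(path, sizeof(path), "%s/%s", dir, e->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(line, sizeof(line), f))
			printf("%-12s %s", e->d_name, line); /* line keeps its '\n' */
		fclose(f);
	}
	closedir(d);
	return 0;
}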
index b2598cc..7242cbd 100644 (file)
@@ -109,6 +109,7 @@ parameter is applicable::
        IPV6    IPv6 support is enabled.
        ISAPNP  ISA PnP code is enabled.
        ISDN    Appropriate ISDN support is enabled.
+       ISOL    CPU Isolation is enabled.
        JOY     Appropriate joystick support is enabled.
        KGDB    Kernel debugger support is enabled.
        KVM     Kernel Virtual Machine support is enabled.
index 6571fbf..1bdcf57 100644 (file)
                        This facility can be used to prevent such uncontrolled
                        GPE floodings.
                        Format: <int>
-                       Support masking of GPEs numbered from 0x00 to 0x7f.
 
        acpi_no_auto_serialize  [HW,ACPI]
                        Disable auto-serialization of AML methods
 
        acpi_sleep=     [HW,ACPI] Sleep options
                        Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
-                                 old_ordering, nonvs, sci_force_enable }
+                                 old_ordering, nonvs, sci_force_enable, nobl }
                        See Documentation/power/video.txt for information on
                        s3_bios and s3_mode.
                        s3_beep is for debugging; it makes the PC's speaker beep
                        sci_force_enable causes the kernel to set SCI_EN directly
                        on resume from S1/S3 (which is against the ACPI spec,
                        but some broken systems don't work without it).
+                       nobl causes the internal blacklist of systems known to
+                       behave incorrectly during system suspend and resume to
+                       be ignored (use wisely).
 
        acpi_use_timer_override [HW,ACPI]
                        Use timer override. For some broken Nvidia NF5 boards
                        not play well with APC CPU idle - disable it if you have
                        APC and your system crashes randomly.
 
-       apic=           [APIC,X86-32] Advanced Programmable Interrupt Controller
+       apic=           [APIC,X86] Advanced Programmable Interrupt Controller
                        Change the output verbosity whilst booting
                        Format: { quiet (default) | verbose | debug }
                        Change the amount of debugging information output
                        when initialising the APIC and IO-APIC components.
+                       For X86-32, this can also be used to specify an APIC
+                       driver name.
+                       Format: apic=driver_name
+                       Examples: apic=bigsmp
 
        apic_extnmi=    [APIC,X86] External NMI delivery setting
                        Format: { bsp (default) | all | none }
                        It will be ignored when crashkernel=X,high is not used
                        or memory reserved is below 4G.
 
-       crossrelease_fullstack
-                       [KNL] Allow to record full stack trace in cross-release
-
        cryptomgr.notests
                         [KNL] Disable crypto self-tests
 
        isapnp=         [ISAPNP]
                        Format: <RDP>,<reset>,<pci_scan>,<verbosity>
 
-       isolcpus=       [KNL,SMP] Isolate a given set of CPUs from disturbance.
+       isolcpus=       [KNL,SMP,ISOL] Isolate a given set of CPUs from disturbance.
                        [Deprecated - use cpusets instead]
                        Format: [flag-list,]<cpu-list>
 
        nosmt           [KNL,S390] Disable symmetric multithreading (SMT).
                        Equivalent to smt=1.
 
+       nospectre_v2    [X86] Disable all mitigations for the Spectre variant 2
+                       (indirect branch prediction) vulnerability. System may
+                       allow data leaks with this option, which is equivalent
+                       to spectre_v2=off.
+
        noxsave         [BUGS=X86] Disables x86 extended register state save
                        and restore using xsave. The kernel will fallback to
                        enabling legacy floating-point and sse state.
                        Valid arguments: on, off
                        Default: on
 
-       nohz_full=      [KNL,BOOT]
+       nohz_full=      [KNL,BOOT,SMP,ISOL]
                        The argument is a cpu list, as described above.
                        In kernels built with CONFIG_NO_HZ_FULL=y, set
                        the specified list of CPUs whose tick will be stopped
                pcie_scan_all   Scan all possible PCIe devices.  Otherwise we
                                only look for one device below a PCIe downstream
                                port.
+               big_root_window Try to add a big 64bit memory window to the PCIe
+                               root complex on AMD CPUs. Some GFX hardware
+                               can resize a BAR to allow access to all VRAM.
+                               Adding the window is slightly risky (it may
+                               conflict with unreported devices), so this
+                               taints the kernel.
 
        pcie_aspm=      [PCIE] Forcibly enable or disable PCIe Active State Power
                        Management.
        pt.             [PARIDE]
                        See Documentation/blockdev/paride.txt.
 
+       pti=            [X86_64] Control Page Table Isolation of user and
+                       kernel address spaces.  Disabling this feature
+                       removes hardening, but improves performance of
+                       system calls and interrupts.
+
+                       on   - unconditionally enable
+                       off  - unconditionally disable
+                       auto - kernel detects whether your CPU model is
+                              vulnerable to issues that PTI mitigates
+
+                       Not specifying this option is equivalent to pti=auto.
+
+       nopti           [X86_64]
+                       Equivalent to pti=off
+
        pty.legacy_count=
                        [KNL] Number of legacy pty's. Overwrites compiled-in
                        default number.
        sonypi.*=       [HW] Sony Programmable I/O Control Device driver
                        See Documentation/laptops/sonypi.txt
 
+       spectre_v2=     [X86] Control mitigation of Spectre variant 2
+                       (indirect branch speculation) vulnerability.
+
+                       on   - unconditionally enable
+                       off  - unconditionally disable
+                       auto - kernel detects whether your CPU model is
+                              vulnerable
+
+                       Selecting 'on' will, and 'auto' may, choose a
+                       mitigation method at run time according to the
+                       CPU, the available microcode, the setting of the
+                       CONFIG_RETPOLINE configuration option, and the
+                       compiler with which the kernel was built.
+
+                       Specific mitigations can also be selected manually:
+
+                       retpoline         - replace indirect branches
+                       retpoline,generic - Google's original retpoline
+                       retpoline,amd     - AMD-specific minimal thunk
+
+                       Not specifying this option is equivalent to
+                       spectre_v2=auto.
+
        spia_io_base=   [HW,MTD]
        spia_fio_base=
        spia_pedr=
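Aside, not part of the patch: whether the new pti=/nopti and spectre_v2=/nospectre_v2 parameters were passed can be checked at runtime from /proc/cmdline. A rough C sketch using a naive substring match (illustrative only; the parameter names come from the entries above):

/* Illustrative only: report whether the mitigation-related parameters were
 * passed on the kernel command line.  A plain strstr() match is deliberately
 * naive and may over-match in unusual command lines.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	static const char *params[] = {
		"pti=", "nopti", "spectre_v2=", "nospectre_v2"
	};
	char cmdline[4096] = "";
	FILE *f = fopen("/proc/cmdline", "r");
	unsigned int i;

	if (!f || !fgets(cmdline, sizeof(cmdline), f)) {
		perror("/proc/cmdline");
		return 1;
	}
	fclose(f);
	for (i = 0; i < sizeof(params) / sizeof(params[0]); i++)
		printf("%-13s %s\n", params[i],
		       strstr(cmdline, params[i]) ? "present" : "not set");
	return 0;
}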
index de50a85..9b55952 100644 (file)
@@ -230,7 +230,7 @@ If supported by your machine this will be exposed by the WMI bus with
 a sysfs attribute called "force_power".
 
 For example the intel-wmi-thunderbolt driver exposes this attribute in:
-  /sys/devices/platform/PNP0C14:00/wmi_bus/wmi_bus-PNP0C14:00/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power
+  /sys/bus/wmi/devices/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power
 
   To force the power to on, write 1 to this attribute file.
   To disable force power, write 0 to this attribute file.
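Illustrative sketch (not part of this patch) of toggling the attribute from user space with the corrected path above; taking the value as an optional command-line argument is purely an assumption of this example:

/* Illustrative only: write "1" (force power on) or "0" (disable force power)
 * to the intel-wmi-thunderbolt force_power attribute at its new location.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *attr =
		"/sys/bus/wmi/devices/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power";
	const char *val = (argc > 1) ? argv[1] : "1";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror(attr);
		return 1;
	}
	fputs(val, f);
	return fclose(f) ? 1 : 0;
}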
index 304bf22..fc1c884 100644 (file)
@@ -75,3 +75,4 @@ stable kernels.
 | Qualcomm Tech. | Falkor v1       | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 | Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
 | Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
+| Qualcomm Tech. | Falkor v{1,2}   | E1041           | QCOM_FALKOR_ERRATUM_1041    |
index 779211f..2cddab7 100644 (file)
@@ -898,6 +898,13 @@ controller implements weight and absolute bandwidth limit models for
 normal scheduling policy and absolute bandwidth allocation model for
 realtime scheduling policy.
 
+WARNING: cgroup2 doesn't yet support control of realtime processes and
+the cpu controller can only be enabled when all RT processes are in
+the root cgroup.  Be aware that system management software may already
+have placed RT processes into nonroot cgroups during the system boot
+process, and these processes may need to be moved to the root cgroup
+before the cpu controller can be enabled.
+
 
 CPU Interface Files
 ~~~~~~~~~~~~~~~~~~~
index 0054bd4..4da67b6 100644 (file)
@@ -225,9 +225,9 @@ interrupts.
 
 The following control flow is implemented (simplified excerpt)::
 
-    :c:func:`desc->irq_data.chip->irq_mask_ack`;
+    desc->irq_data.chip->irq_mask_ack();
     handle_irq_event(desc->action);
-    :c:func:`desc->irq_data.chip->irq_unmask`;
+    desc->irq_data.chip->irq_unmask();
 
 
 Default Fast EOI IRQ flow handler
@@ -239,7 +239,7 @@ which only need an EOI at the end of the handler.
 The following control flow is implemented (simplified excerpt)::
 
     handle_irq_event(desc->action);
-    :c:func:`desc->irq_data.chip->irq_eoi`;
+    desc->irq_data.chip->irq_eoi();
 
 
 Default Edge IRQ flow handler
@@ -251,15 +251,15 @@ interrupts.
 The following control flow is implemented (simplified excerpt)::
 
     if (desc->status & running) {
-        :c:func:`desc->irq_data.chip->irq_mask_ack`;
+        desc->irq_data.chip->irq_mask_ack();
         desc->status |= pending | masked;
         return;
     }
-    :c:func:`desc->irq_data.chip->irq_ack`;
+    desc->irq_data.chip->irq_ack();
     desc->status |= running;
     do {
         if (desc->status & masked)
-            :c:func:`desc->irq_data.chip->irq_unmask`;
+            desc->irq_data.chip->irq_unmask();
         desc->status &= ~pending;
         handle_irq_event(desc->action);
     } while (status & pending);
@@ -293,10 +293,10 @@ simplified version without locking.
 The following control flow is implemented (simplified excerpt)::
 
     if (desc->irq_data.chip->irq_ack)
-        :c:func:`desc->irq_data.chip->irq_ack`;
+        desc->irq_data.chip->irq_ack();
     handle_irq_event(desc->action);
     if (desc->irq_data.chip->irq_eoi)
-            :c:func:`desc->irq_data.chip->irq_eoi`;
+        desc->irq_data.chip->irq_eoi();
 
 
 EOI Edge IRQ flow handler
index 2980145..43b5a71 100644 (file)
@@ -15,7 +15,7 @@ Required properties:
 
 Example:
 
-       ccn@0x2000000000 {
+       ccn@2000000000 {
                compatible = "arm,ccn-504";
                reg = <0x20 0x00000000 0 0x1000000>;
                interrupts = <0 181 4>;
index 51336e5..35c3c34 100644 (file)
@@ -14,3 +14,22 @@ following property before the previous one:
 Example:
 
 compatible = "marvell,armada-3720-db", "marvell,armada3720", "marvell,armada3710";
+
+
+Power management
+----------------
+
+For power management (particularly DVFS and AVS), the North Bridge
+Power Management component is needed:
+
+Required properties:
+- compatible     : should contain "marvell,armada-3700-nb-pm", "syscon";
+- reg            : the register start and length for the North Bridge
+                   Power Management
+
+Example:
+
+nb_pm: syscon@14000 {
+       compatible = "marvell,armada-3700-nb-pm", "syscon";
+       reg = <0x14000 0x60>;
+};
index bb5727a..ecb360e 100644 (file)
@@ -49,7 +49,7 @@ An interrupt consumer on an SoC using crossbar will use:
        interrupts = <GIC_SPI request_number interrupt_level>
 
 Example:
-       device_x@0x4a023000 {
+       device_x@4a023000 {
                /* Crossbar 8 used */
                interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                ...
index 866d934..f9632ba 100644 (file)
@@ -8,7 +8,7 @@ Required properties:
 - interrupts : Should contain MC General interrupt.
 
 Example:
-       memory-controller@0x7000f000 {
+       memory-controller@7000f000 {
                compatible = "nvidia,tegra20-mc";
                reg = <0x7000f000 0x024
                       0x7000f03c 0x3c4>;
index fb40da3..aca94fe 100644 (file)
@@ -17,7 +17,7 @@ Optional properties:
 - clock-output-names : From common clock binding.
 
 Example:
-       clock@0xff000000 {
+       clock@ff000000 {
                compatible = "adi,axi-clkgen";
                #clock-cells = <0>;
                reg = <0xff000000 0x1000>;
index 7a837d2..4acfc8f 100644 (file)
@@ -23,7 +23,7 @@ Example:
                clocks = <&clk_osc>;
        };
 
-       aux: aux@0x7e215004 {
+       aux: aux@7e215004 {
                compatible = "brcm,bcm2835-aux";
                #clock-cells = <1>;
                reg = <0x7e215000 0x8>;
index bc61c95..17bb113 100644 (file)
@@ -24,7 +24,7 @@ tree sources.
 
 Example 1: An example of a clock controller node is listed below.
 
-       clock: clock-controller@0x10030000 {
+       clock: clock-controller@10030000 {
                compatible = "samsung,exynos4210-clock";
                reg = <0x10030000 0x20000>;
                #clock-cells = <1>;
index 536eacd..aff266a 100644 (file)
@@ -22,7 +22,7 @@ tree sources.
 
 Example 1: An example of a clock controller node is listed below.
 
-       clock: clock-controller@0x10010000 {
+       clock: clock-controller@10010000 {
                compatible = "samsung,exynos5250-clock";
                reg = <0x10010000 0x30000>;
                #clock-cells = <1>;
index 4527de3..c68b0d2 100644 (file)
@@ -30,7 +30,7 @@ Example 1: An example of a clock controller node is listed below.
                #clock-cells = <0>;
        };
 
-       clock: clock-controller@0x10010000 {
+       clock: clock-controller@10010000 {
                compatible = "samsung,exynos5410-clock";
                reg = <0x10010000 0x30000>;
                #clock-cells = <1>;
index d54f42c..717a7b1 100644 (file)
@@ -23,7 +23,7 @@ tree sources.
 
 Example 1: An example of a clock controller node is listed below.
 
-       clock: clock-controller@0x10010000 {
+       clock: clock-controller@10010000 {
                compatible = "samsung,exynos5420-clock";
                reg = <0x10010000 0x30000>;
                #clock-cells = <1>;
index 5f7005f..c7d227c 100644 (file)
@@ -21,7 +21,7 @@ tree sources.
 
 Example: An example of a clock controller node is listed below.
 
-       clock: clock-controller@0x10010000 {
+       clock: clock-controller@10010000 {
                compatible = "samsung,exynos5440-clock";
                reg = <0x160000 0x10000>;
                #clock-cells = <1>;
index 3e6a81e..c35cb6c 100644 (file)
@@ -14,7 +14,7 @@ Required properties:
 
 Example:
 
-pllctrl: pll-controller@0x02310000 {
+pllctrl: pll-controller@02310000 {
        compatible = "ti,keystone-pllctrl", "syscon";
        reg = <0x02310000 0x200>;
 };
index e85ecb5..5c91c9e 100644 (file)
@@ -20,13 +20,13 @@ ID in its "clocks" phandle cell. See include/dt-bindings/clock/zx296702-clock.h
 for the full list of zx296702 clock IDs.
 
 
-topclk: topcrm@0x09800000 {
+topclk: topcrm@09800000 {
         compatible = "zte,zx296702-topcrm-clk";
         reg = <0x09800000 0x1000>;
         #clock-cells = <1>;
 };
 
-uart0: serial@0x09405000 {
+uart0: serial@09405000 {
         compatible = "zte,zx296702-uart";
         reg = <0x09405000 0x1000>;
         interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
index 7aef0ea..76aec8a 100644 (file)
@@ -456,7 +456,7 @@ System ON/OFF key driver
       Definition: this is phandle to the register map node.
 
 EXAMPLE:
-       snvs-pwrkey@0x020cc000 {
+       snvs-pwrkey@020cc000 {
                compatible = "fsl,sec-v4.0-pwrkey";
                regmap = <&snvs>;
                interrupts = <0 4 0x4>
@@ -545,7 +545,7 @@ FULL EXAMPLE
                        interrupts = <93 2>;
                };
 
-               snvs-pwrkey@0x020cc000 {
+               snvs-pwrkey@020cc000 {
                        compatible = "fsl,sec-v4.0-pwrkey";
                        regmap = <&sec_mon>;
                        interrupts = <0 4 0x4>;
index 001dd63..148191b 100644 (file)
@@ -9,7 +9,7 @@ Required properties:
 - clock-names : the name of clock used by the DFI, must be "pclk_ddr_mon";
 
 Example:
-       dfi: dfi@0xff630000 {
+       dfi: dfi@ff630000 {
                compatible = "rockchip,rk3399-dfi";
                reg = <0x00 0xff630000 0x00 0x4000>;
                rockchip,pmu = <&pmugrf>;
index 1a21202..acb5a01 100644 (file)
@@ -27,7 +27,7 @@ Optional properties:
 
 Example:
 
-       fb0: fb@0x00500000 {
+       fb0: fb@00500000 {
                compatible = "atmel,at91sam9g45-lcdc";
                reg = <0x00500000 0x1000>;
                interrupts = <23 3 0>;
@@ -41,7 +41,7 @@ Example:
 
 Example for fixed framebuffer memory:
 
-       fb0: fb@0x00500000 {
+       fb0: fb@00500000 {
                compatible = "atmel,at91sam9263-lcdc";
                reg = <0x00700000 0x1000 0x70000000 0x200000>;
                [...]
index 55492c2..b3408cc 100644 (file)
@@ -73,7 +73,7 @@ Hypervisor OS configuration:
                max-read-transactions = <31>;
                channel-reset-timeout-cycles = <0x500>;
 
-               hidma_24: dma-controller@0x5c050000 {
+               hidma_24: dma-controller@5c050000 {
                        compatible = "qcom,hidma-1.0";
                        reg = <0 0x5c050000 0x0 0x1000>,
                              <0 0x5c0b0000 0x0 0x1000>;
@@ -85,7 +85,7 @@ Hypervisor OS configuration:
 
 Guest OS configuration:
 
-       hidma_24: dma-controller@0x5c050000 {
+       hidma_24: dma-controller@5c050000 {
                compatible = "qcom,hidma-1.0";
                reg = <0 0x5c050000 0x0 0x1000>,
                      <0 0x5c0b0000 0x0 0x1000>;
index abec59f..0ab80f6 100644 (file)
@@ -13,7 +13,7 @@ Required properties:
 Example:
 
 Controller:
-       dma: dma-controller@0x09c00000{
+       dma: dma-controller@09c00000{
                compatible = "zte,zx296702-dma";
                reg = <0x09c00000 0x1000>;
                clocks = <&topclk ZX296702_DMA_ACLK>;
index 1d34471..e823d90 100644 (file)
@@ -1,7 +1,12 @@
 EEPROMs (SPI) compatible with Atmel at25.
 
 Required properties:
-- compatible : "atmel,at25".
+- compatible : Should be "<vendor>,<type>", and generic value "atmel,at25".
+  Example "<vendor>,<type>" values:
+    "microchip,25lc040"
+    "st,m95m02"
+    "st,m95256"
+
 - reg : chip select number
 - spi-max-frequency : max spi frequency to use
 - pagesize : size of the eeprom page
@@ -13,7 +18,7 @@ Optional properties:
 - spi-cpol : SPI inverse clock polarity, as per spi-bus bindings.
 - read-only : this parameter-less property disables writes to the eeprom
 
-Obsolete legacy properties are can be used in place of "size", "pagesize",
+Obsolete legacy properties can be used in place of "size", "pagesize",
 "address-width", and "read-only":
 - at25,byte-len : total eeprom size in bytes
 - at25,addr-mode : addr-mode flags, as defined in include/linux/spi/eeprom.h
@@ -22,8 +27,8 @@ Obsolete legacy properties are can be used in place of "size", "pagesize",
 Additional compatible properties are also allowed.
 
 Example:
-       at25@0 {
-               compatible = "atmel,at25", "st,m95256";
+       eeprom@0 {
+               compatible = "st,m95256", "atmel,at25";
                reg = <0>
                spi-max-frequency = <5000000>;
                spi-cpha;
index 826a720..146e554 100644 (file)
@@ -30,7 +30,7 @@ Optional properties:
 
 Example:
 
-gpio_altr: gpio@0xff200000 {
+gpio_altr: gpio@ff200000 {
        compatible = "altr,pio-1.0";
        reg = <0xff200000 0x10>;
        interrupts = <0 45 4>;
index 7f57271..0d01587 100644 (file)
@@ -27,7 +27,7 @@ Required properties:
        ti,tca6424
        ti,tca9539
        ti,tca9554
-       onsemi,pca9654
+       onnn,pca9654
        exar,xra1202
 
 Optional properties:
index 07a2504..f569db5 100644 (file)
@@ -34,6 +34,10 @@ Required properties:
 
 - reg: I2C address
 
+Optional properties:
+- smbus-timeout-disable: When set, the smbus timeout function will be disabled.
+                        This is not supported on all chips.
+
 Example:
 
 temp-sensor@1a {
index 231e4cc..d4a082a 100644 (file)
@@ -18,7 +18,7 @@ Optional properties:
 Example
 
 / {
-       i2c4: i2c4@0x10054000 {
+       i2c4: i2c4@10054000 {
                compatible = "ingenic,jz4780-i2c";
                reg = <0x10054000 0x1000>;
 
diff --git a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
new file mode 100644 (file)
index 0000000..e9ebb8a
--- /dev/null
@@ -0,0 +1,13 @@
+Device-Tree bindings for sigma delta modulator
+
+Required properties:
+- compatible: should be "ads1201", "sd-modulator". "sd-modulator" can be used
+       as a generic SD modulator if the modulator is not specified in the compatible list.
+- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers".
+
+Example node:
+
+       ads1202: adc@0 {
+               compatible = "sd-modulator";
+               #io-channel-cells = <1>;
+       };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt
new file mode 100644 (file)
index 0000000..911492d
--- /dev/null
@@ -0,0 +1,128 @@
+STMicroelectronics STM32 DFSDM ADC device driver
+
+
+STM32 DFSDM ADC is a sigma delta analog-to-digital converter dedicated to
+interfacing external sigma delta modulators to STM32 microcontrollers.
+It is mainly targeted for:
+- Sigma delta modulators (motor control, metering...)
+- PDM microphones (audio digital microphone)
+
+It features up to 8 serial digital interfaces (SPI or Manchester) and
+up to 4 filters on stm32h7.
+
+Each child node corresponds to a filter instance.
+
+Contents of a STM32 DFSDM root node:
+------------------------------------
+Required properties:
+- compatible: Should be "st,stm32h7-dfsdm".
+- reg: Offset and length of the DFSDM block register set.
+- clocks: IP and serial interfaces clocking. Should be set according
+               to rcc clock ID and "clock-names".
+- clock-names: Input clock name "dfsdm" must be defined;
+               "audio" is optional. If defined, CLKOUT is based on the audio
+               clock, otherwise "dfsdm" is used.
+- #interrupt-cells = <1>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- spi-max-frequency: Required only for SPI master mode.
+                 SPI clock OUT frequency (Hz). This clock must be set according
+                 to the "clocks" property. The frequency must be a multiple of
+                 the rcc clock frequency. If not, the SPI CLKOUT frequency will
+                 not be accurate.
+
+Contents of a STM32 DFSDM child node:
+--------------------------------------
+
+Required properties:
+- compatible: Must be:
+       "st,stm32-dfsdm-adc" for sigma delta ADCs
+       "st,stm32-dfsdm-dmic" for audio digital microphone.
+- reg: Specifies the DFSDM filter instance used.
+- interrupts: IRQ lines connected to each DFSDM filter instance.
+- st,adc-channels:     List of single-ended channels muxed for this ADC.
+                       Valid values:
+                               "st,stm32h7-dfsdm" compatibility: 0 to 7.
+- st,adc-channel-names:        List of single-ended channel names.
+- st,filter-order:  SinC filter order from 0 to 5.
+                       0: FastSinC
+                       [1-5]: order 1 to 5.
+                       For audio purposes it is recommended to use order 3 to 5.
+- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers".
+
+Required properties for "st,stm32-dfsdm-adc" compatibility:
+- io-channels: From common IIO binding. Used to pipe external sigma delta
+               modulator or internal ADC output to DFSDM channel.
+               This is not required for "st,stm32-dfsdm-dmic" compatibility as
+               the PDM microphone is bound in the audio DT node.
+
+Required properties for "st,stm32-dfsdm-dmic" compatibility:
+- #sound-dai-cells: Must be set to 0.
+- dmas: DMA controller phandle and DMA request line associated with the
+               filter instance (specified by the field "reg")
+- dma-names: Must be "rx"
+
+Optional properties:
+- st,adc-channel-types:        Single-ended channel input type.
+                       - "SPI_R": SPI with data on rising edge (default)
+                       - "SPI_F": SPI with data on falling edge
+                       - "MANCH_R": Manchester coding, rising edge = logic 0
+                       - "MANCH_F": Manchester coding, falling edge = logic 1
+- st,adc-channel-clk-src: Conversion clock source.
+                         - "CLKIN": external SPI clock (CLKIN x)
+                         - "CLKOUT": internal SPI clock (CLKOUT) (default)
+                         - "CLKOUT_F": internal SPI clock divided by 2 (falling edge).
+                         - "CLKOUT_R": internal SPI clock divided by 2 (rising edge).
+
+- st,adc-alt-channel: Must be defined if two sigma delta modulators are
+                         connected to the same SPI input.
+                         If not set, channel n is connected to SPI input n.
+                         If set, channel n is connected to SPI input n + 1.
+
+- st,filter0-sync: Set to 1 to synchronize with DFSDM filter instance 0.
+                  Used for multi-microphone synchronization.
+
+Example of a sigma delta ADC connected on DFSDM SPI port 0
+and a PDM microphone connected on DFSDM SPI port 1:
+
+       ads1202: simple_sd_adc@0 {
+               compatible = "ads1202";
+               #io-channel-cells = <1>;
+       };
+
+       dfsdm: dfsdm@40017000 {
+               compatible = "st,stm32h7-dfsdm";
+               reg = <0x40017000 0x400>;
+               clocks = <&rcc DFSDM1_CK>;
+               clock-names = "dfsdm";
+               #interrupt-cells = <1>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               dfsdm_adc0: filter@0 {
+                       compatible = "st,stm32-dfsdm-adc";
+                       #io-channel-cells = <1>;
+                       reg = <0>;
+                       interrupts = <110>;
+                       st,adc-channels = <0>;
+                       st,adc-channel-names = "sd_adc0";
+                       st,adc-channel-types = "SPI_F";
+                       st,adc-channel-clk-src = "CLKOUT";
+                       io-channels = <&ads1202 0>;
+                       st,filter-order = <3>;
+               };
+               dfsdm_pdm1: filter@1 {
+                       compatible = "st,stm32-dfsdm-dmic";
+                       reg = <1>;
+                       interrupts = <111>;
+                       dmas = <&dmamux1 102 0x400 0x00>;
+                       dma-names = "rx";
+                       st,adc-channels = <1>;
+                       st,adc-channel-names = "dmic1";
+                       st,adc-channel-types = "SPI_R";
+                       st,adc-channel-clk-src = "CLKOUT";
+                       st,filter-order = <5>;
+               };
+       };
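Note, not part of the binding: the CLKOUT accuracy remark above can be illustrated with a simple integer-divider model. The divider behaviour and the 49.152 MHz source rate are assumptions of this sketch, not values taken from the hardware documentation.

/* Sketch only: assume CLKOUT is derived from the source clock through an
 * integer divider.  A requested frequency that does not divide the source
 * clock evenly yields a CLKOUT that differs from the request.
 */
#include <stdio.h>

static unsigned long clkout_hz(unsigned long src_hz, unsigned long req_hz)
{
	unsigned long div = (src_hz + req_hz / 2) / req_hz;	/* nearest divider */

	if (!div)
		div = 1;
	return src_hz / div;
}

int main(void)
{
	unsigned long src = 49152000;	/* assumed "audio" clock rate */

	printf("request 6144000 Hz -> %lu Hz\n", clkout_hz(src, 6144000));
	printf("request 6000000 Hz -> %lu Hz\n", clkout_hz(src, 6000000));
	return 0;
}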
index 54e7e70..831dbee 100644 (file)
@@ -10,7 +10,7 @@ Required properties:
 
 Example:
 
-hp03@0x77 {
+hp03@77 {
        compatible = "hoperf,hp03";
        reg = <0x77>;
        xclr-gpio = <&portc 0 0x0>;
index ca5a2c8..56d8352 100644 (file)
@@ -15,7 +15,7 @@ Optional properties:
 Example:
 
        i2c@80110000 {
-               bu21013_tp@0x5c {
+               bu21013_tp@5c {
                        compatible = "rohm,bu21013_tp";
                        reg = <0x5c>;
                        touch-gpio = <&gpio2 20 0x4>;
index 560d8a7..2f32446 100644 (file)
@@ -155,7 +155,7 @@ Example:
                      <0x0 0xe112f000 0 0x02000>,
                      <0x0 0xe1140000 0 0x10000>,
                      <0x0 0xe1160000 0 0x10000>;
-               v2m0: v2m@0x8000 {
+               v2m0: v2m@8000 {
                        compatible = "arm,gic-v2m-frame";
                        msi-controller;
                        reg = <0x0 0x80000 0 0x1000>;
@@ -163,7 +163,7 @@ Example:
 
                ....
 
-               v2mN: v2m@0x9000 {
+               v2mN: v2m@9000 {
                        compatible = "arm,gic-v2m-frame";
                        msi-controller;
                        reg = <0x0 0x90000 0 0x1000>;
index 80994ad..42431f4 100644 (file)
@@ -71,7 +71,7 @@ Example 2:
         * An interrupt generating device that is wired to a Meta external
         * trigger block.
         */
-       uart1: uart@0x02004c00 {
+       uart1: uart@02004c00 {
                // Interrupt source '5' that is level-sensitive.
                // Note that there are only two cells as specified in the
                // interrupt parent's '#interrupt-cells' property.
index a691185..5dc2a55 100644 (file)
@@ -51,7 +51,7 @@ Example 1:
        /*
         * TZ1090 PDC block
         */
-       pdc: pdc@0x02006000 {
+       pdc: pdc@02006000 {
                // This is an interrupt controller node.
                interrupt-controller;
 
index 715a013..2ab0ea3 100644 (file)
@@ -39,7 +39,7 @@ Example:
 
 The following is an example from the SPEAr320 SoC dtsi file.
 
-shirq: interrupt-controller@0xb3000000 {
+shirq: interrupt-controller@b3000000 {
        compatible = "st,spear320-shirq";
        reg = <0xb3000000 0x1000>;
        interrupts = <28 29 30 1>;
index c261979..49cfc8c 100644 (file)
@@ -14,7 +14,7 @@ Optional properties:
                        depends on the interrupt controller parent.
 
 Example:
-       mbox_tx: mailbox@0x100 {
+       mbox_tx: mailbox@100 {
                compatible = "altr,mailbox-1.0";
                reg = <0x100 0x8>;
                interrupt-parent = < &gic_0 >;
@@ -22,7 +22,7 @@ Example:
                #mbox-cells = <1>;
        };
 
-       mbox_rx: mailbox@0x200 {
+       mbox_rx: mailbox@200 {
                compatible = "altr,mailbox-1.0";
                reg = <0x200 0x8>;
                interrupt-parent = < &gic_0 >;
@@ -40,7 +40,7 @@ support only one channel).The equivalent "mbox-names" property value can be
 used to give a name to the communication channel to be used by the client user.
 
 Example:
-       mclient0: mclient0@0x400 {
+       mclient0: mclient0@400 {
                compatible = "client-1.0";
                reg = <0x400 0x10>;
                mbox-names = "mbox-tx", "mbox-rx";
index 0f3ee81..9bcdf20 100644 (file)
@@ -15,7 +15,7 @@ Optional properties:
 - brcm,use-bcm-hdr:  present if a BCM header precedes each frame.
 
 Example:
-       pdc0: iproc-pdc0@0x612c0000 {
+       pdc0: iproc-pdc0@612c0000 {
                compatible = "brcm,iproc-pdc-mbox";
                reg = <0 0x612c0000 0 0x445>;  /* PDC FS0 regs */
                interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
index 0d4fdae..bc963a6 100644 (file)
@@ -17,7 +17,7 @@ Optional properties:
 
 Example:
 
-gsc_0:  gsc@0x13e00000 {
+gsc_0:  gsc@13e00000 {
        compatible = "samsung,exynos5250-gsc";
        reg = <0x13e00000 0x1000>;
        interrupts = <0 85 0>;
index 46c15c5..2a615d8 100644 (file)
@@ -68,7 +68,7 @@ vcodec_dec: vcodec@16000000 {
                   "vdec_bus_clk_src";
   };
 
-  vcodec_enc: vcodec@0x18002000 {
+  vcodec_enc: vcodec@18002000 {
     compatible = "mediatek,mt8173-vcodec-enc";
     reg = <0 0x18002000 0 0x1000>,    /*VENC_SYS*/
           <0 0x19002000 0 0x1000>;    /*VENC_LT_SYS*/
index 6e4ef8c..19357d0 100644 (file)
@@ -44,7 +44,7 @@ Device node example
               vin0 = &vin0;
        };
 
-        vin0: vin@0xe6ef0000 {
+        vin0: vin@e6ef0000 {
                 compatible = "renesas,vin-r8a7790", "renesas,rcar-gen2-vin";
                 clocks = <&mstp8_clks R8A7790_CLK_VIN0>;
                 reg = <0 0xe6ef0000 0 0x1000>;
index e4e15d8..48c599d 100644 (file)
@@ -138,7 +138,7 @@ Example:
                };
 
                /* MIPI CSI-2 bus IF sensor */
-               s5c73m3: sensor@0x1a {
+               s5c73m3: sensor@1a {
                        compatible = "samsung,s5c73m3";
                        reg = <0x1a>;
                        vddio-supply = <...>;
index 1ce4e46..17a8e81 100644 (file)
@@ -8,7 +8,7 @@ Bindings, specific for the sh_mobile_ceu_camera.c driver:
 
 Example:
 
-ceu0: ceu@0xfe910000 {
+ceu0: ceu@fe910000 {
        compatible = "renesas,sh-mobile-ceu";
        reg = <0xfe910000 0xa0>;
        interrupt-parent = <&intcs>;
index 3994b01..258b8df 100644 (file)
@@ -154,7 +154,7 @@ imx074 is linked to ceu0 through the MIPI CSI-2 receiver (csi2). ceu0 has a
 'port' node which may indicate that at any time only one of the following data
 pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
 
-       ceu0: ceu@0xfe910000 {
+       ceu0: ceu@fe910000 {
                compatible = "renesas,sh-mobile-ceu";
                reg = <0xfe910000 0xa0>;
                interrupts = <0x880>;
@@ -193,9 +193,9 @@ pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
                };
        };
 
-       i2c0: i2c@0xfff20000 {
+       i2c0: i2c@fff20000 {
                ...
-               ov772x_1: camera@0x21 {
+               ov772x_1: camera@21 {
                        compatible = "ovti,ov772x";
                        reg = <0x21>;
                        vddio-supply = <&regulator1>;
@@ -219,7 +219,7 @@ pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
                        };
                };
 
-               imx074: camera@0x1a {
+               imx074: camera@1a {
                        compatible = "sony,imx074";
                        reg = <0x1a>;
                        vddio-supply = <&regulator1>;
@@ -239,7 +239,7 @@ pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
                };
        };
 
-       csi2: csi2@0xffc90000 {
+       csi2: csi2@ffc90000 {
                compatible = "renesas,sh-mobile-csi2";
                reg = <0xffc90000 0x1000>;
                interrupts = <0x17a0>;
index fd823d6..152eecc 100644 (file)
@@ -46,7 +46,7 @@ Optional properties:
 
 Example:
 
-emif1: emif@0x4c000000 {
+emif1: emif@4c000000 {
        compatible      = "ti,emif-4d";
        ti,hwmods       = "emif2";
        phy-type        = <1>;
index ac235fe..8261ea7 100644 (file)
@@ -130,7 +130,7 @@ ecspi@70010000 { /* ECSPI1 */
                        #size-cells = <0>;
                        led-control = <0x000 0x000 0x0e0 0x000>;
 
-                       sysled {
+                       sysled@3 {
                                reg = <3>;
                                label = "system:red:live";
                                linux,default-trigger = "heartbeat";
index 20963c7..71a1f59 100644 (file)
@@ -13,7 +13,7 @@ Required properties:
 
 Example:
 
-devctrl: device-state-control@0x02620000 {
+devctrl: device-state-control@02620000 {
        compatible = "ti,keystone-devctrl", "syscon";
        reg = <0x02620000 0x1000>;
 };
index 6c9f176..05b4723 100644 (file)
@@ -9,7 +9,7 @@ Required properties:
 - reg : Location and size of bounce buffer
 
 Example:
-       smc@0x3404c000 {
+       smc@3404c000 {
                compatible = "brcm,bcm11351-smc", "brcm,kona-smc";
                reg = <0x3404c000 0x400>; //1 KiB in SRAM
        };
index aaba248..7f5dd83 100644 (file)
@@ -12,7 +12,7 @@ Refer to clocks/clock-bindings.txt for generic clock consumer properties.
 
 Example:
 
-sdio2: sdio@0x3f1a0000 {
+sdio2: sdio@3f1a0000 {
        compatible = "brcm,kona-sdhci";
        reg = <0x3f1a0000 0x10000>;
        clocks = <&sdio3_clk>;
index 954561d..fa90d25 100644 (file)
@@ -24,7 +24,7 @@ Optional properties:
 
 Example:
 
-sdhci0: sdhci@0x18041000 {
+sdhci0: sdhci@18041000 {
        compatible = "brcm,sdhci-iproc-cygnus";
        reg = <0x18041000 0x100>;
        interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
index 3a4ac40..19f5508 100644 (file)
@@ -55,7 +55,7 @@ Examples:
 
 [hwmod populated DMA resources]
 
-       mmc1: mmc@0x4809c000 {
+       mmc1: mmc@4809c000 {
                compatible = "ti,omap4-hsmmc";
                reg = <0x4809c000 0x400>;
                ti,hwmods = "mmc1";
@@ -67,7 +67,7 @@ Examples:
 
 [generic DMA request binding]
 
-       mmc1: mmc@0x4809c000 {
+       mmc1: mmc@4809c000 {
                compatible = "ti,omap4-hsmmc";
                reg = <0x4809c000 0x400>;
                ti,hwmods = "mmc1";
index 131d3a7..c8567b4 100644 (file)
@@ -82,15 +82,15 @@ gpmc: gpmc@6e000000 {
                        label = "bootloader-nor";
                        reg = <0 0x40000>;
                };
-               partition@0x40000 {
+               partition@40000 {
                        label = "params-nor";
                        reg = <0x40000 0x40000>;
                };
-               partition@0x80000 {
+               partition@80000 {
                        label = "kernel-nor";
                        reg = <0x80000 0x200000>;
                };
-               partition@0x280000 {
+               partition@280000 {
                        label = "filesystem-nor";
                        reg = <0x240000 0x7d80000>;
                };
index 376fa2f..956bb04 100644 (file)
@@ -13,7 +13,6 @@ Required properties:
                  at25df321a
                  at25df641
                  at26df081a
-                 en25s64
                  mr25h128
                  mr25h256
                  mr25h10
@@ -33,7 +32,6 @@ Required properties:
                  s25fl008k
                  s25fl064k
                  sst25vf040b
-                 sst25wf040b
                  m25p40
                  m25p80
                  m25p16
index 0025bc4..1c88526 100644 (file)
@@ -133,7 +133,7 @@ Example:
                                read-only;
                                reg = <0x00000000 0x00400000>;
                        };
-                       android@0x00400000 {
+                       android@00400000 {
                                label = "android";
                                reg = <0x00400000 0x12c00000>;
                        };
index a706297..0e21df9 100644 (file)
@@ -52,7 +52,7 @@ Optional properties:
 
 Example:
 
-       tse_sub_0_eth_tse_0: ethernet@0x1,00000000 {
+       tse_sub_0_eth_tse_0: ethernet@1,00000000 {
                compatible = "altr,tse-msgdma-1.0";
                reg =   <0x00000001 0x00000000 0x00000400>,
                        <0x00000001 0x00000460 0x00000020>,
@@ -90,7 +90,7 @@ Example:
                };
        };
 
-       tse_sub_1_eth_tse_0: ethernet@0x1,00001000 {
+       tse_sub_1_eth_tse_0: ethernet@1,00001000 {
                compatible = "altr,tse-msgdma-1.0";
                reg =   <0x00000001 0x00001000 0x00000400>,
                        <0x00000001 0x00001460 0x00000020>,
index 96a53f8..e3e1603 100644 (file)
@@ -18,7 +18,7 @@ Example :
 This example shows these optional properties, plus other properties
 required for the TI Davinci MDIO driver.
 
-       davinci_mdio: ethernet@0x5c030000 {
+       davinci_mdio: ethernet@5c030000 {
                compatible = "ti,davinci_mdio";
                reg = <0x5c030000 0x1000>;
                #address-cells = <1>;
index b30d04b..17d6819 100644 (file)
@@ -28,7 +28,7 @@ Required properties:
 
 Example:
 
-gmii_to_sgmii_converter: phy@0x100000240 {
+gmii_to_sgmii_converter: phy@100000240 {
        compatible = "altr,gmii-to-sgmii-2.0";
        reg = <0x00000001 0x00000240 0x00000008>,
                <0x00000001 0x00000200 0x00000040>;
index d6d0a94..b95e831 100644 (file)
@@ -36,7 +36,7 @@ Optional properties:
 
 Example:
 
-cpu@0x0 {
+cpu@0 {
        device_type = "cpu";
        compatible = "altr,nios2-1.0";
        reg = <0>;
index 9d733af..4e4f302 100644 (file)
@@ -45,6 +45,11 @@ Devices supporting OPPs must set their "operating-points-v2" property with
 phandle to a OPP table in their DT node. The OPP core will use this phandle to
 find the operating points for the device.
 
+This can contain more than one phandle for power domain providers that provide
+multiple power domains. That is, one phandle for each power domain. If only one
+phandle is available, then the same OPP table will be used for all power domains
+provided by the power domain provider.
+
 If required, this can be extended for SoC vendor specific bindings. Such bindings
 should be documented as Documentation/devicetree/bindings/power/<vendor>-opp.txt
 and should have a compatible description like: "operating-points-v2-<vendor>".
@@ -154,6 +159,14 @@ Optional properties:
 
 - status: Marks the node enabled/disabled.
 
+- required-opp: This contains phandle to an OPP node in another device's OPP
+  table. It may contain an array of phandles, where each phandle points to an
+  OPP of a different device. It should not contain multiple phandles to the OPP
+  nodes in the same OPP table. This specifies the minimum required OPP of the
+  device(s), whose OPP's phandle is present in this property, for the
+  functioning of the current device at the current OPP (where this property is
+  present).
+
 Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
 
 / {
diff --git a/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt b/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt
new file mode 100644 (file)
index 0000000..832346e
--- /dev/null
@@ -0,0 +1,63 @@
+Texas Instruments OMAP compatible OPP supply description
+
+OMAP5, DRA7, and AM57 family of SoCs have Class0 AVS eFuse registers which
+contain data that can be used to adjust voltages programmed for some of their
+supplies for more efficient operation. This binding provides the information
+needed to read these values and use them to program the main regulator during
+an OPP transition.
+
+Also, some supplies may have an associated vbb-supply, an Adaptive Body
+Bias regulator, which must be transitioned in a specific sequence with regard
+to the vdd-supply and clk when making an OPP transition. By supplying two
+regulators to the device that will undergo OPP transitions, we can make use
+of the multi-regulator binding that is part of the OPP core described here [1]
+to describe both regulators needed by the platform.
+
+[1] Documentation/devicetree/bindings/opp/opp.txt
+
+Required Properties for Device Node:
+- vdd-supply: phandle to regulator controlling VDD supply
+- vbb-supply: phandle to regulator controlling Body Bias supply
+             (Usually Adaptive Body Bias regulator)
+
+Required Properties for opp-supply node:
+- compatible: Should be one of:
+       "ti,omap-opp-supply" - basic OPP supply controlling VDD and VBB
+       "ti,omap5-opp-supply" - OMAP5+ optimized voltages in efuse(class0)VDD
+                           along with VBB
+       "ti,omap5-core-opp-supply" - OMAP5+ optimized voltages in efuse(class0) VDD
+                           but no VBB.
+- reg: Address and length of the efuse register set for the device (mandatory
+       only for "ti,omap5-opp-supply")
+- ti,efuse-settings: An array of u32 tuple items providing information about
+       optimized efuse configuration. Each item consists of the following:
+       volt: voltage in uV - reference voltage (OPP voltage)
+       efuse_offset: efuse offset from reg where the optimized voltage is stored.
+- ti,absolute-max-voltage-uv: absolute maximum voltage for the OPP supply.
+
+Example:
+
+/* Device Node (CPU)  */
+cpus {
+       cpu0: cpu@0 {
+               device_type = "cpu";
+
+               ...
+
+               vdd-supply = <&vcc>;
+               vbb-supply = <&abb_mpu>;
+       };
+};
+
+/* OMAP OPP Supply with Class0 registers */
+opp_supply_mpu: opp_supply@4a003b20 {
+       compatible = "ti,omap5-opp-supply";
+       reg = <0x4a003b20 0x8>;
+       ti,efuse-settings = <
+       /* uV   offset */
+       1060000 0x0
+       1160000 0x4
+       1210000 0x8
+       >;
+       ti,absolute-max-voltage-uv = <1500000>;
+};
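Illustrative note, not part of this binding: consuming the <reference voltage, efuse offset> tuples essentially means matching the current OPP's reference voltage and reading the optimized value at the recorded offset, falling back to the reference voltage when no entry matches. A standalone C sketch of that lookup (the efuse array stands in for the register window named by "reg"; a real driver would ioremap() it and use readl(), and the efuse contents here are made up):

/* Sketch only: pick the optimized voltage for a given OPP reference voltage
 * from a <volt, efuse_offset> table.
 */
#include <stdio.h>
#include <stdint.h>

struct efuse_setting {
	uint32_t ref_uv;	/* OPP reference voltage, in uV */
	uint32_t offset;	/* offset of the optimized voltage in the efuse window */
};

static uint32_t optimized_uv(const struct efuse_setting *tbl, unsigned int n,
			     const uint32_t *efuse, uint32_t ref_uv)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (tbl[i].ref_uv == ref_uv)
			return efuse[tbl[i].offset / 4];	/* 32-bit registers */
	return ref_uv;	/* no Class0 data: use the nominal voltage */
}

int main(void)
{
	/* Tuples mirror the example above; efuse contents are invented. */
	static const struct efuse_setting tbl[] = {
		{ 1060000, 0x0 }, { 1160000, 0x4 }, { 1210000, 0x8 },
	};
	static const uint32_t efuse[] = { 1045000, 1142000, 1198000 };

	printf("%u uV\n", (unsigned int)optimized_uv(tbl, 3, efuse, 1160000));
	return 0;
}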
index 4958801..a1dc936 100644 (file)
@@ -25,7 +25,7 @@ Optional properties:
 - bus-range:   PCI bus numbers covered
 
 Example
-       pcie_0: pcie@0xc00000000 {
+       pcie_0: pcie@c00000000 {
                compatible = "altr,pcie-root-port-1.0";
                reg = <0xc0000000 0x20000000>,
                        <0xff220000 0x00004000>;
index 7b1e48b..149d8f7 100644 (file)
@@ -52,7 +52,7 @@ Additional required properties for imx7d-pcie:
 
 Example:
 
-       pcie@0x01000000 {
+       pcie@01000000 {
                compatible = "fsl,imx6q-pcie", "snps,dw-pcie";
                reg = <0x01ffc000 0x04000>,
                      <0x01f00000 0x80000>;
index bdb7ab3..7bf9df0 100644 (file)
@@ -21,7 +21,7 @@ Optional properties:
 - dma-coherent: Present if DMA operations are coherent.
 
 Hip05 Example (note that Hip06 is the same except compatible):
-       pcie@0xb0080000 {
+       pcie@b0080000 {
                compatible = "hisilicon,hip05-pcie", "snps,dw-pcie";
                reg = <0 0xb0080000 0 0x10000>, <0x220 0x00000000 0 0x2000>;
                reg-names = "rc_dbi", "config";
index cbc7847..c1ce5a0 100644 (file)
@@ -45,7 +45,7 @@ Optional properties:
 - usb3_vbus-supply : regulator phandle for controller usb3 vbus
 
 Example:
-       usbphy: phy@0x01c13400 {
+       usbphy: phy@01c13400 {
                #phy-cells = <1>;
                compatible = "allwinner,sun4i-a10-usb-phy";
                /* phy base regs, phy1 pmu reg, phy2 pmu reg */
index 3600d5c..3914529 100644 (file)
@@ -25,7 +25,7 @@ Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
 
 For example:
 
-       pinmux: pinmux@0x0301d0c8 {
+       pinmux: pinmux@0301d0c8 {
                compatible = "brcm,cygnus-pinmux";
                reg = <0x0301d0c8 0x1b0>;
 
index eecf028..bf9b070 100644 (file)
@@ -96,14 +96,14 @@ For example, pinctrl might have subnodes like the following:
 
 For a specific board, if it wants to use sd1,
 it can add the following to its board-specific .dts file.
-sd1: sd@0x12340000 {
+sd1: sd@12340000 {
        pinctrl-names = "default";
        pinctrl-0 = <&sd1_pmx0>;
 }
 
 or
 
-sd1: sd@0x12340000 {
+sd1: sd@12340000 {
        pinctrl-names = "default";
        pinctrl-0 = <&sd1_pmx1>;
 }
index 5f55be5..f842052 100644 (file)
@@ -41,7 +41,7 @@ For example, pinctrl might have subnodes like the following:
 
 For a specific board, if it wants to use uart2 without hardware flow control,
 it can add the following to its board-specific .dts file.
-uart2: uart@0xb0070000 {
+uart2: uart@b0070000 {
        pinctrl-names = "default";
        pinctrl-0 = <&uart2_noflow_pins_a>;
 }
index 4864e3a..a01a3b8 100644 (file)
@@ -136,7 +136,7 @@ Example for rk3188:
                #size-cells = <1>;
                ranges;
 
-               gpio0: gpio0@0x2000a000 {
+               gpio0: gpio0@2000a000 {
                        compatible = "rockchip,rk3188-gpio-bank0";
                        reg = <0x2000a000 0x100>;
                        interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
@@ -149,7 +149,7 @@ Example for rk3188:
                        #interrupt-cells = <2>;
                };
 
-               gpio1: gpio1@0x2003c000 {
+               gpio1: gpio1@2003c000 {
                        compatible = "rockchip,gpio-bank";
                        reg = <0x2003c000 0x100>;
                        interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
index 14bd9e9..f335531 100644 (file)
@@ -40,6 +40,12 @@ Optional properties:
   domain's idle states. In the absence of this property, the domain would be
   considered as capable of being powered-on or powered-off.
 
+- operating-points-v2 : Phandles to the OPP tables of power domains provided by
+  a power domain provider. If the provider provides a single power domain only
+  or all the power domains provided by the provider have identical OPP tables,
+  then this shall contain a single phandle. Refer to ../opp/opp.txt for more
+  information.
+
 Example:
 
        power: power-controller@12340000 {
@@ -120,4 +126,63 @@ The node above defines a typical PM domain consumer device, which is located
 inside a PM domain with index 0 of a power controller represented by a node
 with the label "power".
 
+Optional properties:
+- required-opp: This contains phandle to an OPP node in another device's OPP
+  table. It may contain an array of phandles, where each phandle points to an
+  OPP of a different device. It should not contain multiple phandles to the OPP
+  nodes in the same OPP table. This specifies the minimum required OPP of the
+  device(s), whose OPP's phandle is present in this property, for the
+  functioning of the current device at the current OPP (where this property is
+  present).
+
+Example:
+- OPP table for domain provider that provides two domains.
+
+       domain0_opp_table: opp-table0 {
+               compatible = "operating-points-v2";
+
+               domain0_opp_0: opp-1000000000 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <975000 970000 985000>;
+               };
+               domain0_opp_1: opp-1100000000 {
+                       opp-hz = /bits/ 64 <1100000000>;
+                       opp-microvolt = <1000000 980000 1010000>;
+               };
+       };
+
+       domain1_opp_table: opp-table1 {
+               compatible = "operating-points-v2";
+
+               domain1_opp_0: opp-1200000000 {
+                       opp-hz = /bits/ 64 <1200000000>;
+                       opp-microvolt = <975000 970000 985000>;
+               };
+               domain1_opp_1: opp-1300000000 {
+                       opp-hz = /bits/ 64 <1300000000>;
+                       opp-microvolt = <1000000 980000 1010000>;
+               };
+       };
+
+       power: power-controller@12340000 {
+               compatible = "foo,power-controller";
+               reg = <0x12340000 0x1000>;
+               #power-domain-cells = <1>;
+               operating-points-v2 = <&domain0_opp_table>, <&domain1_opp_table>;
+       };
+
+       leaky-device0@12350000 {
+               compatible = "foo,i-leak-current";
+               reg = <0x12350000 0x1000>;
+               power-domains = <&power 0>;
+               required-opp = <&domain0_opp_0>;
+       };
+
+       leaky-device1@12350000 {
+               compatible = "foo,i-leak-current";
+               reg = <0x12350000 0x1000>;
+               power-domains = <&power 1>;
+               required-opp = <&domain1_opp_1>;
+       };
+
 [1]. Documentation/devicetree/bindings/power/domain-idle-state.txt
index 378f6dc..3cbf56c 100644 (file)
@@ -107,7 +107,7 @@ regulators (twl_reg1 and twl_reg2),
                ...
        };
 
-       mmc: mmc@0x0 {
+       mmc: mmc@0 {
                ...
                ...
                vmmc-supply = <&twl_reg1>;
index 8adbab2..4f8d8fd 100644 (file)
@@ -12,7 +12,7 @@ Optional properties:
 
 Example:
 
-uart@0x4000c400 {
+uart@4000c400 {
        compatible = "energymicro,efm32-uart";
        reg = <0x4000c400 0x400>;
        interrupts = <15>;
index f311472..75996b6 100644 (file)
@@ -14,7 +14,7 @@ Required properties:
 
 
 Example:
-       ps20: ps2@0x01c2a000 {
+       ps20: ps2@01c2a000 {
                compatible = "allwinner,sun4i-a10-ps2";
                reg = <0x01c2a000 0x400>;
                interrupts = <0 62 4>;
index 64c66a5..77cd42c 100644 (file)
@@ -220,7 +220,7 @@ qmss: qmss@2a40000 {
                #address-cells = <1>;
                #size-cells = <1>;
                ranges;
-               pdsp0@0x2a10000 {
+               pdsp0@2a10000 {
                        reg = <0x2a10000 0x1000>,
                              <0x2a0f000 0x100>,
                              <0x2a0c000 0x3c8>,
index 5875ca4..4248b66 100644 (file)
@@ -21,7 +21,7 @@ please check:
 
 Example:
 
-       i2s: i2s@0x77600000 {
+       i2s: i2s@77600000 {
                compatible = "adi,axi-i2s-1.00.a";
                reg = <0x77600000 0x1000>;
                clocks = <&clk 15>, <&audio_clock>;
index 4eb7997..7b664e7 100644 (file)
@@ -20,7 +20,7 @@ please check:
 
 Example:
 
-       spdif: spdif@0x77400000 {
+       spdif: spdif@77400000 {
                compatible = "adi,axi-spdif-tx-1.00.a";
                reg = <0x77600000 0x1000>;
                clocks = <&clk 15>, <&audio_clock>;
index 1783f9e..49a2e74 100644 (file)
@@ -20,7 +20,7 @@ Optional properties:
 Example:
 
 &i2c {
-       ak4613: ak4613@0x10 {
+       ak4613: ak4613@10 {
                compatible = "asahi-kasei,ak4613";
                reg = <0x10>;
        };
index 340784d..58e48ee 100644 (file)
@@ -17,7 +17,7 @@ Optional properties:
 Example 1:
 
 &i2c {
-       ak4648: ak4648@0x12 {
+       ak4648: ak4648@12 {
                compatible = "asahi-kasei,ak4642";
                reg = <0x12>;
        };
index 5ca5a70..3ab9dfe 100644 (file)
@@ -73,7 +73,7 @@ Example:
                compatible = "dlg,da7218";
                reg = <0x1a>;
                interrupt-parent = <&gpio6>;
-               interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
                wakeup-source;
 
                VDD-supply = <&reg_audio>;
index cf61681..5b54d2d 100644 (file)
@@ -77,7 +77,7 @@ Example:
                reg = <0x1a>;
 
                interrupt-parent = <&gpio6>;
-               interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
 
                VDD-supply = <&reg_audio>;
                VDDMIC-supply = <&reg_audio>;
index 54c8ef6..f7bf656 100644 (file)
@@ -7,10 +7,12 @@ Required properties:
 
 Optional properties:
        - dmicen-gpios: GPIO specifier for dmic to control start and stop
+       - num-channels: Number of microphones on this DAI
 
 Example node:
 
        dmic_codec: dmic@0 {
                compatible = "dmic-codec";
                dmicen-gpios = <&gpio4 3 GPIO_ACTIVE_HIGH>;
+               num-channels = <1>;
        };
index 6c28523..8b2b270 100644 (file)
@@ -10,7 +10,7 @@ Required properties:
 Example:
 
 &i2c {
-       max98371: max98371@0x31 {
+       max98371: max98371@31 {
                compatible = "maxim,max98371";
                reg = <0x31>;
        };
diff --git a/Documentation/devicetree/bindings/sound/max98373.txt b/Documentation/devicetree/bindings/sound/max98373.txt
new file mode 100644 (file)
index 0000000..456cb1c
--- /dev/null
@@ -0,0 +1,40 @@
+Maxim Integrated MAX98373 Speaker Amplifier
+
+This device supports I2C.
+
+Required properties:
+
+ - compatible : "maxim,max98373"
+
+ - reg : the I2C address of the device.
+
+Optional properties:
+
+  - maxim,vmon-slot-no : slot number used to send voltage information
+  - maxim,vmon-slot-no : slot number used to send voltage information
+                   or, in interleave mode, this will be used as the
+                   interleave slot.
+
+  - maxim,imon-slot-no : slot number used to send current information
+                   slot range : 0 ~ 15,  Default : 0
+
+  - maxim,spkfb-slot-no : slot number used to send speaker feedback information
+                   slot range : 0 ~ 15,  Default : 0
+
+  - maxim,interleave-mode : For cases where a single combined channel
+                  for the I/V sense data is not sufficient, the device can also be configured
+                  to share a single data output channel on alternating frames.
+                  In this configuration, the current and voltage data will be frame interleaved
+                  on a single output channel.
+                   Boolean, define to enable the interleave mode, Default : false
+
+Example:
+
+codec: max98373@31 {
+   compatible = "maxim,max98373";
+   reg = <0x31>;
+   maxim,vmon-slot-no = <0>;
+   maxim,imon-slot-no = <1>;
+   maxim,spkfb-slot-no = <2>;
+   maxim,interleave-mode;
+};
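Illustrative note, not part of this binding: in interleave mode the current and voltage sense data share one output channel on alternating frames. A small C sketch of splitting such a stream back into separate buffers; which frame parity carries current versus voltage is an assumption of this example.

/* Sketch only: split an interleaved I/V sense stream (alternating frames on a
 * single channel) into separate current and voltage sample buffers.
 */
#include <stdio.h>
#include <stdint.h>

static void deinterleave(const int16_t *in, unsigned int frames,
			 int16_t *current, int16_t *voltage)
{
	unsigned int f;

	for (f = 0; f < frames; f++) {
		if (f & 1)
			voltage[f / 2] = in[f];	/* odd frames: voltage (assumed) */
		else
			current[f / 2] = in[f];	/* even frames: current (assumed) */
	}
}

int main(void)
{
	const int16_t stream[] = { 10, 100, 11, 101, 12, 102 };	/* I,V,I,V,... */
	int16_t i_samp[3], v_samp[3];

	deinterleave(stream, 6, i_samp, v_samp);
	printf("I: %d %d %d  V: %d %d %d\n",
	       i_samp[0], i_samp[1], i_samp[2], v_samp[0], v_samp[1], v_samp[2]);
	return 0;
}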
index 394cd4e..b8bd914 100644 (file)
@@ -10,7 +10,7 @@ Required properties:
 Example:
 
 &i2c {
-       max9867: max9867@0x18 {
+       max9867: max9867@18 {
                compatible = "maxim,max9867";
                reg = <0x18>;
        };
index 77a57f8..6df87b9 100644 (file)
@@ -2,153 +2,143 @@ Mediatek AFE PCM controller for mt2701
 
 Required properties:
 - compatible = "mediatek,mt2701-audio";
-- reg: register location and size
 - interrupts: should contain AFE and ASYS interrupts
 - interrupt-names: should be "afe" and "asys"
 - power-domains: should define the power domain
+- clocks: Must contain an entry for each entry in clock-names
+  See ../clocks/clock-bindings.txt for details
 - clock-names: should have these clock names:
                "infra_sys_audio_clk",
                "top_audio_mux1_sel",
                "top_audio_mux2_sel",
-               "top_audio_mux1_div",
-               "top_audio_mux2_div",
-               "top_audio_48k_timing",
-               "top_audio_44k_timing",
-               "top_audpll_mux_sel",
-               "top_apll_sel",
-               "top_aud1_pll_98M",
-               "top_aud2_pll_90M",
-               "top_hadds2_pll_98M",
-               "top_hadds2_pll_294M",
-               "top_audpll",
-               "top_audpll_d4",
-               "top_audpll_d8",
-               "top_audpll_d16",
-               "top_audpll_d24",
-               "top_audintbus_sel",
-               "clk_26m",
-               "top_syspll1_d4",
-               "top_aud_k1_src_sel",
-               "top_aud_k2_src_sel",
-               "top_aud_k3_src_sel",
-               "top_aud_k4_src_sel",
-               "top_aud_k5_src_sel",
-               "top_aud_k6_src_sel",
-               "top_aud_k1_src_div",
-               "top_aud_k2_src_div",
-               "top_aud_k3_src_div",
-               "top_aud_k4_src_div",
-               "top_aud_k5_src_div",
-               "top_aud_k6_src_div",
-               "top_aud_i2s1_mclk",
-               "top_aud_i2s2_mclk",
-               "top_aud_i2s3_mclk",
-               "top_aud_i2s4_mclk",
-               "top_aud_i2s5_mclk",
-               "top_aud_i2s6_mclk",
-               "top_asm_m_sel",
-               "top_asm_h_sel",
-               "top_univpll2_d4",
-               "top_univpll2_d2",
-               "top_syspll_d5";
+               "top_audio_a1sys_hp",
+               "top_audio_a2sys_hp",
+               "i2s0_src_sel",
+               "i2s1_src_sel",
+               "i2s2_src_sel",
+               "i2s3_src_sel",
+               "i2s0_src_div",
+               "i2s1_src_div",
+               "i2s2_src_div",
+               "i2s3_src_div",
+               "i2s0_mclk_en",
+               "i2s1_mclk_en",
+               "i2s2_mclk_en",
+               "i2s3_mclk_en",
+               "i2so0_hop_ck",
+               "i2so1_hop_ck",
+               "i2so2_hop_ck",
+               "i2so3_hop_ck",
+               "i2si0_hop_ck",
+               "i2si1_hop_ck",
+               "i2si2_hop_ck",
+               "i2si3_hop_ck",
+               "asrc0_out_ck",
+               "asrc1_out_ck",
+               "asrc2_out_ck",
+               "asrc3_out_ck",
+               "audio_afe_pd",
+               "audio_afe_conn_pd",
+               "audio_a1sys_pd",
+               "audio_a2sys_pd",
+               "audio_mrgif_pd";
+- assigned-clocks: list of input clocks and dividers for the audio system.
+                  See ../clocks/clock-bindings.txt for details.
+- assigned-clock-parents: parents of the input clocks of the assigned clocks.
+- assigned-clock-rates: list of clock frequencies of assigned clocks.
+
+Must be a subnode of the MediaTek audsys device tree node.
+See ../arm/mediatek/mediatek,audsys.txt for details about the parent node.
 
 Example:
 
-       afe: mt2701-afe-pcm@11220000 {
-               compatible = "mediatek,mt2701-audio";
-               reg = <0 0x11220000 0 0x2000>,
-                     <0 0x112A0000 0 0x20000>;
-               interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
-                            <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
-               interrupt-names = "afe", "asys";
-               power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
-               clocks = <&infracfg CLK_INFRA_AUDIO>,
-                        <&topckgen CLK_TOP_AUD_MUX1_SEL>,
-                        <&topckgen CLK_TOP_AUD_MUX2_SEL>,
-                        <&topckgen CLK_TOP_AUD_MUX1_DIV>,
-                        <&topckgen CLK_TOP_AUD_MUX2_DIV>,
-                        <&topckgen CLK_TOP_AUD_48K_TIMING>,
-                        <&topckgen CLK_TOP_AUD_44K_TIMING>,
-                        <&topckgen CLK_TOP_AUDPLL_MUX_SEL>,
-                        <&topckgen CLK_TOP_APLL_SEL>,
-                        <&topckgen CLK_TOP_AUD1PLL_98M>,
-                        <&topckgen CLK_TOP_AUD2PLL_90M>,
-                        <&topckgen CLK_TOP_HADDS2PLL_98M>,
-                        <&topckgen CLK_TOP_HADDS2PLL_294M>,
-                        <&topckgen CLK_TOP_AUDPLL>,
-                        <&topckgen CLK_TOP_AUDPLL_D4>,
-                        <&topckgen CLK_TOP_AUDPLL_D8>,
-                        <&topckgen CLK_TOP_AUDPLL_D16>,
-                        <&topckgen CLK_TOP_AUDPLL_D24>,
-                        <&topckgen CLK_TOP_AUDINTBUS_SEL>,
-                        <&clk26m>,
-                        <&topckgen CLK_TOP_SYSPLL1_D4>,
-                        <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
-                        <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
-                        <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
-                        <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
-                        <&topckgen CLK_TOP_AUD_K5_SRC_SEL>,
-                        <&topckgen CLK_TOP_AUD_K6_SRC_SEL>,
-                        <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
-                        <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
-                        <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
-                        <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
-                        <&topckgen CLK_TOP_AUD_K5_SRC_DIV>,
-                        <&topckgen CLK_TOP_AUD_K6_SRC_DIV>,
-                        <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
-                        <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
-                        <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
-                        <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
-                        <&topckgen CLK_TOP_AUD_I2S5_MCLK>,
-                        <&topckgen CLK_TOP_AUD_I2S6_MCLK>,
-                        <&topckgen CLK_TOP_ASM_M_SEL>,
-                        <&topckgen CLK_TOP_ASM_H_SEL>,
-                        <&topckgen CLK_TOP_UNIVPLL2_D4>,
-                        <&topckgen CLK_TOP_UNIVPLL2_D2>,
-                        <&topckgen CLK_TOP_SYSPLL_D5>;
+       audsys: audio-subsystem@11220000 {
+               compatible = "mediatek,mt2701-audsys", "syscon", "simple-mfd";
+               ...
+
+               afe: audio-controller {
+                       compatible = "mediatek,mt2701-audio";
+                       interrupts =  <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
+                                     <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+                       interrupt-names = "afe", "asys";
+                       power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+
+                       clocks = <&infracfg CLK_INFRA_AUDIO>,
+                                <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+                                <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+                                <&topckgen CLK_TOP_AUD_48K_TIMING>,
+                                <&topckgen CLK_TOP_AUD_44K_TIMING>,
+                                <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
+                                <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
+                                <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
+                                <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
+                                <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
+                                <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
+                                <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
+                                <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
+                                <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
+                                <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
+                                <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
+                                <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
+                                <&audsys CLK_AUD_I2SO1>,
+                                <&audsys CLK_AUD_I2SO2>,
+                                <&audsys CLK_AUD_I2SO3>,
+                                <&audsys CLK_AUD_I2SO4>,
+                                <&audsys CLK_AUD_I2SIN1>,
+                                <&audsys CLK_AUD_I2SIN2>,
+                                <&audsys CLK_AUD_I2SIN3>,
+                                <&audsys CLK_AUD_I2SIN4>,
+                                <&audsys CLK_AUD_ASRCO1>,
+                                <&audsys CLK_AUD_ASRCO2>,
+                                <&audsys CLK_AUD_ASRCO3>,
+                                <&audsys CLK_AUD_ASRCO4>,
+                                <&audsys CLK_AUD_AFE>,
+                                <&audsys CLK_AUD_AFE_CONN>,
+                                <&audsys CLK_AUD_A1SYS>,
+                                <&audsys CLK_AUD_A2SYS>,
+                                <&audsys CLK_AUD_AFE_MRGIF>;
+
+                       clock-names = "infra_sys_audio_clk",
+                                     "top_audio_mux1_sel",
+                                     "top_audio_mux2_sel",
+                                     "top_audio_a1sys_hp",
+                                     "top_audio_a2sys_hp",
+                                     "i2s0_src_sel",
+                                     "i2s1_src_sel",
+                                     "i2s2_src_sel",
+                                     "i2s3_src_sel",
+                                     "i2s0_src_div",
+                                     "i2s1_src_div",
+                                     "i2s2_src_div",
+                                     "i2s3_src_div",
+                                     "i2s0_mclk_en",
+                                     "i2s1_mclk_en",
+                                     "i2s2_mclk_en",
+                                     "i2s3_mclk_en",
+                                     "i2so0_hop_ck",
+                                     "i2so1_hop_ck",
+                                     "i2so2_hop_ck",
+                                     "i2so3_hop_ck",
+                                     "i2si0_hop_ck",
+                                     "i2si1_hop_ck",
+                                     "i2si2_hop_ck",
+                                     "i2si3_hop_ck",
+                                     "asrc0_out_ck",
+                                     "asrc1_out_ck",
+                                     "asrc2_out_ck",
+                                     "asrc3_out_ck",
+                                     "audio_afe_pd",
+                                     "audio_afe_conn_pd",
+                                     "audio_a1sys_pd",
+                                     "audio_a2sys_pd",
+                                     "audio_mrgif_pd";
 
-               clock-names = "infra_sys_audio_clk",
-                             "top_audio_mux1_sel",
-                             "top_audio_mux2_sel",
-                             "top_audio_mux1_div",
-                             "top_audio_mux2_div",
-                             "top_audio_48k_timing",
-                             "top_audio_44k_timing",
-                             "top_audpll_mux_sel",
-                             "top_apll_sel",
-                             "top_aud1_pll_98M",
-                             "top_aud2_pll_90M",
-                             "top_hadds2_pll_98M",
-                             "top_hadds2_pll_294M",
-                             "top_audpll",
-                             "top_audpll_d4",
-                             "top_audpll_d8",
-                             "top_audpll_d16",
-                             "top_audpll_d24",
-                             "top_audintbus_sel",
-                             "clk_26m",
-                             "top_syspll1_d4",
-                             "top_aud_k1_src_sel",
-                             "top_aud_k2_src_sel",
-                             "top_aud_k3_src_sel",
-                             "top_aud_k4_src_sel",
-                             "top_aud_k5_src_sel",
-                             "top_aud_k6_src_sel",
-                             "top_aud_k1_src_div",
-                             "top_aud_k2_src_div",
-                             "top_aud_k3_src_div",
-                             "top_aud_k4_src_div",
-                             "top_aud_k5_src_div",
-                             "top_aud_k6_src_div",
-                             "top_aud_i2s1_mclk",
-                             "top_aud_i2s2_mclk",
-                             "top_aud_i2s3_mclk",
-                             "top_aud_i2s4_mclk",
-                             "top_aud_i2s5_mclk",
-                             "top_aud_i2s6_mclk",
-                             "top_asm_m_sel",
-                             "top_asm_h_sel",
-                             "top_univpll2_d4",
-                             "top_univpll2_d2",
-                             "top_syspll_d5";
+                       assigned-clocks = <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+                                         <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+                                         <&topckgen CLK_TOP_AUD_MUX1_DIV>,
+                                         <&topckgen CLK_TOP_AUD_MUX2_DIV>;
+                       assigned-clock-parents = <&topckgen CLK_TOP_AUD1PLL_98M>,
+                                                <&topckgen CLK_TOP_AUD2PLL_90M>;
+                       assigned-clock-rates = <0>, <0>, <49152000>, <45158400>;
+               };
        };
index 601c518..4eb980b 100644 (file)
@@ -1,10 +1,31 @@
 * Freescale MXS audio complex with SGTL5000 codec
 
 Required properties:
-- compatible: "fsl,mxs-audio-sgtl5000"
-- model: The user-visible name of this sound complex
-- saif-controllers: The phandle list of the MXS SAIF controller
-- audio-codec: The phandle of the SGTL5000 audio codec
+- compatible           : "fsl,mxs-audio-sgtl5000"
+- model                        : The user-visible name of this sound complex
+- saif-controllers     : The phandle list of the MXS SAIF controller
+- audio-codec          : The phandle of the SGTL5000 audio codec
+- audio-routing                : A list of the connections between audio components.
+                         Each entry is a pair of strings, the first being the
+                         connection's sink, the second being the connection's
+                         source. Valid names could be power supplies, SGTL5000
+                         pins, and the jacks on the board:
+
+                         Power supplies:
+                          * Mic Bias
+
+                         SGTL5000 pins:
+                          * MIC_IN
+                          * LINE_IN
+                          * HP_OUT
+                          * LINE_OUT
+
+                         Board connectors:
+                          * Mic Jack
+                          * Line In Jack
+                          * Headphone Jack
+                          * Line Out Jack
+                          * Ext Spk
 
 Example:
 
@@ -14,4 +35,8 @@ sound {
        model = "imx28-evk-sgtl5000";
        saif-controllers = <&saif0 &saif1>;
        audio-codec = <&sgtl5000>;
+       audio-routing =
+               "MIC_IN", "Mic Jack",
+               "Mic Jack", "Mic Bias",
+               "Headphone Jack", "HP_OUT";
 };
index 2f5e973..d16d968 100644 (file)
@@ -69,7 +69,7 @@ Optional properties:
   - nuvoton,jack-insert-debounce: number from 0 to 7 that sets debounce time to 2^(n+2) ms
   - nuvoton,jack-eject-debounce: number from 0 to 7 that sets debounce time to 2^(n+2) ms
 
-  - nuvoton,crosstalk-bypass: make crosstalk function bypass if set.
+  - nuvoton,crosstalk-enable: make crosstalk function enable if set.
 
   - clocks: list of phandle and clock specifier pairs according to common clock bindings for the
       clocks described in clock-names
@@ -98,7 +98,7 @@ Example:
       nuvoton,short-key-debounce = <2>;
       nuvoton,jack-insert-debounce = <7>;
       nuvoton,jack-eject-debounce = <7>;
-      nuvoton,crosstalk-bypass;
+      nuvoton,crosstalk-enable;
 
       clock-names = "mclk";
       clocks = <&tegra_car TEGRA210_CLK_CLK_OUT_2>;
diff --git a/Documentation/devicetree/bindings/sound/pcm186x.txt b/Documentation/devicetree/bindings/sound/pcm186x.txt
new file mode 100644 (file)
index 0000000..1087f48
--- /dev/null
@@ -0,0 +1,42 @@
+Texas Instruments PCM186x Universal Audio ADC
+
+These devices support both I2C and SPI (configured with pin strapping
+on the board).
+
+Required properties:
+
+ - compatible : "ti,pcm1862",
+                "ti,pcm1863",
+                "ti,pcm1864",
+                "ti,pcm1865"
+
+ - reg : The I2C address of the device for I2C, the chip select
+         number for SPI.
+
+ - avdd-supply: Analog core power supply (3.3v)
+ - dvdd-supply: Digital core power supply
+ - iovdd-supply: Digital IO power supply
+        See regulator/regulator.txt for more information
+
+CODEC input pins:
+ * VINL1
+ * VINR1
+ * VINL2
+ * VINR2
+ * VINL3
+ * VINR3
+ * VINL4
+ * VINR4
+
+The pins can be referenced in the sound node's audio-routing property.
+
+Example:
+
+       pcm186x: audio-codec@4a {
+               compatible = "ti,pcm1865";
+               reg = <0x4a>;
+
+               avdd-supply = <&reg_3v3_analog>;
+               dvdd-supply = <&reg_3v3>;
+               iovdd-supply = <&reg_1v8>;
+       };
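
Since the example above only covers I2C, a minimal SPI variant, assuming a parent controller label, chip-select number and bus rate (all illustrative, not taken from the binding), might look like:

	&spi0 {
		pcm186x: audio-codec@0 {
			compatible = "ti,pcm1865";
			reg = <0>;			/* chip select number on SPI */
			spi-max-frequency = <10000000>;	/* assumed maximum clock rate */

			avdd-supply = <&reg_3v3_analog>;
			dvdd-supply = <&reg_3v3>;
			iovdd-supply = <&reg_1v8>;
		};
	};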
index 0d0ab51..0cf0f81 100644 (file)
@@ -20,7 +20,7 @@ Required properties:
 
 Example:
 
-sh_fsi2: sh_fsi2@0xec230000 {
+sh_fsi2: sh_fsi2@ec230000 {
        compatible = "renesas,sh_fsi2";
        reg = <0xec230000 0x400>;
        interrupts = <0 146 0x4>;
index 085bec3..5bed9a5 100644 (file)
@@ -4,7 +4,7 @@ Renesas R-Car sound
 * Modules
 =============================================
 
-Renesas R-Car sound is constructed from below modules
+Renesas R-Car and RZ/G sound is constructed from the modules below
 (for Gen2 or later)
 
  SCU           : Sampling Rate Converter Unit
@@ -197,12 +197,17 @@ Ex)
        [MEM] -> [SRC2] -> [CTU03] -+
 
        sound {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
                compatible = "simple-scu-audio-card";
                ...
-               simple-audio-card,cpu-0 {
+               simple-audio-card,cpu@0 {
+                       reg = <0>;
                        sound-dai = <&rcar_sound 0>;
                };
-               simple-audio-card,cpu-1 {
+               simple-audio-card,cpu@1 {
+                       reg = <1>;
                        sound-dai = <&rcar_sound 1>;
                };
                simple-audio-card,codec {
@@ -334,9 +339,11 @@ Required properties:
 
 - compatible                   : "renesas,rcar_sound-<soctype>", fallbacks
                                  "renesas,rcar_sound-gen1" if generation1, and
-                                 "renesas,rcar_sound-gen2" if generation2
+                                 "renesas,rcar_sound-gen2" if generation2 (or RZ/G1)
                                  "renesas,rcar_sound-gen3" if generation3
                                  Examples with soctypes are:
+                                   - "renesas,rcar_sound-r8a7743" (RZ/G1M)
+                                   - "renesas,rcar_sound-r8a7745" (RZ/G1E)
                                    - "renesas,rcar_sound-r8a7778" (R-Car M1A)
                                    - "renesas,rcar_sound-r8a7779" (R-Car H1)
                                    - "renesas,rcar_sound-r8a7790" (R-Car H2)
index 0a1dc4e..ec20c12 100644 (file)
@@ -33,7 +33,7 @@ Required properties on RK3288:
 
 Example for the rk3188 SPDIF controller:
 
-spdif: spdif@0x1011e000 {
+spdif: spdif@1011e000 {
        compatible = "rockchip,rk3188-spdif", "rockchip,rk3066-spdif";
        reg = <0x1011e000 0x2000>;
        interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
index 166f229..17c13e7 100644 (file)
@@ -140,6 +140,7 @@ sound {
        simple-audio-card,name = "Cubox Audio";
 
        simple-audio-card,dai-link@0 {          /* I2S - HDMI */
+               reg = <0>;
                format = "i2s";
                cpu {
                        sound-dai = <&audio1 0>;
@@ -150,6 +151,7 @@ sound {
        };
 
        simple-audio-card,dai-link@1 {          /* S/PDIF - HDMI */
+               reg = <1>;
                cpu {
                        sound-dai = <&audio1 1>;
                };
@@ -159,6 +161,7 @@ sound {
        };
 
        simple-audio-card,dai-link@2 {          /* S/PDIF - S/PDIF */
+               reg = <2>;
                cpu {
                        sound-dai = <&audio1 1>;
                };
index 40068ec..9c1ee52 100644 (file)
@@ -51,7 +51,7 @@ Optional properties:
 
 Example:
 
-       sti_uni_player1: sti-uni-player@0x8D81000 {
+       sti_uni_player1: sti-uni-player@8D81000 {
                compatible = "st,stih407-uni-player-hdmi";
                #sound-dai-cells = <0>;
                st,syscfg = <&syscfg_core>;
@@ -63,7 +63,7 @@ Example:
                st,tdm-mode = <1>;
        };
 
-       sti_uni_player2: sti-uni-player@0x8D82000 {
+       sti_uni_player2: sti-uni-player@8D82000 {
                compatible = "st,stih407-uni-player-pcm-out";
                #sound-dai-cells = <0>;
                st,syscfg = <&syscfg_core>;
@@ -74,7 +74,7 @@ Example:
                dma-names = "tx";
        };
 
-       sti_uni_player3: sti-uni-player@0x8D85000 {
+       sti_uni_player3: sti-uni-player@8D85000 {
                compatible = "st,stih407-uni-player-spdif";
                #sound-dai-cells = <0>;
                st,syscfg = <&syscfg_core>;
@@ -85,7 +85,7 @@ Example:
                dma-names = "tx";
        };
 
-       sti_uni_reader1: sti-uni-reader@0x8D84000 {
+       sti_uni_reader1: sti-uni-reader@8D84000 {
                compatible = "st,stih407-uni-reader-hdmi";
                #sound-dai-cells = <0>;
                st,syscfg = <&syscfg_core>;
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-adfsdm.txt b/Documentation/devicetree/bindings/sound/st,stm32-adfsdm.txt
new file mode 100644 (file)
index 0000000..864f5b0
--- /dev/null
@@ -0,0 +1,63 @@
+STMicroelectronics Audio Digital Filter Sigma Delta modulators (DFSDM)
+
+The DFSDM allows PDM microphone capture through an SPI interface. The audio
+interface is seen as a sub-block of the DFSDM device.
+For details on DFSDM bindings refer to ../iio/adc/st,stm32-dfsdm-adc.txt
+
+Required properties:
+  - compatible: "st,stm32h7-dfsdm-dai".
+
+  - #sound-dai-cells : Must be equal to 0
+
+  - io-channels : phandle to iio dfsdm instance node.
+
+Example of a sound card using audio DFSDM node.
+
+       sound_card {
+               compatible = "audio-graph-card";
+
+               dais = <&cpu_port>;
+       };
+
+       dfsdm: dfsdm@40017000 {
+               compatible = "st,stm32h7-dfsdm";
+               reg = <0x40017000 0x400>;
+               clocks = <&rcc DFSDM1_CK>;
+               clock-names = "dfsdm";
+               #interrupt-cells = <1>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               dfsdm_adc0: filter@0 {
+                       compatible = "st,stm32-dfsdm-dmic";
+                       reg = <0>;
+                       interrupts = <110>;
+                       dmas = <&dmamux1 101 0x400 0x00>;
+                       dma-names = "rx";
+                       st,adc-channels = <1>;
+                       st,adc-channel-names = "dmic0";
+                       st,adc-channel-types = "SPI_R";
+                       st,adc-channel-clk-src = "CLKOUT";
+                       st,filter-order = <5>;
+
+                       dfsdm_dai0: dfsdm-dai {
+                               compatible = "st,stm32h7-dfsdm-dai";
+                               #sound-dai-cells = <0>;
+                               io-channels = <&dfsdm_adc0 0>;
+				cpu_port: port {
+					dfsdm_endpoint: endpoint {
+						remote-endpoint = <&dmic0_endpoint>;
+					};
+				};
+			};
+		};
+	};
+
+       dmic0: dmic@0 {
+               compatible = "dmic-codec";
+               #sound-dai-cells = <0>;
+               port {
+                       dmic0_endpoint: endpoint {
+                               remote-endpoint = <&dfsdm_endpoint>;
+                       };
+               };
+       };
index 1f9cd70..b1acc1a 100644 (file)
@@ -20,11 +20,6 @@ Required properties:
 
 Optional properties:
   - resets: Reference to a reset controller asserting the SAI
-  - st,sync: specify synchronization mode.
-       By default SAI sub-block is in asynchronous mode.
-       This property sets SAI sub-block as slave of another SAI sub-block.
-       Must contain the phandle and index of the sai sub-block providing
-       the synchronization.
 
 SAI subnodes:
 Two subnodes corresponding to SAI sub-block instances A and B can be defined.
@@ -44,6 +39,13 @@ SAI subnodes required properties:
   - pinctrl-names: should contain only value "default"
   - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt
 
+SAI subnodes Optional properties:
+  - st,sync: specify synchronization mode.
+       By default SAI sub-block is in asynchronous mode.
+       This property sets SAI sub-block as slave of another SAI sub-block.
+       Must contain the phandle and index of the sai sub-block providing
+       the synchronization.
+
 The device node should contain one 'port' child node with one child 'endpoint'
 node, according to the bindings defined in Documentation/devicetree/bindings/
 graph.txt.
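
For reference, a minimal sketch of how the relocated st,sync property ties two sub-blocks together (node names, unit addresses and the index value are assumptions made for illustration, not taken from the patch):

	sai2: sai@4400b000 {
		compatible = "st,stm32h7-sai";
		...

		sai2a: audio-controller@4400b004 {
			compatible = "st,stm32-sai-sub-a";
			...
		};

		sai2b: audio-controller@4400b024 {
			compatible = "st,stm32-sai-sub-b";
			/* slave of sub-block A: phandle plus index, as described above */
			st,sync = <&sai2a 1>;
			...
		};
	};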
index 05d7135..b9d50d6 100644 (file)
@@ -8,6 +8,7 @@ Required properties:
 - compatible: should be one of the following:
    - "allwinner,sun4i-a10-i2s"
    - "allwinner,sun6i-a31-i2s"
+   - "allwinner,sun8i-a83t-i2s"
    - "allwinner,sun8i-h3-i2s"
 - reg: physical base address of the controller and length of memory mapped
   region.
@@ -23,6 +24,7 @@ Required properties:
 
 Required properties for the following compatibles:
        - "allwinner,sun6i-a31-i2s"
+       - "allwinner,sun8i-a83t-i2s"
        - "allwinner,sun8i-h3-i2s"
 - resets: phandle to the reset line for this codec
 
index 40d94f8..7481653 100644 (file)
@@ -6,10 +6,12 @@ audio playback. For more product information please see the links below:
 
 http://www.ti.com/product/TAS5720L
 http://www.ti.com/product/TAS5720M
+http://www.ti.com/product/TAS5722L
 
 Required properties:
 
-- compatible : "ti,tas5720"
+- compatible : "ti,tas5720",
+               "ti,tas5722"
 - reg : I2C slave address
 - dvdd-supply : phandle to a 3.3-V supply for the digital circuitry
 - pvdd-supply : phandle to a supply used for the Class-D amp and the analog
index 23ba522..1620e68 100644 (file)
@@ -6,18 +6,18 @@ Required properties:
 
 - reg : the I2C address of the device
 
+- #sound-dai-cells : must be 0.
+
 Example:
 
 &i2c1 {
-       clock-frequency = <100000>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_i2c1>;
-       status = "okay";
 
-       codec: tfa9879@6c {
+       amp: amp@6c {
                #sound-dai-cells = <0>;
                compatible = "nxp,tfa9879";
                reg = <0x6c>;
-        };
+       };
 };
 
diff --git a/Documentation/devicetree/bindings/sound/ti,tas6424.txt b/Documentation/devicetree/bindings/sound/ti,tas6424.txt
new file mode 100644 (file)
index 0000000..1c4ada0
--- /dev/null
@@ -0,0 +1,20 @@
+Texas Instruments TAS6424 Quad-Channel Audio amplifier
+
+The TAS6424 serial control bus communicates through I2C protocols.
+
+Required properties:
+       - compatible: "ti,tas6424" - TAS6424
+       - reg: I2C slave address
+	- #sound-dai-cells: must be equal to 0
+
+Example:
+
+tas6424: tas6424@6a {
+       compatible = "ti,tas6424";
+       reg = <0x6a>;
+
+       #sound-dai-cells = <0>;
+};
+
+For more product information please see the link below:
+http://www.ti.com/product/TAS6424-Q1
index 6fbba56..5b3c33b 100644 (file)
@@ -22,7 +22,7 @@ Required properties:
 
 Optional properties:
 
-- gpio-reset - gpio pin number used for codec reset
+- reset-gpios - GPIO specification for the active low RESET input.
 - ai31xx-micbias-vg - MicBias Voltage setting
         1 or MICBIAS_2_0V - MICBIAS output is powered to 2.0V
         2 or MICBIAS_2_5V - MICBIAS output is powered to 2.5V
@@ -30,6 +30,10 @@ Optional properties:
        If this node is not mentioned or if the value is unknown, then
        micbias is set to 2.0V.
 
+Deprecated properties:
+
+- gpio-reset - gpio pin number used for codec reset
+
 CODEC output pins:
   * HPL
   * HPR
@@ -48,6 +52,7 @@ CODEC input pins:
 The pins can be used in referring sound node's audio-routing property.
 
 Example:
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/sound/tlv320aic31xx-micbias.h>
 
 tlv320aic31xx: tlv320aic31xx@18 {
@@ -56,6 +61,8 @@ tlv320aic31xx: tlv320aic31xx@18 {
 
        ai31xx-micbias-vg = <MICBIAS_OFF>;
 
+       reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+
        HPVDD-supply = <&regulator>;
        SPRVDD-supply = <&regulator>;
        SPLVDD-supply = <&regulator>;
index ba5b45c..9796c46 100644 (file)
@@ -17,7 +17,7 @@ Required properties:
 
 Optional properties:
 
-- gpio-reset - gpio pin number used for codec reset
+- reset-gpios - GPIO specification for the active low RESET input.
 - ai3x-gpio-func - <array of 2 int> - AIC3X_GPIO1 & AIC3X_GPIO2 Functionality
                                    - Not supported on tlv320aic3104
 - ai3x-micbias-vg - MicBias Voltage required.
@@ -34,6 +34,10 @@ Optional properties:
 - AVDD-supply, IOVDD-supply, DRVDD-supply, DVDD-supply : power supplies for the
   device as covered in Documentation/devicetree/bindings/regulator/regulator.txt
 
+Deprecated properties:
+
+- gpio-reset - gpio pin number used for codec reset
+
 CODEC output pins:
   * LLOUT
   * RLOUT
@@ -61,10 +65,14 @@ The pins can be used in referring sound node's audio-routing property.
 
 Example:
 
+#include <dt-bindings/gpio/gpio.h>
+
 tlv320aic3x: tlv320aic3x@1b {
        compatible = "ti,tlv320aic3x";
        reg = <0x1b>;
 
+       reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+
        AVDD-supply = <&regulator>;
        IOVDD-supply = <&regulator>;
        DRVDD-supply = <&regulator>;
diff --git a/Documentation/devicetree/bindings/sound/tscs42xx.txt b/Documentation/devicetree/bindings/sound/tscs42xx.txt
new file mode 100644 (file)
index 0000000..2ac2f09
--- /dev/null
@@ -0,0 +1,16 @@
+TSCS42XX Audio CODEC
+
+Required Properties:
+
+       - compatible :  "tempo,tscs42A1" for analog mic
+                       "tempo,tscs42A2" for digital mic
+
+       - reg :         <0x71> for analog mic
+                       <0x69> for digital mic
+
+Example:
+
+wookie: codec@69 {
+       compatible = "tempo,tscs42A2";
+       reg = <0x69>;
+};
diff --git a/Documentation/devicetree/bindings/sound/uniphier,evea.txt b/Documentation/devicetree/bindings/sound/uniphier,evea.txt
new file mode 100644 (file)
index 0000000..3f31b23
--- /dev/null
@@ -0,0 +1,26 @@
+Socionext EVEA - UniPhier SoC internal codec driver
+
+Required properties:
+- compatible      : should be "socionext,uniphier-evea".
+- reg             : offset and length of the register set for the device.
+- clock-names     : should include the following entries:
+                    "evea", "exiv"
+- clocks          : a list of phandles; should contain an entry for each
+                    entry in clock-names.
+- reset-names     : should include the following entries:
+                    "evea", "exiv", "adamv"
+- resets          : a list of phandles; should contain an entry for each
+                    entry in reset-names.
+- #sound-dai-cells: should be 1.
+
+Example:
+
+       codec {
+               compatible = "socionext,uniphier-evea";
+               reg = <0x57900000 0x1000>;
+               clock-names = "evea", "exiv";
+               clocks = <&sys_clk 41>, <&sys_clk 42>;
+               reset-names = "evea", "exiv", "adamv";
+               resets = <&sys_rst 41>, <&sys_rst 42>, <&adamv_rst 0>;
+               #sound-dai-cells = <1>;
+       };
index 2c1e6a4..e0fa61a 100644 (file)
@@ -19,7 +19,7 @@ Recommended properties :
 
 Example:
 
-spi1: spi@0x4000c400 { /* USART1 */
+spi1: spi@4000c400 { /* USART1 */
        #address-cells = <1>;
        #size-cells = <0>;
        compatible = "energymicro,efm32-spi";
index 5bf1396..e3c48b2 100644 (file)
@@ -12,24 +12,30 @@ Required properties:
   - "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc
 - reg : Offset and length of the register set for the device
 - interrupts : Should contain CSPI/eCSPI interrupt
-- cs-gpios : Specifies the gpio pins to be used for chipselects.
 - clocks : Clock specifiers for both ipg and per clocks.
 - clock-names : Clock names should include both "ipg" and "per"
 See the clock consumer binding,
        Documentation/devicetree/bindings/clock/clock-bindings.txt
-- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
-               Documentation/devicetree/bindings/dma/dma.txt
-- dma-names: DMA request names should include "tx" and "rx" if present.
 
-Obsolete properties:
-- fsl,spi-num-chipselects : Contains the number of the chipselect
+Recommended properties:
+- cs-gpios : GPIOs to use as chip selects, see spi-bus.txt.  While the native chip
+select lines can be used, they appear to always generate a pulse between each
+word of a transfer.  Most use cases will require GPIO based chip selects to
+generate a valid transaction.
 
 Optional properties:
+- num-cs :  Number of total chip selects, see spi-bus.txt.
+- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
+Documentation/devicetree/bindings/dma/dma.txt.
+- dma-names: DMA request names, if present, should include "tx" and "rx".
 - fsl,spi-rdy-drctl: Integer, representing the value of DRCTL, the register
 controlling the SPI_READY handling. Note that to enable the DRCTL consideration,
 the SPI_READY mode-flag needs to be set too.
 Valid values are: 0 (disabled), 1 (edge-triggered burst) and 2 (level-triggered burst).
 
+Obsolete properties:
+- fsl,spi-num-chipselects : Contains the number of the chipselect
+
 Example:
 
 ecspi@70010000 {
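
The hunk above cuts the example off at its opening line; a minimal sketch focused only on the recommended GPIO chip selects (the controller variant, addresses and GPIO lines are illustrative assumptions, and the required clock properties are omitted for brevity) could look like:

	ecspi1: spi@70010000 {
		#address-cells = <1>;
		#size-cells = <0>;
		compatible = "fsl,imx51-ecspi";
		reg = <0x70010000 0x4000>;
		interrupts = <36>;
		/* GPIO chip selects avoid the pulse between words noted above */
		cs-gpios = <&gpio3 24 0>, <&gpio3 25 0>;
	};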
index 88b6ea1..44d7cb2 100644 (file)
@@ -239,7 +239,7 @@ cpus {
         * A simple fan controller which supports 10 speeds of operation
         * (represented as 0-9).
         */
-       fan0: fan@0x48 {
+       fan0: fan@48 {
                ...
                cooling-min-level = <0>;
                cooling-max-level = <9>;
@@ -252,7 +252,7 @@ ocp {
        /*
         * A simple IC with a single bandgap temperature sensor.
         */
-       bandgap0: bandgap@0x0000ED00 {
+       bandgap0: bandgap@0000ED00 {
                ...
                #thermal-sensor-cells = <0>;
        };
@@ -330,7 +330,7 @@ ocp {
        /*
         * A simple IC with several bandgap temperature sensors.
         */
-       bandgap0: bandgap@0x0000ED00 {
+       bandgap0: bandgap@0000ED00 {
                ...
                #thermal-sensor-cells = <1>;
        };
@@ -447,7 +447,7 @@ one thermal zone.
        /*
         * A simple IC with a single temperature sensor.
         */
-       adc: sensor@0x49 {
+       adc: sensor@49 {
                ...
                #thermal-sensor-cells = <0>;
        };
@@ -458,7 +458,7 @@ ocp {
        /*
         * A simple IC with a single bandgap temperature sensor.
         */
-       bandgap0: bandgap@0x0000ED00 {
+       bandgap0: bandgap@0000ED00 {
                ...
                #thermal-sensor-cells = <0>;
        };
@@ -516,7 +516,7 @@ with many sensors and many cooling devices.
        /*
         * An IC with several temperature sensor.
         */
-       adc_dummy: sensor@0x50 {
+       adc_dummy: sensor@50 {
                ...
                #thermal-sensor-cells = <1>; /* sensor internal ID */
        };
index 1f69ee1..21d9a93 100644 (file)
@@ -32,7 +32,7 @@ Optional properties:
 
 Example:
 
-       ufsphy1: ufsphy@0xfc597000 {
+       ufsphy1: ufsphy@fc597000 {
                compatible = "qcom,ufs-phy-qmp-20nm";
                reg = <0xfc597000 0x800>;
                reg-names = "phy_mem";
@@ -53,7 +53,7 @@ Example:
                        <&clock_gcc clk_gcc_ufs_rx_cfg_clk>;
        };
 
-       ufshc@0xfc598000 {
+       ufshc@fc598000 {
                ...
                phys = <&ufsphy1>;
                phy-names = "ufsphy";
index a99ed55..c39dfef 100644 (file)
@@ -46,7 +46,7 @@ Note: If above properties are not defined it can be assumed that the supply
 regulators or clocks are always on.
 
 Example:
-       ufshc@0xfc598000 {
+       ufshc@fc598000 {
                compatible = "jedec,ufs-1.1";
                reg = <0xfc598000 0x800>;
                interrupts = <0 28 0>;
index 7a33f22..7a198a3 100644 (file)
@@ -95,6 +95,7 @@ usb: usb@47400000 {
                reg = <0x47401300 0x100>;
                reg-names = "phy";
                ti,ctrl_mod = <&ctrl_mod>;
+               #phy-cells = <0>;
        };
 
        usb0: usb@47401000 {
@@ -141,6 +142,7 @@ usb: usb@47400000 {
                reg = <0x47401b00 0x100>;
                reg-names = "phy";
                ti,ctrl_mod = <&ctrl_mod>;
+               #phy-cells = <0>;
        };
 
        usb1: usb@47401800 {
index 9feea6c..065c91d 100644 (file)
@@ -22,7 +22,7 @@ See: Documentation/devicetree/bindings/reset/reset.txt
 
 Example:
 
-       ehci1: usb@0xfe203e00 {
+       ehci1: usb@fe203e00 {
                compatible = "st,st-ehci-300x";
                reg = <0xfe203e00 0x100>;
                interrupts = <GIC_SPI 148 IRQ_TYPE_NONE>;
index d893ec9..44c998c 100644 (file)
@@ -20,7 +20,7 @@ See: Documentation/devicetree/bindings/reset/reset.txt
 
 Example:
 
-       ohci0: usb@0xfe1ffc00 {
+       ohci0: usb@fe1ffc00 {
                compatible = "st,st-ohci-300x";
                reg = <0xfe1ffc00 0x100>;
                interrupts = <GIC_SPI 149 IRQ_TYPE_NONE>;
index 0994bdd..f776fb8 100644 (file)
@@ -347,6 +347,7 @@ tcg Trusted Computing Group
 tcl    Toby Churchill Ltd.
 technexion     TechNexion
 technologic    Technologic Systems
+tempo  Tempo Semiconductor
 terasic        Terasic Inc.
 thine  THine Electronics, Inc.
 ti     Texas Instruments
index e27763e..3c7a1cd 100644 (file)
@@ -6,7 +6,7 @@ reg: Register address and length for watchdog registers
 
 Example:
 
-watchdog: jz4740-watchdog@0x10002000 {
+watchdog: jz4740-watchdog@10002000 {
        compatible = "ingenic,jz4740-watchdog";
        reg = <0x10002000 0x100>;
 };
diff --git a/Documentation/devicetree/bindings/watchdog/zii,rave-sp-wdt.txt b/Documentation/devicetree/bindings/watchdog/zii,rave-sp-wdt.txt
new file mode 100644 (file)
index 0000000..3de9618
--- /dev/null
@@ -0,0 +1,39 @@
+Zodiac Inflight Innovations RAVE Supervisory Processor Watchdog Bindings
+
+The RAVE SP watchdog device is an "MFD cell" device corresponding to the
+watchdog functionality of RAVE Supervisory Processor. It is expected
+that its Device Tree node is specified as a child of the node
+corresponding to the parent RAVE SP device (as documented in
+Documentation/devicetree/bindings/mfd/zii,rave-sp.txt)
+
+Required properties:
+
+- compatible: Depending on wire protocol implemented by RAVE SP
+  firmware, should be one of:
+       - "zii,rave-sp-watchdog"
+       - "zii,rave-sp-watchdog-legacy"
+
+Optional properties:
+
+- wdt-timeout: Two byte nvmem cell specified as per
+               Documentation/devicetree/bindings/nvmem/nvmem.txt
+
+Example:
+
+       rave-sp {
+               compatible = "zii,rave-sp-rdu1";
+               current-speed = <38400>;
+
+               eeprom {
+                       wdt_timeout: wdt-timeout@8E {
+                               reg = <0x8E 2>;
+                       };
+               };
+
+               watchdog {
+                       compatible = "zii,rave-sp-watchdog";
+                       nvmem-cells = <&wdt_timeout>;
+                       nvmem-cell-names = "wdt-timeout";
+               };
+	};
+
index 6245c99..fbbb283 100644 (file)
@@ -185,7 +185,7 @@ The details of these operations are:
       void dma_async_issue_pending(struct dma_chan *chan);
 
 Further APIs:
-------------
+-------------
 
 1. Terminate APIs
 
diff --git a/Documentation/driver-api/iio/hw-consumer.rst b/Documentation/driver-api/iio/hw-consumer.rst
new file mode 100644 (file)
index 0000000..8facce6
--- /dev/null
@@ -0,0 +1,51 @@
+===========
+HW consumer
+===========
+An IIO device can be directly connected to another device in hardware. In this
+case the buffers between IIO provider and IIO consumer are handled by hardware.
+The Industrial I/O HW consumer offers a way to bond these IIO devices without
+software buffer for data. The implementation can be found under
+:file:`drivers/iio/buffer/hw-consumer.c`
+
+
+* struct :c:type:`iio_hw_consumer` — Hardware consumer structure
+* :c:func:`iio_hw_consumer_alloc` — Allocate IIO hardware consumer
+* :c:func:`iio_hw_consumer_free` — Free IIO hardware consumer
+* :c:func:`iio_hw_consumer_enable` — Enable IIO hardware consumer
+* :c:func:`iio_hw_consumer_disable` — Disable IIO hardware consumer
+
+
+HW consumer setup
+=================
+
+As with a standard IIO device, the implementation is based on IIO provider/consumer.
+A typical IIO HW consumer setup looks like this::
+
+       static struct iio_hw_consumer *hwc;
+
+       static const struct iio_info adc_info = {
+               .read_raw = adc_read_raw,
+       };
+
+       static int adc_read_raw(struct iio_dev *indio_dev,
+                               struct iio_chan_spec const *chan, int *val,
+                               int *val2, long mask)
+       {
+               ret = iio_hw_consumer_enable(hwc);
+
+               /* Acquire data */
+
+               ret = iio_hw_consumer_disable(hwc);
+       }
+
+       static int adc_probe(struct platform_device *pdev)
+       {
+               hwc = devm_iio_hw_consumer_alloc(&iio->dev);
+       }
+
+More details
+============
+.. kernel-doc:: include/linux/iio/hw-consumer.h
+.. kernel-doc:: drivers/iio/buffer/industrialio-hw-consumer.c
+   :export:
+
index e5c3922..7fba341 100644 (file)
@@ -15,3 +15,4 @@ Contents:
    buffers
    triggers
    triggered-buffers
+   hw-consumer
index 01a6c8b..ca85e5e 100644 (file)
@@ -25,9 +25,6 @@ PCI Support Library
 .. kernel-doc:: drivers/pci/irq.c
    :export:
 
-.. kernel-doc:: drivers/pci/htirq.c
-   :export:
-
 .. kernel-doc:: drivers/pci/probe.c
    :export:
 
index 53c1b0b..1128705 100644 (file)
@@ -777,17 +777,51 @@ The driver can indicate that by setting ``DPM_FLAG_SMART_SUSPEND`` in
 runtime suspend at the beginning of the ``suspend_late`` phase of system-wide
 suspend (or in the ``poweroff_late`` phase of hibernation), when runtime PM
 has been disabled for it, under the assumption that its state should not change
-after that point until the system-wide transition is over.  If that happens, the
-driver's system-wide resume callbacks, if present, may still be invoked during
-the subsequent system-wide resume transition and the device's runtime power
-management status may be set to "active" before enabling runtime PM for it,
-so the driver must be prepared to cope with the invocation of its system-wide
-resume callbacks back-to-back with its ``->runtime_suspend`` one (without the
-intervening ``->runtime_resume`` and so on) and the final state of the device
-must reflect the "active" status for runtime PM in that case.
+after that point until the system-wide transition is over (the PM core itself
+does that for devices whose "noirq", "late" and "early" system-wide PM callbacks
+are executed directly by it).  If that happens, the driver's system-wide resume
+callbacks, if present, may still be invoked during the subsequent system-wide
+resume transition and the device's runtime power management status may be set
+to "active" before enabling runtime PM for it, so the driver must be prepared to
+cope with the invocation of its system-wide resume callbacks back-to-back with
+its ``->runtime_suspend`` one (without the intervening ``->runtime_resume`` and
+so on) and the final state of the device must reflect the "active" runtime PM
+status in that case.
 
 During system-wide resume from a sleep state it's easiest to put devices into
 the full-power state, as explained in :file:`Documentation/power/runtime_pm.txt`.
-Refer to that document for more information regarding this particular issue as
+[Refer to that document for more information regarding this particular issue as
 well as for information on the device runtime power management framework in
-general.
+general.]
+
+However, it often is desirable to leave devices in suspend after system
+transitions to the working state, especially if those devices had been in
+runtime suspend before the preceding system-wide suspend (or analogous)
+transition.  Device drivers can use the ``DPM_FLAG_LEAVE_SUSPENDED`` flag to
+indicate to the PM core (and middle-layer code) that they prefer the specific
+devices handled by them to be left suspended and they have no problems with
+skipping their system-wide resume callbacks for this reason.  Whether or not the
+devices will actually be left in suspend may depend on their state before the
+given system suspend-resume cycle and on the type of the system transition under
+way.  In particular, devices are not left suspended if that transition is a
+restore from hibernation, as device states are not guaranteed to be reflected
+by the information stored in the hibernation image in that case.
+
+The middle-layer code involved in the handling of the device is expected to
+indicate to the PM core if the device may be left in suspend by setting its
+:c:member:`power.may_skip_resume` status bit which is checked by the PM core
+during the "noirq" phase of the preceding system-wide suspend (or analogous)
+transition.  The middle layer is then responsible for handling the device as
+appropriate in its "noirq" resume callback, which is executed regardless of
+whether or not the device is left suspended, but the other resume callbacks
+(except for ``->complete``) will be skipped automatically by the PM core if the
+device really can be left in suspend.
+
+For devices whose "noirq", "late" and "early" driver callbacks are invoked
+directly by the PM core, all of the system-wide resume callbacks are skipped if
+``DPM_FLAG_LEAVE_SUSPENDED`` is set and the device is in runtime suspend during
+the ``suspend_noirq`` (or analogous) phase or the transition under way is a
+proper system suspend (rather than anything related to hibernation) and the
+device's wakeup settings are suitable for runtime PM (that is, it cannot
+generate wakeup signals at all or it is allowed to wake up the system from
+sleep).
index c180045..7c1bb3d 100644 (file)
@@ -384,6 +384,9 @@ RESET
   devm_reset_control_get()
   devm_reset_controller_register()
 
+SERDEV
+  devm_serdev_device_open()
+
 SLAVE DMA ENGINE
   devm_acpi_dma_controller_register()
 
index c0727dc..f2f3f85 100644 (file)
@@ -25,8 +25,8 @@ available from the following download page.  At least "mkfs.nilfs2",
 cleaner or garbage collector) are required.  Details on the tools are
 described in the man pages included in the package.
 
-Project web page:    http://nilfs.sourceforge.net/
-Download page:       http://nilfs.sourceforge.net/en/download.html
+Project web page:    https://nilfs.sourceforge.io/
+Download page:       https://nilfs.sourceforge.io/en/download.html
 List info:           http://vger.kernel.org/vger-lists.html#linux-nilfs
 
 Caveats
index 8caa607..e6a5f49 100644 (file)
@@ -156,6 +156,40 @@ handle it in two different ways:
    root of the overlay.  Finally the directory is moved to the new
    location.
 
+There are several ways to tune the "redirect_dir" feature.
+
+Kernel config options:
+
+- OVERLAY_FS_REDIRECT_DIR:
+    If this is enabled, then redirect_dir is turned on by default.
+- OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW:
+    If this is enabled, then redirects are always followed by default. Enabling
+    this results in a less secure configuration.  Enable this option only when
+    worried about backward compatibility with kernels that have the redirect_dir
+    feature and follow redirects even if turned off.
+
+Module options (can also be changed through /sys/module/overlay/parameters/*):
+
+- "redirect_dir=BOOL":
+    See OVERLAY_FS_REDIRECT_DIR kernel config option above.
+- "redirect_always_follow=BOOL":
+    See OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW kernel config option above.
+- "redirect_max=NUM":
+    The maximum number of bytes in an absolute redirect (default is 256).
+
+Mount options:
+
+- "redirect_dir=on":
+    Redirects are enabled.
+- "redirect_dir=follow":
+    Redirects are not created, but followed.
+- "redirect_dir=off":
+    Redirects are not created and only followed if "redirect_always_follow"
+    feature is enabled in the kernel/module config.
+- "redirect_dir=nofollow":
+    Redirects are not created and not followed (equivalent to "redirect_dir=off"
+    if "redirect_always_follow" feature is not enabled).
+
 Non-directories
 ---------------
 
index 2e7ee03..e94d3ac 100644 (file)
@@ -341,10 +341,7 @@ GuC
 GuC-specific firmware loader
 ----------------------------
 
-.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
-   :doc: GuC-specific firmware loader
-
-.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fw.c
    :internal:
 
 GuC-based command submission
index 262722d..c4a293a 100644 (file)
@@ -200,10 +200,14 @@ module state. Dependency expressions have the following syntax:
 <expr> ::= <symbol>                             (1)
            <symbol> '=' <symbol>                (2)
            <symbol> '!=' <symbol>               (3)
-           '(' <expr> ')'                       (4)
-           '!' <expr>                           (5)
-           <expr> '&&' <expr>                   (6)
-           <expr> '||' <expr>                   (7)
+           <symbol1> '<' <symbol2>              (4)
+           <symbol1> '>' <symbol2>              (4)
+           <symbol1> '<=' <symbol2>             (4)
+           <symbol1> '>=' <symbol2>             (4)
+           '(' <expr> ')'                       (5)
+           '!' <expr>                           (6)
+           <expr> '&&' <expr>                   (7)
+           <expr> '||' <expr>                   (8)
 
 Expressions are listed in decreasing order of precedence. 
 
@@ -214,10 +218,13 @@ Expressions are listed in decreasing order of precedence.
     otherwise 'n'.
 (3) If the values of both symbols are equal, it returns 'n',
     otherwise 'y'.
-(4) Returns the value of the expression. Used to override precedence.
-(5) Returns the result of (2-/expr/).
-(6) Returns the result of min(/expr/, /expr/).
-(7) Returns the result of max(/expr/, /expr/).
+(4) If the value of <symbol1> is respectively lower than, greater than,
+    lower-or-equal to, or greater-or-equal to the value of <symbol2>, it
+    returns 'y', otherwise 'n'.
+(5) Returns the value of the expression. Used to override precedence.
+(6) Returns the result of (2-/expr/).
+(7) Returns the result of min(/expr/, /expr/).
+(8) Returns the result of max(/expr/, /expr/).
 
 An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2
 respectively for calculations). A menu entry becomes visible when its
diff --git a/Documentation/locking/crossrelease.txt b/Documentation/locking/crossrelease.txt
deleted file mode 100644 (file)
index bdf1423..0000000
+++ /dev/null
@@ -1,874 +0,0 @@
-Crossrelease
-============
-
-Started by Byungchul Park <byungchul.park@lge.com>
-
-Contents:
-
- (*) Background
-
-     - What causes deadlock
-     - How lockdep works
-
- (*) Limitation
-
-     - Limit lockdep
-     - Pros from the limitation
-     - Cons from the limitation
-     - Relax the limitation
-
- (*) Crossrelease
-
-     - Introduce crossrelease
-     - Introduce commit
-
- (*) Implementation
-
-     - Data structures
-     - How crossrelease works
-
- (*) Optimizations
-
-     - Avoid duplication
-     - Lockless for hot paths
-
- (*) APPENDIX A: What lockdep does to work aggresively
-
- (*) APPENDIX B: How to avoid adding false dependencies
-
-
-==========
-Background
-==========
-
-What causes deadlock
---------------------
-
-A deadlock occurs when a context is waiting for an event to happen,
-which is impossible because another (or the) context who can trigger the
-event is also waiting for another (or the) event to happen, which is
-also impossible due to the same reason.
-
-For example:
-
-   A context going to trigger event C is waiting for event A to happen.
-   A context going to trigger event A is waiting for event B to happen.
-   A context going to trigger event B is waiting for event C to happen.
-
-A deadlock occurs when these three wait operations run at the same time,
-because event C cannot be triggered if event A does not happen, which in
-turn cannot be triggered if event B does not happen, which in turn
-cannot be triggered if event C does not happen. After all, no event can
-be triggered since any of them never meets its condition to wake up.
-
-A dependency might exist between two waiters and a deadlock might happen
-due to an incorrect releationship between dependencies. Thus, we must
-define what a dependency is first. A dependency exists between them if:
-
-   1. There are two waiters waiting for each event at a given time.
-   2. The only way to wake up each waiter is to trigger its event.
-   3. Whether one can be woken up depends on whether the other can.
-
-Each wait in the example creates its dependency like:
-
-   Event C depends on event A.
-   Event A depends on event B.
-   Event B depends on event C.
-
-   NOTE: Precisely speaking, a dependency is one between whether a
-   waiter for an event can be woken up and whether another waiter for
-   another event can be woken up. However from now on, we will describe
-   a dependency as if it's one between an event and another event for
-   simplicity.
-
-And they form circular dependencies like:
-
-    -> C -> A -> B -
-   /                \
-   \                /
-    ----------------
-
-   where 'A -> B' means that event A depends on event B.
-
-Such circular dependencies lead to a deadlock since no waiter can meet
-its condition to wake up as described.
-
-CONCLUSION
-
-Circular dependencies cause a deadlock.
-
-
-How lockdep works
------------------
-
-Lockdep tries to detect a deadlock by checking dependencies created by
-lock operations, acquire and release. Waiting for a lock corresponds to
-waiting for an event, and releasing a lock corresponds to triggering an
-event in the previous section.
-
-In short, lockdep does:
-
-   1. Detect a new dependency.
-   2. Add the dependency into a global graph.
-   3. Check if that makes dependencies circular.
-   4. Report a deadlock or its possibility if so.
-
-For example, consider a graph built by lockdep that looks like:
-
-   A -> B -
-           \
-            -> E
-           /
-   C -> D -
-
-   where A, B,..., E are different lock classes.
-
-Lockdep will add a dependency into the graph on detection of a new
-dependency. For example, it will add a dependency 'E -> C' when a new
-dependency between lock E and lock C is detected. Then the graph will be:
-
-       A -> B -
-               \
-                -> E -
-               /      \
-    -> C -> D -        \
-   /                   /
-   \                  /
-    ------------------
-
-   where A, B,..., E are different lock classes.
-
-This graph contains a subgraph which demonstrates circular dependencies:
-
-                -> E -
-               /      \
-    -> C -> D -        \
-   /                   /
-   \                  /
-    ------------------
-
-   where C, D and E are different lock classes.
-
-This is the condition under which a deadlock might occur. Lockdep
-reports it on detection after adding a new dependency. This is the way
-how lockdep works.
-
-CONCLUSION
-
-Lockdep detects a deadlock or its possibility by checking if circular
-dependencies were created after adding each new dependency.
-
-
-==========
-Limitation
-==========
-
-Limit lockdep
--------------
-
-By limiting lockdep to typical locks, e.g. spin locks and mutexes,
-which are released within the acquire context, the implementation stays
-simple, but its detection capacity becomes limited. Let's check the
-pros and cons in the following sections.
-
-
-Pros from the limitation
-------------------------
-
-Given the limitation, when acquiring a lock, none of the locks already
-in held_locks can be released while the context has to wait for the new
-lock, which means all waiters for the locks in held_locks are stuck.
-This is exactly the case that creates dependencies between each lock in
-held_locks and the lock being acquired.
-
-For example:
-
-   CONTEXT X
-   ---------
-   acquire A
-   acquire B /* Add a dependency 'A -> B' */
-   release B
-   release A
-
-   where A and B are different lock classes.
-
-When acquiring lock A, the held_locks of CONTEXT X is empty, thus no
-dependency is added. But when acquiring lock B, lockdep detects and adds
-a new dependency 'A -> B' between lock A in the held_locks and lock B.
-Such dependencies can simply be added whenever a lock is acquired.
-
-And the data required by lockdep lives in a local structure, held_locks,
-embedded in task_struct. By forcing accesses to that data to happen only
-within the owner context, lockdep can avoid races on the local data
-without explicit locks.
-
-Lastly, lockdep only needs to keep the locks currently being held in
-order to build a dependency graph. However, once the limitation is
-relaxed, it needs to keep even locks that have already been released,
-because the decision about whether they created dependencies might be
-deferred for a long time.
-
-To sum up, we can expect several advantages from the limitation:
-
-   1. Lockdep can easily identify a dependency when acquiring a lock.
-   2. Races are avoided while accessing the local data in held_locks.
-   3. Lockdep only needs to keep locks currently being held.
-
-CONCLUSION
-
-Given the limitation, the implementation becomes simple and efficient.
-
-
-Cons from the limitation
-------------------------
-
-Given the limitation, lockdep is applicable only to typical locks. For
-example, page locks for page access or completions for synchronization
-cannot work with lockdep.
-
-Can we detect deadlocks below, under the limitation?
-
-Example 1:
-
-   CONTEXT X      CONTEXT Y       CONTEXT Z
-   ---------      ---------       ----------
-                  mutex_lock A
-   lock_page B
-                  lock_page B
-                                  mutex_lock A /* DEADLOCK */
-                                  unlock_page B held by X
-                  unlock_page B
-                  mutex_unlock A
-                                  mutex_unlock A
-
-   where A and B are different lock classes.
-
-No, we cannot.
-
-Example 2:
-
-   CONTEXT X              CONTEXT Y
-   ---------              ---------
-                          mutex_lock A
-   mutex_lock A
-                          wait_for_complete B /* DEADLOCK */
-   complete B
-                          mutex_unlock A
-   mutex_unlock A
-
-   where A is a lock class and B is a completion variable.
-
-No, we cannot.
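-
-For reference, the pattern in Example 2 could be written as the sketch
-below. This is only an illustration, not a test case from the kernel
-tree; the function and variable names are made up, and whichever thread
-wins the race for mutex A determines whether the deadlock actually
-triggers:
-
-   #include <linux/completion.h>
-   #include <linux/kthread.h>
-   #include <linux/module.h>
-   #include <linux/mutex.h>
-
-   static DEFINE_MUTEX(lock_a);           /* lock class A */
-   static DECLARE_COMPLETION(event_b);    /* completion B */
-
-   static int context_y(void *unused)
-   {
-           mutex_lock(&lock_a);
-           wait_for_completion(&event_b); /* DEADLOCK: B never completes */
-           mutex_unlock(&lock_a);
-           return 0;
-   }
-
-   static int context_x(void *unused)
-   {
-           mutex_lock(&lock_a);           /* blocks while Y holds A */
-           complete(&event_b);            /* never reached */
-           mutex_unlock(&lock_a);
-           return 0;
-   }
-
-   static int __init xrelease_demo_init(void)
-   {
-           kthread_run(context_y, NULL, "xrelease-demo-y");
-           kthread_run(context_x, NULL, "xrelease-demo-x");
-           return 0;
-   }
-   module_init(xrelease_demo_init);
-   MODULE_LICENSE("GPL");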
-
-CONCLUSION
-
-Given the limitation, lockdep cannot detect a deadlock or its
-possibility caused by page locks or completions.
-
-
-Relax the limitation
---------------------
-
-Under the limitation, only typical locks can create dependencies.
-However, synchronization primitives like page locks and completions,
-which are allowed to be released in any context, also create
-dependencies and can cause a deadlock. So lockdep should track these
-locks as well to do a better job. We have to relax the limitation for
-these locks to work with lockdep.
-
-Detecting dependencies is very important for lockdep to work because
-adding a dependency means adding an opportunity to check whether it
-causes a deadlock. The more dependencies lockdep adds, the more
-thoroughly it works. Thus lockdep has to do its best to detect and add
-as many true dependencies to the graph as possible.
-
-For example, considering only typical locks, lockdep builds a graph like:
-
-   A -> B -
-           \
-            -> E
-           /
-   C -> D -
-
-   where A, B,..., E are different lock classes.
-
-On the other hand, under the relaxation, additional dependencies might
-be created and added. Assuming additional 'FX -> C' and 'E -> GX' are
-added thanks to the relaxation, the graph will be:
-
-         A -> B -
-                 \
-                  -> E -> GX
-                 /
-   FX -> C -> D -
-
-   where A, B,..., E, FX and GX are different lock classes, and a suffix
-   'X' is added on non-typical locks.
-
-The latter graph gives us more chances to check circular dependencies
-than the former. However, it might suffer performance degradation,
-since relaxing the limitation, which is what keeps lockdep's design and
-implementation efficient, inevitably introduces some overhead. So
-lockdep should provide two options: strong detection and efficient
-detection.
-
-Choosing efficient detection:
-
-   Lockdep works only with locks that are released within the acquire
-   context. However, lockdep works efficiently.
-
-Choosing strong detection:
-
-   Lockdep works with all synchronization primitives. However, lockdep
-   suffers performance degradation.
-
-CONCLUSION
-
-By relaxing the limitation, lockdep can add additional dependencies,
-giving it additional opportunities to check for circular dependencies.
-
-
-============
-Crossrelease
-============
-
-Introduce crossrelease
-----------------------
-
-In order to allow lockdep to handle additional dependencies created by
-locks that might be released in any context, namely 'crosslocks', we
-have to be able to identify the dependencies created by them. The
-proposed 'crossrelease' feature provides a way to do that.
-
-Crossrelease feature has to do:
-
-   1. Identify dependencies created by crosslocks.
-   2. Add the dependencies into a dependency graph.
-
-That's all. Once a meaningful dependency is added into the graph,
-lockdep works with the graph as it always did. The most important thing
-the crossrelease feature has to do is to correctly identify and add true
-dependencies into the global graph.
-
-A dependency, e.g. 'A -> B', can be identified only in A's release
-context, because the decision required to identify it can be made only
-there. That decision is whether A can be released so that a waiter for
-A can be woken up, and it cannot be made outside A's release context.
-
-This is not a problem for typical locks, because each acquire context
-is the same as its release context, so lockdep can decide in the acquire
-context whether a lock can be released. For crosslocks, however, lockdep
-cannot make that decision in the acquire context and has to wait until
-the release context is identified.
-
-Therefore, deadlocks caused by crosslocks cannot be detected at the
-moment they happen, because the dependencies cannot be identified until
-the crosslocks are released. However, deadlock possibilities can still
-be detected, and that is very valuable. See the 'APPENDIX A' section
-for why.
-
-CONCLUSION
-
-Using the crossrelease feature, lockdep can work with locks that might
-be released in any context, namely crosslocks.
-
-
-Introduce commit
-----------------
-
-Since crossrelease defers the work of adding the true dependencies of
-crosslocks until they are actually released, crossrelease has to queue
-all acquisitions which might create dependencies with the crosslocks.
-Then it identifies those dependencies in batches, using the queued data,
-at a proper time. We call this 'commit'.
-
-There are four types of dependencies:
-
-1. TT type: 'typical lock A -> typical lock B'
-
-   Just when acquiring B, lockdep can see that it's in A's release
-   context. So the dependency between A and B can be identified
-   immediately. Commit is unnecessary.
-
-2. TC type: 'typical lock A -> crosslock BX'
-
-   Just when acquiring BX, lockdep can see that it's in A's release
-   context. So the dependency between A and BX can be identified
-   immediately. Commit is unnecessary, too.
-
-3. CT type: 'crosslock AX -> typical lock B'
-
-   When acquiring B, lockdep cannot identify the dependency because
-   there's no way to know if it's in AX's release context. It has
-   to wait until the decision can be made. Commit is necessary.
-
-4. CC type: 'crosslock AX -> crosslock BX'
-
-   When acquiring BX, lockdep cannot identify the dependency because
-   there's no way to know if it's in AX's release context. It has
-   to wait until the decision can be made. Commit is necessary.
-   But, handling the CC type is not implemented yet; it is future work.
-
-Lockdep can work without commit for typical locks, but the commit step
-is necessary once crosslocks are involved. With commit introduced,
-lockdep performs three steps. What lockdep does in each step is:
-
-1. Acquisition: For typical locks, lockdep does what it originally did
-   and queues the lock so that CT type dependencies can be checked using
-   it at the commit step. For crosslocks, it saves data which will be
-   used at the commit step and increases a reference count for it.
-
-2. Commit: No action is required for typical locks. For crosslocks,
-   lockdep adds CT type dependencies using the data saved at the
-   acquisition step.
-
-3. Release: No changes are required for typical locks. When a crosslock
-   is released, lockdep decreases the reference count for it.
-
-CONCLUSION
-
-Crossrelease introduces a commit step to handle the dependencies of
-crosslocks in batches at a proper time.
-
-
-==============
-Implementation
-==============
-
-Data structures
----------------
-
-Crossrelease introduces two main data structures.
-
-1. hist_lock
-
-   This is an array embedded in task_struct, keeping the lock history so
-   that dependencies can be added using it at the commit step. Since
-   it's local data, it can be accessed locklessly in the owner context.
-   The array is filled at the acquisition step and consumed at the
-   commit step, and it's managed in a circular manner.
-
-2. cross_lock
-
-   One exists per lockdep_map. It keeps the data of a crosslock and is
-   used at the commit step.
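-
-To give a rough feel only, a heavily simplified sketch of these two
-structures and of the commit loop over the history ring might look as
-follows. The structure and helper names here are invented for
-illustration and do not match the kernel's actual definitions:
-
-   #include <linux/lockdep.h>
-
-   #define XHLOCKS_NR_DEMO 64     /* assumed ring size, illustrative */
-
-   /* One entry of per-task lock history (simplified). */
-   struct hist_lock_demo {
-           struct lockdep_map *map;       /* which lock was acquired */
-   };
-
-   /* Per-lockdep_map crosslock state (simplified). */
-   struct cross_lock_demo {
-           int nr_acquire;                /* acquisition reference count */
-           unsigned int start_idx;        /* history index at acquire time */
-   };
-
-   /* Stand-in for the real graph update; a no-op in this sketch. */
-   static void add_dependency_demo(struct cross_lock_demo *xlock,
-                                   struct hist_lock_demo *hlock)
-   {
-   }
-
-   /*
-    * Commit: add 'crosslock -> T' for every typical lock T queued in
-    * the history ring after the crosslock was acquired.
-    */
-   static void commit_demo(struct cross_lock_demo *xlock,
-                           struct hist_lock_demo *hist,
-                           unsigned int cur_idx)
-   {
-           unsigned int i;
-
-           for (i = xlock->start_idx; i != cur_idx;
-                i = (i + 1) % XHLOCKS_NR_DEMO)
-                   add_dependency_demo(xlock, &hist[i]);
-   }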
-
-
-How crossrelease works
-----------------------
-
-The key to how crossrelease works is to defer the necessary work to an
-appropriate point in time and perform it all at once at the commit step.
-Let's take a look at some examples step by step, starting from how
-lockdep works for typical locks without crossrelease.
-
-   acquire A /* Push A onto held_locks */
-   acquire B /* Push B onto held_locks and add 'A -> B' */
-   acquire C /* Push C onto held_locks and add 'B -> C' */
-   release C /* Pop C from held_locks */
-   release B /* Pop B from held_locks */
-   release A /* Pop A from held_locks */
-
-   where A, B and C are different lock classes.
-
-   NOTE: This document assumes that readers already understand how
-   lockdep works without crossrelease and thus omits details. But there's
-   one thing to note. Lockdep pretends to pop a lock from held_locks
-   when releasing it, but this is subtly different from a true pop
-   operation because lockdep allows entries other than the top to be
-   popped.
-
-In this case, lockdep adds 'the top of held_locks -> the lock to acquire'
-dependency every time a lock is acquired.
-
-After adding 'A -> B', a dependency graph will be:
-
-   A -> B
-
-   where A and B are different lock classes.
-
-And after adding 'B -> C', the graph will be:
-
-   A -> B -> C
-
-   where A, B and C are different lock classes.
-
-Let's perform the commit step even for typical locks to add
-dependencies. Of course, the commit step is not necessary for them;
-however, it still works because this is a more general way.
-
-   acquire A
-   /*
-    * Queue A into hist_locks
-    *
-    * In hist_locks: A
-    * In graph: Empty
-    */
-
-   acquire B
-   /*
-    * Queue B into hist_locks
-    *
-    * In hist_locks: A, B
-    * In graph: Empty
-    */
-
-   acquire C
-   /*
-    * Queue C into hist_locks
-    *
-    * In hist_locks: A, B, C
-    * In graph: Empty
-    */
-
-   commit C
-   /*
-    * Add 'C -> ?'
-    * Answer the following to decide '?'
-    * What has been queued since acquire C: Nothing
-    *
-    * In hist_locks: A, B, C
-    * In graph: Empty
-    */
-
-   release C
-
-   commit B
-   /*
-    * Add 'B -> ?'
-    * Answer the following to decide '?'
-    * What has been queued since acquire B: C
-    *
-    * In hist_locks: A, B, C
-    * In graph: 'B -> C'
-    */
-
-   release B
-
-   commit A
-   /*
-    * Add 'A -> ?'
-    * Answer the following to decide '?'
-    * What has been queued since acquire A: B, C
-    *
-    * In hist_locks: A, B, C
-    * In graph: 'B -> C', 'A -> B', 'A -> C'
-    */
-
-   release A
-
-   where A, B and C are different lock classes.
-
-In this case, dependencies are added at the commit step as described.
-
-After commits for A, B and C, the graph will be:
-
-   A -> B -> C
-
-   where A, B and C are different lock classes.
-
-   NOTE: A dependency 'A -> C' is optimized out.
-
-We can see that the former graph, built without the commit step, is the
-same as the latter graph, built using commit steps. Of course the former
-way finishes building the graph earlier, which means a deadlock or its
-possibility can be detected sooner. So the former way would be preferred
-when possible. But we cannot avoid using the latter way for crosslocks.
-
-Let's look at how commit steps work for crosslocks. In this case, the
-commit step is actually performed only on crosslock BX, and the example
-assumes that the BX release context is different from the BX acquire
-context.
-
-   BX RELEASE CONTEXT             BX ACQUIRE CONTEXT
-   ------------------             ------------------
-                                  acquire A
-                                  /*
-                                   * Push A onto held_locks
-                                   * Queue A into hist_locks
-                                   *
-                                   * In held_locks: A
-                                   * In hist_locks: A
-                                   * In graph: Empty
-                                   */
-
-                                  acquire BX
-                                  /*
-                                   * Add 'the top of held_locks -> BX'
-                                   *
-                                   * In held_locks: A
-                                   * In hist_locks: A
-                                   * In graph: 'A -> BX'
-                                   */
-
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-   It must be guaranteed that the following operations are globally seen
-   as happening after acquiring BX. This can be achieved with things like
-   memory barriers.
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-   acquire C
-   /*
-    * Push C onto held_locks
-    * Queue C into hist_locks
-    *
-    * In held_locks: C
-    * In hist_locks: C
-    * In graph: 'A -> BX'
-    */
-
-   release C
-   /*
-    * Pop C from held_locks
-    *
-    * In held_locks: Empty
-    * In hist_locks: C
-    * In graph: 'A -> BX'
-    */
-                                  acquire D
-                                  /*
-                                   * Push D onto held_locks
-                                   * Queue D into hist_locks
-                                   * Add 'the top of held_locks -> D'
-                                   *
-                                   * In held_locks: A, D
-                                   * In hist_locks: A, D
-                                   * In graph: 'A -> BX', 'A -> D'
-                                   */
-   acquire E
-   /*
-    * Push E onto held_locks
-    * Queue E into hist_locks
-    *
-    * In held_locks: E
-    * In hist_locks: C, E
-    * In graph: 'A -> BX', 'A -> D'
-    */
-
-   release E
-   /*
-    * Pop E from held_locks
-    *
-    * In held_locks: Empty
-    * In hist_locks: C, E
-    * In graph: 'A -> BX', 'A -> D'
-    */
-                                  release D
-                                  /*
-                                   * Pop D from held_locks
-                                   *
-                                   * In held_locks: A
-                                   * In hist_locks: A, D
-                                   * In graph: 'A -> BX', 'A -> D'
-                                   */
-   commit BX
-   /*
-    * Add 'BX -> ?'
-    * What has been queued since acquire BX: C, E
-    *
-    * In held_locks: Empty
-    * In hist_locks: C, E
-    * In graph: 'A -> BX', 'A -> D',
-    *           'BX -> C', 'BX -> E'
-    */
-
-   release BX
-   /*
-    * In held_locks: Empty
-    * In hist_locks: C, E
-    * In graph: 'A -> BX', 'A -> D',
-    *           'BX -> C', 'BX -> E'
-    */
-                                  release A
-                                  /*
-                                   * Pop A from held_locks
-                                   *
-                                   * In held_locks: Empty
-                                   * In hist_locks: A, D
-                                   * In graph: 'A -> BX', 'A -> D',
-                                   *           'BX -> C', 'BX -> E'
-                                   */
-
-   where A, BX, C,..., E are different lock classes, and a suffix 'X' is
-   added on crosslocks.
-
-Crossrelease considers all acquisitions after acquiring BX as
-candidates which might create dependencies with BX. True dependencies
-will be determined when identifying the release context of BX. Meanwhile,
-all typical locks are queued so that they can be used at the commit step.
-And then two dependencies 'BX -> C' and 'BX -> E' are added at the
-commit step when identifying the release context.
-
-The final graph will be, with crossrelease:
-
-               -> C
-              /
-       -> BX -
-      /       \
-   A -         -> E
-      \
-       -> D
-
-   where A, BX, C,..., E are different lock classes, and a suffix 'X' is
-   added on crosslocks.
-
-However, the final graph will be, without crossrelease:
-
-   A -> D
-
-   where A and D are different lock classes.
-
-The former graph has three more dependencies, 'A -> BX', 'BX -> C' and
-'BX -> E', giving additional opportunities to check if they cause
-deadlocks. This way lockdep can detect a deadlock or its possibility
-caused by crosslocks.
-
-CONCLUSION
-
-We checked how crossrelease works with several examples.
-
-
-=============
-Optimizations
-=============
-
-Avoid duplication
------------------
-
-The crossrelease feature uses a cache, like the one lockdep already uses
-for dependency chains, but this time it caches CT type dependencies.
-Once a dependency is cached, the same one will never be added again.
-
-
-Lockless for hot paths
-----------------------
-
-To keep all locks for later use at the commit step, crossrelease adopts
-a local array embedded in task_struct, which makes access to the data
-lockless by forcing it to happen only within the owner context. It's
-like how lockdep handles held_locks. A lockless implementation is
-important since typical locks are acquired and released very frequently.
-
-
-==================================================
-APPENDIX A: What lockdep does to work aggressively
-==================================================
-
-A deadlock actually occurs when all wait operations creating circular
-dependencies run at the same time. Even if they don't, a potential
-deadlock exists if the problematic dependencies exist. Thus it's
-meaningful to detect not only an actual deadlock but also its
-possibility. The latter is the more valuable of the two. When a deadlock
-actually occurs, we can identify what happened in the system by some
-means or other even without lockdep. However, there's no way to detect
-the possibility without lockdep unless the whole code is analyzed by
-hand, which is impractical. Lockdep does both, and crossrelease focuses
-only on the latter.
-
-Whether or not a deadlock actually occurs depends on several factors.
-For example, what order contexts are switched in is a factor. Assuming
-circular dependencies exist, a deadlock would occur when contexts are
-switched so that all wait operations creating the dependencies run
-simultaneously. Thus, to detect a deadlock possibility even when it has
-not occurred yet, lockdep should consider all possible combinations of
-dependencies. To do so, it tries to:
-
-1. Use a global dependency graph.
-
-   Lockdep combines all dependencies into one global graph and uses them,
-   regardless of which context generates them or what order contexts are
-   switched in. Only the aggregated dependencies are considered, so they
-   are prone to form a circle if a problem exists.
-
-2. Check dependencies between classes instead of instances.
-
-   What actually causes a deadlock are lock instances. However, lockdep
-   checks dependencies between classes instead of instances. This way
-   lockdep can detect a deadlock which has not happened yet but might
-   happen in the future with other instances of the same class.
-
-3. Assume all acquisitions lead to waiting.
-
-   Although a lock might be acquired without waiting, and waiting is
-   what is essential for creating a dependency, lockdep assumes that all
-   acquisitions lead to waiting, since they might do so at some time or
-   another.
-
-CONCLUSION
-
-Lockdep detects not only an actual deadlock but also its possibility,
-and the latter is more valuable.
-
-
-==================================================
-APPENDIX B: How to avoid adding false dependencies
-==================================================
-
-Recall what a dependency is. A dependency exists if:
-
-   1. There are two waiters waiting for each event at a given time.
-   2. The only way to wake up each waiter is to trigger its event.
-   3. Whether one can be woken up depends on whether the other can.
-
-For example:
-
-   acquire A
-   acquire B /* A dependency 'A -> B' exists */
-   release B
-   release A
-
-   where A and B are different lock classes.
-
-A dependency 'A -> B' exists since:
-
-   1. A waiter for A and a waiter for B might exist when acquiring B.
-   2. The only way to wake up each one is to release what it waits for.
-   3. Whether the waiter for A can be woken up depends on whether the
-      other can. IOW, TASK X cannot release A if it fails to acquire B.
-
-For another example:
-
-   TASK X                         TASK Y
-   ------                         ------
-                                  acquire AX
-   acquire B /* A dependency 'AX -> B' exists */
-   release B
-   release AX held by Y
-
-   where AX and B are different lock classes, and a suffix 'X' is added
-   on crosslocks.
-
-Even in this case involving crosslocks, the same rule can be applied. A
-dependency 'AX -> B' exists since:
-
-   1. A waiter for AX and a waiter for B might exist when acquiring B.
-   2. The only way to wake up each one is to release what it waits for.
-   3. Whether the waiter for AX can be woken up depends on whether the
-      other can. IOW, TASK X cannot release AX if it fails to acquire B.
-
-Let's take a look at a more complicated example:
-
-   TASK X                         TASK Y
-   ------                         ------
-   acquire B
-   release B
-   fork Y
-                                  acquire AX
-   acquire C /* A dependency 'AX -> C' exists */
-   release C
-   release AX held by Y
-
-   where AX, B and C are different lock classes, and a suffix 'X' is
-   added on crosslocks.
-
-Does a dependency 'AX -> B' exist? Nope.
-
-Two waiters are essential to create a dependency. However, waiters for
-AX and B to create 'AX -> B' cannot exist at the same time in this
-example. Thus the dependency 'AX -> B' cannot be created.
-
-It would be ideal if the full set of true dependencies could be
-considered, but we can be sure of nothing except what has actually
-happened. By relying on what actually happens at runtime, we add only
-true dependencies, though they might be just a subset of all the true
-ones. It's similar to how lockdep works for typical locks: there might
-be more true dependencies than what lockdep has detected at runtime.
-Lockdep has no choice but to rely on what actually happens, and
-crossrelease relies on it as well.
-
-CONCLUSION
-
-Relying on what actually happens, lockdep can avoid adding false
-dependencies.
diff --git a/Documentation/media/dvb-drivers/frontends.rst b/Documentation/media/dvb-drivers/frontends.rst
new file mode 100644 (file)
index 0000000..1f5f579
--- /dev/null
@@ -0,0 +1,30 @@
+****************
+Frontend drivers
+****************
+
+Frontend attach headers
+***********************
+
+.. Keep it on alphabetic order
+
+.. kernel-doc:: drivers/media/dvb-frontends/a8293.h
+.. kernel-doc:: drivers/media/dvb-frontends/af9013.h
+.. kernel-doc:: drivers/media/dvb-frontends/ascot2e.h
+.. kernel-doc:: drivers/media/dvb-frontends/cxd2820r.h
+.. kernel-doc:: drivers/media/dvb-frontends/drxk.h
+.. kernel-doc:: drivers/media/dvb-frontends/dvb-pll.h
+.. kernel-doc:: drivers/media/dvb-frontends/helene.h
+.. kernel-doc:: drivers/media/dvb-frontends/horus3a.h
+.. kernel-doc:: drivers/media/dvb-frontends/ix2505v.h
+.. kernel-doc:: drivers/media/dvb-frontends/m88ds3103.h
+.. kernel-doc:: drivers/media/dvb-frontends/mb86a20s.h
+.. kernel-doc:: drivers/media/dvb-frontends/mn88472.h
+.. kernel-doc:: drivers/media/dvb-frontends/rtl2830.h
+.. kernel-doc:: drivers/media/dvb-frontends/rtl2832.h
+.. kernel-doc:: drivers/media/dvb-frontends/rtl2832_sdr.h
+.. kernel-doc:: drivers/media/dvb-frontends/stb6000.h
+.. kernel-doc:: drivers/media/dvb-frontends/tda10071.h
+.. kernel-doc:: drivers/media/dvb-frontends/tda826x.h
+.. kernel-doc:: drivers/media/dvb-frontends/zd1301_demod.h
+.. kernel-doc:: drivers/media/dvb-frontends/zl10036.h
+
index 3761411..314e127 100644 (file)
@@ -41,4 +41,5 @@ For more details see the file COPYING in the source distribution of Linux.
        technisat
        ttusb-dec
        udev
+       frontends
        contributors
index 66e6208..7d4b159 100644 (file)
@@ -9,6 +9,7 @@ Contents:
    batman-adv
    kapi
    z8530book
+   msg_zerocopy
 
 .. only::  subproject
 
@@ -16,4 +17,3 @@ Contents:
    =======
 
    * :ref:`genindex`
-
index 77f6d7e..291a012 100644 (file)
@@ -72,6 +72,10 @@ this flag, a process must first signal intent by setting a socket option:
        if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
                error(1, errno, "setsockopt zerocopy");
 
+Setting the socket option only works when the socket is in its initial
+(TCP_CLOSED) state.  Trying to set the option for a socket returned by accept(),
+for example, will lead to an EBUSY error. In this case, the option should be set
+on the listening socket, and it will be inherited by the accepted sockets.
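+
+As a rough sketch, not an excerpt from the kernel's selftests, enabling the
+option on a listening socket so that accepted connections inherit it could
+look like this (the descriptor names are made up for the example):
+
+       int one = 1;
+       int listen_fd = socket(AF_INET, SOCK_STREAM, 0);
+
+       /* bind() and listen() as usual, then enable zerocopy once: */
+       if (setsockopt(listen_fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
+               error(1, errno, "setsockopt zerocopy");
+
+       /* conn_fd inherits SO_ZEROCOPY; no further setsockopt() is needed. */
+       int conn_fd = accept(listen_fd, NULL, NULL);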
 
 Transmission
 ------------
index 704cd36..8eaf9ee 100644 (file)
@@ -994,6 +994,17 @@ into D0 going forward), but if it is in runtime suspend in pci_pm_thaw_noirq(),
 the function will set the power.direct_complete flag for it (to make the PM core
 skip the subsequent "thaw" callbacks for it) and return.
 
+Setting the DPM_FLAG_LEAVE_SUSPENDED flag means that the driver prefers the
+device to be left in suspend after system-wide transitions to the working state.
+This flag is checked by the PM core, but the PCI bus type informs the PM core
+which devices may be left in suspend from its perspective (that happens during
+the "noirq" phase of system-wide suspend and analogous transitions) and next it
+uses the dev_pm_may_skip_resume() helper to decide whether or not to return from
+pci_pm_resume_noirq() early, as the PM core will skip the remaining resume
+callbacks for the device during the transition under way and will set its
+runtime PM status to "suspended" if dev_pm_may_skip_resume() returns "true" for
+it.
+
 3.2. Device Runtime Power Management
 ------------------------------------
 In addition to providing device power management callbacks PCI device drivers
index 361789d..aa0a776 100644 (file)
@@ -5,7 +5,6 @@ How to get printk format specifiers right
 :Author: Randy Dunlap <rdunlap@infradead.org>
 :Author: Andrew Murray <amurray@mpc-data.co.uk>
 
-
 Integer types
 =============
 
@@ -45,6 +44,18 @@ return from vsnprintf.
 Raw pointer value SHOULD be printed with %p. The kernel supports
 the following extended format specifiers for pointer types:
 
+Pointer Types
+=============
+
+Pointers printed without a specifier extension (i.e. unadorned %p) are
+hashed to give a unique identifier without leaking kernel addresses to user
+space. On 64 bit machines the first 32 bits are zeroed. If you _really_
+want the address, see %px below.
+
+::
+
+       %p      abcdef12 or 00000000abcdef12
+
 Symbols/Function Pointers
 =========================
 
@@ -85,18 +96,32 @@ Examples::
        printk("Faulted at %pS\n", (void *)regs->ip);
        printk(" %s%pB\n", (reliable ? "" : "? "), (void *)*stack);
 
-
 Kernel Pointers
 ===============
 
 ::
 
-       %pK     0x01234567 or 0x0123456789abcdef
+       %pK     01234567 or 0123456789abcdef
 
 For printing kernel pointers which should be hidden from unprivileged
 users. The behaviour of ``%pK`` depends on the ``kptr_restrict sysctl`` - see
 Documentation/sysctl/kernel.txt for more details.
 
+Unmodified Addresses
+====================
+
+::
+
+       %px     01234567 or 0123456789abcdef
+
+For printing pointers when you _really_ want to print the address. Please
+consider whether or not you are leaking sensitive information about the
+Kernel layout in memory before printing pointers with %px. %px is
+functionally equivalent to %lx. %px is preferred to %lx because it is more
+uniquely grep'able. If, in the future, we need to modify the way the Kernel
+handles printing pointers it will be nice to be able to find the call
+sites.
+
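+As an illustrative comparison (the printed values below are only examples)::
+
+	printk("%p\n",  ptr);	/* hashed, e.g. 00000000abcdef12 */
+	printk("%px\n", ptr);	/* the raw address, use sparingly */
+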
 Struct Resources
 ================
 
index 6338400..2c31d9e 100644 (file)
@@ -319,12 +319,12 @@ struct Scsi_Host:
         instance. If the reference count reaches 0 then the given instance
         is freed
 
-The Scsi_device structure has had reference counting infrastructure added.
-This effectively spreads the ownership of struct Scsi_device instances
+The scsi_device structure has had reference counting infrastructure added.
+This effectively spreads the ownership of struct scsi_device instances
 across the various SCSI layers which use them. Previously such instances
 were exclusively owned by the mid level. See the access functions declared
 towards the end of include/scsi/scsi_device.h . If an LLD wants to keep
-a copy of a pointer to a Scsi_device instance it should use scsi_device_get()
+a copy of a pointer to a scsi_device instance it should use scsi_device_get()
 to bump its reference count. When it is finished with the pointer it can
 use scsi_device_put() to decrement its reference count (and potentially
 delete it).
index b920423..5025ff9 100644 (file)
@@ -158,10 +158,6 @@ Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
 value lower than this limit will be ignored and the old configuration will be
 retained.
 
-Note: the value of dirty_bytes also must be set greater than
-dirty_background_bytes or the amount of memory corresponding to
-dirty_background_ratio.
-
 ==============================================================
 
 dirty_expire_centisecs
@@ -181,9 +177,6 @@ generating disk writes will itself start writing out dirty data.
 
 The total available memory is not equal to total system memory.
 
-Note: dirty_ratio must be set greater than dirty_background_ratio or
-ratio corresponding to dirty_background_bytes.
-
 ==============================================================
 
 dirty_writeback_centisecs
index 7165358..7df567e 100644 (file)
@@ -26,39 +26,16 @@ the user. The registration APIs returns the cooling device pointer.
    clip_cpus: cpumask of cpus where the frequency constraints will happen.
 
 1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
-       struct device_node *np, const struct cpumask *clip_cpus)
+                                       struct cpufreq_policy *policy)
 
     This interface function registers the cpufreq cooling device with
     the name "thermal-cpufreq-%x" linking it with a device tree node, in
     order to bind it via the thermal DT code. This api can support multiple
     instances of cpufreq cooling devices.
 
-    np: pointer to the cooling device device tree node
-    clip_cpus: cpumask of cpus where the frequency constraints will happen.
+    policy: CPUFreq policy.
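+
+    As a rough usage sketch (error handling omitted and the helper names
+    invented for this example), a cpufreq driver would typically register
+    from its ->ready() callback and unregister when the policy goes away:
+
+        #include <linux/cpu_cooling.h>
+        #include <linux/cpufreq.h>
+
+        static struct thermal_cooling_device *demo_cdev;
+
+        static void demo_cpufreq_ready(struct cpufreq_policy *policy)
+        {
+                demo_cdev = of_cpufreq_cooling_register(policy);
+        }
+
+        static void demo_cpufreq_teardown(struct cpufreq_policy *policy)
+        {
+                cpufreq_cooling_unregister(demo_cdev);
+        }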
 
-1.1.3 struct thermal_cooling_device *cpufreq_power_cooling_register(
-    const struct cpumask *clip_cpus, u32 capacitance,
-    get_static_t plat_static_func)
-
-Similar to cpufreq_cooling_register, this function registers a cpufreq
-cooling device.  Using this function, the cooling device will
-implement the power extensions by using a simple cpu power model.  The
-cpus must have registered their OPPs using the OPP library.
-
-The additional parameters are needed for the power model (See 2. Power
-models).  "capacitance" is the dynamic power coefficient (See 2.1
-Dynamic power).  "plat_static_func" is a function to calculate the
-static power consumed by these cpus (See 2.2 Static power).
-
-1.1.4 struct thermal_cooling_device *of_cpufreq_power_cooling_register(
-    struct device_node *np, const struct cpumask *clip_cpus, u32 capacitance,
-    get_static_t plat_static_func)
-
-Similar to cpufreq_power_cooling_register, this function register a
-cpufreq cooling device with power extensions using the device tree
-information supplied by the np parameter.
-
-1.1.5 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 
     This interface function unregisters the "thermal-cpufreq-%x" cooling device.
 
@@ -67,20 +44,14 @@ information supplied by the np parameter.
 2. Power models
 
 The power API registration functions provide a simple power model for
-CPUs.  The current power is calculated as dynamic + (optionally)
-static power.  This power model requires that the operating-points of
+CPUs.  The current power is calculated as dynamic power (static power isn't
+supported currently).  This power model requires that the operating-points of
 the CPUs are registered using the kernel's opp library and the
 `cpufreq_frequency_table` is assigned to the `struct device` of the
 cpu.  If you are using CONFIG_CPUFREQ_DT then the
 `cpufreq_frequency_table` should already be assigned to the cpu
 device.
 
-The `plat_static_func` parameter of `cpufreq_power_cooling_register()`
-and `of_cpufreq_power_cooling_register()` is optional.  If you don't
-provide it, only dynamic power will be considered.
-
-2.1 Dynamic power
-
 The dynamic power consumption of a processor depends on many factors.
 For a given processor implementation the primary factors are:
 
@@ -119,79 +90,3 @@ mW/MHz/uVolt^2.  Typical values for mobile CPUs might lie in range
 from 100 to 500.  For reference, the approximate values for the SoC in
 ARM's Juno Development Platform are 530 for the Cortex-A57 cluster and
 140 for the Cortex-A53 cluster.
-
-
-2.2 Static power
-
-Static leakage power consumption depends on a number of factors.  For a
-given circuit implementation the primary factors are:
-
-- Time the circuit spends in each 'power state'
-- Temperature
-- Operating voltage
-- Process grade
-
-The time the circuit spends in each 'power state' for a given
-evaluation period at first order means OFF or ON.  However,
-'retention' states can also be supported that reduce power during
-inactive periods without loss of context.
-
-Note: The visibility of state entries to the OS can vary, according to
-platform specifics, and this can then impact the accuracy of a model
-based on OS state information alone.  It might be possible in some
-cases to extract more accurate information from system resources.
-
-The temperature, operating voltage and process 'grade' (slow to fast)
-of the circuit are all significant factors in static leakage power
-consumption.  All of these have complex relationships to static power.
-
-Circuit implementation specific factors include the chosen silicon
-process as well as the type, number and size of transistors in both
-the logic gates and any RAM elements included.
-
-The static power consumption modelling must take into account the
-power managed regions that are implemented.  Taking the example of an
-ARM processor cluster, the modelling would take into account whether
-each CPU can be powered OFF separately or if only a single power
-region is implemented for the complete cluster.
-
-In one view, there are others, a static power consumption model can
-then start from a set of reference values for each power managed
-region (e.g. CPU, Cluster/L2) in each state (e.g. ON, OFF) at an
-arbitrary process grade, voltage and temperature point.  These values
-are then scaled for all of the following: the time in each state, the
-process grade, the current temperature and the operating voltage.
-However, since both implementation specific and complex relationships
-dominate the estimate, the appropriate interface to the model from the
-cpu cooling device is to provide a function callback that calculates
-the static power in this platform.  When registering the cpu cooling
-device pass a function pointer that follows the `get_static_t`
-prototype:
-
-    int plat_get_static(cpumask_t *cpumask, int interval,
-                        unsigned long voltage, u32 &power);
-
-`cpumask` is the cpumask of the cpus involved in the calculation.
-`voltage` is the voltage at which they are operating.  The function
-should calculate the average static power for the last `interval`
-milliseconds.  It returns 0 on success, -E* on error.  If it
-succeeds, it should store the static power in `power`.  Reading the
-temperature of the cpus described by `cpumask` is left for
-plat_get_static() to do as the platform knows best which thermal
-sensor is closest to the cpu.
-
-If `plat_static_func` is NULL, static power is considered to be
-negligible for this platform and only dynamic power is considered.
-
-The platform specific callback can then use any combination of tables
-and/or equations to permute the estimated value.  Process grade
-information is not passed to the model since access to such data, from
-on-chip measurement capability or manufacture time data, is platform
-specific.
-
-Note: the significance of static power for CPUs in comparison to
-dynamic power is highly dependent on implementation.  Given the
-potential complexity in implementation, the importance and accuracy of
-its inclusion when using cpu cooling devices should be assessed on a
-case by case basis.
-
index 441a4b9..5908a21 100644 (file)
@@ -693,7 +693,7 @@ such specification consists of a number of lines with an inverval value
 in each line. The rules stated above are best illustrated with an example:
 
 # mkdir functions/uvc.usb0/control/header/h
-# cd functions/uvc.usb0/control/header/h
+# cd functions/uvc.usb0/control/
 # ln -s header/h class/fs
 # ln -s header/h class/ss
 # mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p
index f670e4b..fc3ae95 100644 (file)
@@ -2901,14 +2901,19 @@ userspace buffer and its length:
 
 struct kvm_s390_irq_state {
        __u64 buf;
-       __u32 flags;
+       __u32 flags;        /* will stay unused for compatibility reasons */
        __u32 len;
-       __u32 reserved[4];
+       __u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
 Userspace passes in the above struct and for each pending interrupt a
 struct kvm_s390_irq is copied to the provided buffer.
 
+The structure contains a flags and a reserved field for future extensions. As
+the kernel never checked for flags == 0 and QEMU never pre-zeroed flags and
+reserved, these fields can not be used in the future without breaking
+compatibility.
+
 If -ENOBUFS is returned the buffer provided was too small and userspace
 may retry with a bigger buffer.
 
@@ -2932,10 +2937,14 @@ containing a struct kvm_s390_irq_state:
 
 struct kvm_s390_irq_state {
        __u64 buf;
+       __u32 flags;        /* will stay unused for compatibility reasons */
        __u32 len;
-       __u32 pad;
+       __u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
+The restrictions for flags and reserved apply as well.
+(see KVM_S390_GET_IRQ_STATE)
+
 The userspace memory referenced by buf contains a struct kvm_s390_irq
 for each interrupt to be injected into the guest.
 If one of the interrupts could not be injected for some reason the
@@ -3394,6 +3403,52 @@ invalid, if invalid pages are written to (e.g. after the end of memory)
 or if no page table is present for the addresses (e.g. when using
 hugepages).
 
+4.108 KVM_PPC_GET_CPU_CHAR
+
+Capability: KVM_CAP_PPC_GET_CPU_CHAR
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_ppc_cpu_char (out)
+Returns: 0 on successful completion
+        -EFAULT if struct kvm_ppc_cpu_char cannot be written
+
+This ioctl gives userspace information about certain characteristics
+of the CPU relating to speculative execution of instructions and
+possible information leakage resulting from speculative execution (see
+CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754).  The information is
+returned in struct kvm_ppc_cpu_char, which looks like this:
+
+struct kvm_ppc_cpu_char {
+       __u64   character;              /* characteristics of the CPU */
+       __u64   behaviour;              /* recommended software behaviour */
+       __u64   character_mask;         /* valid bits in character */
+       __u64   behaviour_mask;         /* valid bits in behaviour */
+};
+
+For extensibility, the character_mask and behaviour_mask fields
+indicate which bits of character and behaviour have been filled in by
+the kernel.  If the set of defined bits is extended in future then
+userspace will be able to tell whether it is running on a kernel that
+knows about the new bits.
+
+The character field describes attributes of the CPU which can help
+with preventing inadvertent information disclosure - specifically,
+whether there is an instruction to flash-invalidate the L1 data cache
+(ori 30,30,0 or mtspr SPRN_TRIG2,rN), whether the L1 data cache is set
+to a mode where entries can only be used by the thread that created
+them, whether the bcctr[l] instruction prevents speculation, and
+whether a speculation barrier instruction (ori 31,31,0) is provided.
+
+The behaviour field describes actions that software should take to
+prevent inadvertent information disclosure, and thus describes which
+vulnerabilities the hardware is subject to; specifically whether the
+L1 data cache should be flushed when returning to user mode from the
+kernel, and whether a speculation barrier should be placed between an
+array bounds check and the array access.
+
+These fields use the same bit definitions as the new
+H_GET_CPU_CHARACTERISTICS hypercall.
+
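+A minimal userspace sketch of calling this ioctl (the vm file descriptor is
+assumed to come from KVM_CREATE_VM, the headers are the powerpc uapi ones,
+and error handling is kept trivial):
+
+#include <linux/kvm.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+
+static void print_cpu_char(int vmfd)
+{
+	struct kvm_ppc_cpu_char cc;
+
+	if (ioctl(vmfd, KVM_PPC_GET_CPU_CHAR, &cc) < 0) {
+		perror("KVM_PPC_GET_CPU_CHAR");
+		return;
+	}
+	/* Only the bits set in the masks have been filled in by the kernel. */
+	printf("character 0x%llx (mask 0x%llx)\n",
+	       (unsigned long long)cc.character,
+	       (unsigned long long)cc.character_mask);
+	printf("behaviour 0x%llx (mask 0x%llx)\n",
+	       (unsigned long long)cc.behaviour,
+	       (unsigned long long)cc.behaviour_mask);
+}
+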
 5. The kvm_run structure
 ------------------------
 
index 89fff7d..0b3a114 100644 (file)
@@ -98,5 +98,25 @@ request is made for a page in an old zpool, it is uncompressed using its
 original compressor.  Once all pages are removed from an old zpool, the zpool
 and its compressor are freed.
 
+Some of the pages in zswap are same-value filled pages (i.e. contents of the
+page have same value or repetitive pattern). These pages include zero-filled
+pages and they are handled differently. During store operation, a page is
+checked if it is a same-value filled page before compressing it. If true, the
+compressed length of the page is set to zero and the pattern or same-filled
+value is stored.
+
+Same-value filled pages identification feature is enabled by default and can be
+disabled at boot time by setting the "same_filled_pages_enabled" attribute to 0,
+e.g. zswap.same_filled_pages_enabled=0. It can also be enabled and disabled at
+runtime using the sysfs "same_filled_pages_enabled" attribute, e.g.
+
+echo 1 > /sys/module/zswap/parameters/same_filled_pages_enabled
+
+When zswap same-filled page identification is disabled at runtime, it will stop
+checking for the same-value filled pages during store operation. However, the
+existing pages which are marked as same-value filled pages remain stored
+unchanged in zswap until they are either loaded or invalidated.
+
 A debugfs interface is provided for various statistic about pool size, number
-of pages stored, and various counters for the reasons pages are rejected.
+of pages stored, same-value filled pages and various counters for the reasons
+pages are rejected.
diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt
new file mode 100644 (file)
index 0000000..5cd5843
--- /dev/null
@@ -0,0 +1,186 @@
+Overview
+========
+
+Page Table Isolation (pti, previously known as KAISER[1]) is a
+countermeasure against attacks on the shared user/kernel address
+space such as the "Meltdown" approach[2].
+
+To mitigate this class of attacks, we create an independent set of
+page tables for use only when running userspace applications.  When
+the kernel is entered via syscalls, interrupts or exceptions, the
+page tables are switched to the full "kernel" copy.  When the system
+switches back to user mode, the user copy is used again.
+
+The userspace page tables contain only a minimal amount of kernel
+data: only what is needed to enter/exit the kernel such as the
+entry/exit functions themselves and the interrupt descriptor table
+(IDT).  There are a few strictly unnecessary things that get mapped
+such as the first C function when entering an interrupt (see
+comments in pti.c).
+
+This approach helps to ensure that side-channel attacks leveraging
+the paging structures do not function when PTI is enabled.  It can be
+enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time.
+Once enabled at compile-time, it can be disabled at boot with the
+'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt).
+
+Page Table Management
+=====================
+
+When PTI is enabled, the kernel manages two sets of page tables.
+The first set is very similar to the single set which is present in
+kernels without PTI.  This includes a complete mapping of userspace
+that the kernel can use for things like copy_to_user().
+
+Although _complete_, the user portion of the kernel page tables is
+crippled by setting the NX bit in the top level.  This ensures
+that any missed kernel->user CR3 switch will immediately crash
+userspace upon executing its first instruction.
+
+The userspace page tables map only the kernel data needed to enter
+and exit the kernel.  This data is entirely contained in the 'struct
+cpu_entry_area' structure which is placed in the fixmap which gives
+each CPU's copy of the area a compile-time-fixed virtual address.
+
+For new userspace mappings, the kernel makes the entries in its
+page tables like normal.  The only difference is when the kernel
+makes entries in the top (PGD) level.  In addition to setting the
+entry in the main kernel PGD, a copy of the entry is made in the
+userspace page tables' PGD.
+
+This sharing at the PGD level also inherently shares all the lower
+layers of the page tables.  This leaves a single, shared set of
+userspace page tables to manage.  One PTE to lock, one set of
+accessed bits, dirty bits, etc...
+
+Overhead
+========
+
+Protection against side-channel attacks is important.  But,
+this protection comes at a cost:
+
+1. Increased Memory Use
+  a. Each process now needs an order-1 PGD instead of order-0.
+     (Consumes an additional 4k per process).
+  b. The 'cpu_entry_area' structure must be 2MB in size and 2MB
+     aligned so that it can be mapped by setting a single PMD
+     entry.  This consumes nearly 2MB of RAM once the kernel
+     is decompressed, but no space in the kernel image itself.
+
+2. Runtime Cost
+  a. CR3 manipulation to switch between the page table copies
+     must be done at interrupt, syscall, and exception entry
+     and exit (it can be skipped when the kernel is interrupted,
+     though.)  Moves to CR3 are on the order of a hundred
+     cycles, and are required at every entry and exit.
+  b. A "trampoline" must be used for SYSCALL entry.  This
+     trampoline depends on a smaller set of resources than the
+     non-PTI SYSCALL entry code, so requires mapping fewer
+     things into the userspace page tables.  The downside is
+     that stacks must be switched at entry time.
+  c. Global pages are disabled for all kernel structures not
+     mapped into both kernel and userspace page tables.  This
+     feature of the MMU allows different processes to share TLB
+     entries mapping the kernel.  Losing the feature means more
+     TLB misses after a context switch.  The actual loss of
+     performance is very small, however, never exceeding 1%.
+  d. Process Context IDentifiers (PCID) is a CPU feature that
+     allows us to skip flushing the entire TLB when switching page
+     tables by setting a special bit in CR3 when the page tables
+     are changed.  This makes switching the page tables (at context
+     switch, or kernel entry/exit) cheaper.  But, on systems with
+     PCID support, the context switch code must flush both the user
+     and kernel entries out of the TLB.  The user PCID TLB flush is
+     deferred until the exit to userspace, minimizing the cost.
+     See intel.com/sdm for the gory PCID/INVPCID details.
+  e. The userspace page tables must be populated for each new
+     process.  Even without PTI, the shared kernel mappings
+     are created by copying top-level (PGD) entries into each
+     new process.  But, with PTI, there are now *two* kernel
+     mappings: one in the kernel page tables that maps everything
+     and one for the entry/exit structures.  At fork(), we need to
+     copy both.
+  f. In addition to the fork()-time copying, there must also
+     be an update to the userspace PGD any time a set_pgd() is done
+     on a PGD used to map userspace.  This ensures that the kernel
+     and userspace copies always map the same userspace
+     memory.
+  g. On systems without PCID support, each CR3 write flushes
+     the entire TLB.  That means that each syscall, interrupt
+     or exception flushes the TLB.
+  h. INVPCID is a TLB-flushing instruction which allows flushing
+     of TLB entries for non-current PCIDs.  Some systems support
+     PCIDs, but do not support INVPCID.  On these systems, addresses
+     can only be flushed from the TLB for the current PCID.  When
+     flushing a kernel address, we need to flush all PCIDs, so a
+     single kernel address flush will require a TLB-flushing CR3
+     write upon the next use of every PCID.
+
+Possible Future Work
+====================
+1. We can be more careful about not actually writing to CR3
+   unless its value is actually changed.
+2. Allow PTI to be enabled/disabled at runtime in addition to the
+   boot-time switching.
+
+Testing
+========
+
+To test stability of PTI, the following test procedure is recommended,
+ideally doing all of these in parallel:
+
+1. Set CONFIG_DEBUG_ENTRY=y
+2. Run several copies of all of the tools/testing/selftests/x86/ tests
+   (excluding MPX and protection_keys) in a loop on multiple CPUs for
+   several minutes.  These tests frequently uncover corner cases in the
+   kernel entry code.  In general, old kernels might cause these tests
+   themselves to crash, but they should never crash the kernel.
+3. Run the 'perf' tool in a mode (top or record) that generates many
+   frequent performance monitoring non-maskable interrupts (see "NMI"
+   in /proc/interrupts).  This exercises the NMI entry/exit code which
+   is known to trigger bugs in code paths that did not expect to be
+   interrupted, including nested NMIs.  Using "-c" boosts the rate of
+   NMIs, and using two -c with separate counters encourages nested NMIs
+   and less deterministic behavior.
+
+       while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done
+
+4. Launch a KVM virtual machine.
+5. Run 32-bit binaries on systems supporting the SYSCALL instruction.
+   This has been a lightly-tested code path and needs extra scrutiny.
+
+Debugging
+=========
+
+Bugs in PTI cause a few different signatures of crashes
+that are worth noting here.
+
+ * Failures of the selftests/x86 code.  Usually a bug in one of the
+   more obscure corners of entry_64.S
+ * Crashes in early boot, especially around CPU bringup.  Bugs
+   in the trampoline code or mappings cause these.
+ * Crashes at the first interrupt.  Caused by bugs in entry_64.S,
+   like screwing up a page table switch.  Also caused by
+   incorrectly mapping the IRQ handler entry code.
+ * Crashes at the first NMI.  The NMI code is separate from main
+   interrupt handlers and can have bugs that do not affect
+   normal interrupts.  Also caused by incorrectly mapping NMI
+   code.  NMIs that interrupt the entry code must be very
+   careful and can be the cause of crashes that show up when
+   running perf.
+ * Kernel crashes at the first exit to userspace.  entry_64.S
+   bugs, or failing to map some of the exit code.
+ * Crashes at first interrupt that interrupts userspace. The paths
+   in entry_64.S that return to userspace are sometimes separate
+   from the ones that return to the kernel.
+ * Double faults: overflowing the kernel stack because of page
+   faults upon page faults.  Caused by touching non-pti-mapped
+   data in the entry code, or forgetting to switch to kernel
+   CR3 before calling into C functions which are not pti-mapped.
+ * Userspace segfaults early in boot, sometimes manifesting
+   as mount(8) failing to mount the rootfs.  These have
+   tended to be TLB invalidation issues.  Usually invalidating
+   the wrong PCID, or otherwise missing an invalidation.
+
+1. https://gruss.cc/files/kaiser.pdf
+2. https://meltdownattack.com/meltdown.pdf
index 3448e67..ea91cb6 100644 (file)
@@ -1,6 +1,4 @@
 
-<previous description obsolete, deleted>
-
 Virtual memory map with 4 level page tables:
 
 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
@@ -14,13 +12,17 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
 ... unused hole ...
+                                   vaddr_end for KASLR
+fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
+fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffff9fffffff (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space (variable)
-ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
+ffffffffa0000000 - [fixmap start]   (~1526 MB) module mapping space (variable)
+[fixmap start]   - ffffffffff5fffff kernel-internal fixmap range
+ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
 Virtual memory map with 5 level page tables:
@@ -29,26 +31,31 @@ Virtual memory map with 5 level page tables:
 hole caused by [56:63] sign extension
 ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor
 ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory
-ff90000000000000 - ff91ffffffffffff (=49 bits) hole
-ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space
+ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI
+ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB)
 ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
 ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
 ... unused hole ...
 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
+... unused hole ...
+                                   vaddr_end for KASLR
+fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
 ... unused hole ...
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffff9fffffff (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
-ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
+ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
+[fixmap start]   - ffffffffff5fffff kernel-internal fixmap range
+ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
 Architecture defines a 64-bit virtual address. Implementations can support
 less. Currently supported are 48- and 57-bit virtual addresses. Bits 63
-through to the most-significant implemented bit are set to either all ones
-or all zero. This causes hole between user space and kernel addresses.
+through to the most-significant implemented bit are sign extended.
+This causes a hole between user space and kernel addresses if they are
+interpreted as unsigned.
 
 The direct mapping covers all memory in the system up to the highest
 memory address (this means in some cases it can also include PCI memory
@@ -58,19 +65,15 @@ vmalloc space is lazily synchronized into the different PML4/PML5 pages of
 the processes using the page fault handler, with init_top_pgt as
 reference.
 
-Current X86-64 implementations support up to 46 bits of address space (64 TB),
-which is our current limit. This expands into MBZ space in the page tables.
-
 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
 memory window (this size is arbitrary, it can be raised later if needed).
 The mappings are not part of any other kernel PGD and are only available
 during EFI runtime calls.
 
-The module mapping space size changes based on the CONFIG requirements for the
-following fixmap section.
-
 Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
 
--Andi Kleen, Jul 2004
+Be very careful vs. KASLR when changing anything here. The KASLR address
+range must not overlap with anything except the KASAN shadow area, which is
+correct as KASAN disables KASLR.
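
The sign-extension rule described in the hunk above can be illustrated with a short user-space sketch; canonical48() is a made-up helper, not a kernel interface. With 48 implemented bits, bit 47 is copied into bits 48..63, which is what splits the map into a low user half and a high kernel half with a non-canonical hole between them.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only: make a 48-bit virtual address canonical by copying
	 * bit 47 into bits 48..63.  Relies on arithmetic right shift, as on the
	 * usual kernel toolchains. */
	static uint64_t canonical48(uint64_t vaddr)
	{
		return (uint64_t)((int64_t)(vaddr << 16) >> 16);
	}

	int main(void)
	{
		/* Last user address and first kernel-half address for 48-bit VA. */
		printf("%#llx\n", (unsigned long long)canonical48(0x00007fffffffffffULL));
		printf("%#llx\n", (unsigned long long)canonical48(0x0000800000000000ULL));
		return 0;
	}

The second value comes out as 0xffff800000000000, i.e. the start of the kernel half shown in the layout above.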
index 48ae3a4..0b52491 100644 (file)
@@ -62,7 +62,15 @@ trivial patch so apply some common sense.
 
 7.     When sending security related changes or reports to a maintainer
        please Cc: security@kernel.org, especially if the maintainer
-       does not respond.
+       does not respond. Please keep in mind that the security team is
+       a small set of people who can be efficient only when working on
+       verified bugs. Please only Cc: this list when you have identified
+       that the bug would present a short-term risk to other users if it
+       were publicly disclosed. For example, reports of address leaks do
+       not represent an immediate threat and are better handled publicly,
+       and ideally, should come with a patch proposal. Please do not send
+       automated reports to this list either. Such bugs will be handled
+       better and faster in the usual public places.
 
 8.     Happy hacking.
 
@@ -321,7 +329,7 @@ F:  drivers/acpi/apei/
 
 ACPI COMPONENT ARCHITECTURE (ACPICA)
 M:     Robert Moore <robert.moore@intel.com>
-M:     Lv Zheng <lv.zheng@intel.com>
+M:     Erik Schmauss <erik.schmauss@intel.com>
 M:     "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
 L:     linux-acpi@vger.kernel.org
 L:     devel@acpica.org
@@ -554,13 +562,13 @@ S:        Orphan
 F:     Documentation/filesystems/affs.txt
 F:     fs/affs/
 
-AFS FILESYSTEM & AF_RXRPC SOCKET DOMAIN
+AFS FILESYSTEM
 M:     David Howells <dhowells@redhat.com>
 L:     linux-afs@lists.infradead.org
 S:     Supported
 F:     fs/afs/
-F:     include/net/af_rxrpc.h
-F:     net/rxrpc/af_rxrpc.c
+F:     include/trace/events/afs.h
+F:     Documentation/filesystems/afs.txt
 W:     https://www.infradead.org/~dhowells/kafs/
 
 AGPGART DRIVER
@@ -859,7 +867,8 @@ F:  kernel/configs/android*
 ANDROID DRIVERS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:     Arve Hjønnevåg <arve@android.com>
-M:     Riley Andrews <riandrews@android.com>
+M:     Todd Kjos <tkjos@android.com>
+M:     Martijn Coenen <maco@android.com>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 L:     devel@driverdev.osuosl.org
 S:     Supported
@@ -1582,6 +1591,7 @@ F:        arch/arm/boot/dts/kirkwood*
 F:     arch/arm/configs/mvebu_*_defconfig
 F:     arch/arm/mach-mvebu/
 F:     arch/arm64/boot/dts/marvell/armada*
+F:     drivers/cpufreq/armada-37xx-cpufreq.c
 F:     drivers/cpufreq/mvebu-cpufreq.c
 F:     drivers/irqchip/irq-armada-370-xp.c
 F:     drivers/irqchip/irq-mvebu-*
@@ -2046,7 +2056,7 @@ F:        arch/arm/boot/dts/uniphier*
 F:     arch/arm/include/asm/hardware/cache-uniphier.h
 F:     arch/arm/mach-uniphier/
 F:     arch/arm/mm/cache-uniphier.c
-F:     arch/arm64/boot/dts/socionext/
+F:     arch/arm64/boot/dts/socionext/uniphier*
 F:     drivers/bus/uniphier-system-bus.c
 F:     drivers/clk/uniphier/
 F:     drivers/gpio/gpio-uniphier.c
@@ -2613,24 +2623,22 @@ F:      fs/bfs/
 F:     include/uapi/linux/bfs_fs.h
 
 BLACKFIN ARCHITECTURE
-M:     Steven Miao <realmz6@gmail.com>
 L:     adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 T:     git git://git.code.sf.net/p/adi-linux/code
 W:     http://blackfin.uclinux.org
-S:     Supported
+S:     Orphan
 F:     arch/blackfin/
 
 BLACKFIN EMAC DRIVER
 L:     adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://blackfin.uclinux.org
-S:     Supported
+S:     Orphan
 F:     drivers/net/ethernet/adi/
 
 BLACKFIN MEDIA DRIVER
-M:     Scott Jiang <scott.jiang.linux@gmail.com>
 L:     adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://blackfin.uclinux.org/
-S:     Supported
+S:     Orphan
 F:     drivers/media/platform/blackfin/
 F:     drivers/media/i2c/adv7183*
 F:     drivers/media/i2c/vs6624*
@@ -2638,25 +2646,25 @@ F:      drivers/media/i2c/vs6624*
 BLACKFIN RTC DRIVER
 L:     adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://blackfin.uclinux.org
-S:     Supported
+S:     Orphan
 F:     drivers/rtc/rtc-bfin.c
 
 BLACKFIN SDH DRIVER
 L:     adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://blackfin.uclinux.org
-S:     Supported
+S:     Orphan
 F:     drivers/mmc/host/bfin_sdh.c
 
 BLACKFIN SERIAL DRIVER
 L:     adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://blackfin.uclinux.org
-S:     Supported
+S:     Orphan
 F:     drivers/tty/serial/bfin_uart.c
 
 BLACKFIN WATCHDOG DRIVER
 L:     adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://blackfin.uclinux.org
-S:     Supported
+S:     Orphan
 F:     drivers/watchdog/bfin_wdt.c
 
 BLINKM RGB LED DRIVER
@@ -5143,15 +5151,15 @@ F:      sound/usb/misc/ua101.c
 EFI TEST DRIVER
 L:     linux-efi@vger.kernel.org
 M:     Ivan Hu <ivan.hu@canonical.com>
-M:     Matt Fleming <matt@codeblueprint.co.uk>
+M:     Ard Biesheuvel <ard.biesheuvel@linaro.org>
 S:     Maintained
 F:     drivers/firmware/efi/test/
 
 EFI VARIABLE FILESYSTEM
 M:     Matthew Garrett <matthew.garrett@nebula.com>
 M:     Jeremy Kerr <jk@ozlabs.org>
-M:     Matt Fleming <matt@codeblueprint.co.uk>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
+M:     Ard Biesheuvel <ard.biesheuvel@linaro.org>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
 L:     linux-efi@vger.kernel.org
 S:     Maintained
 F:     fs/efivarfs/
@@ -5312,7 +5320,6 @@ S:        Supported
 F:     security/integrity/evm/
 
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
-M:     Matt Fleming <matt@codeblueprint.co.uk>
 M:     Ard Biesheuvel <ard.biesheuvel@linaro.org>
 L:     linux-efi@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
@@ -5423,7 +5430,7 @@ F:        drivers/media/tuners/fc2580*
 
 FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
 M:     Johannes Thumshirn <jth@kernel.org>
-L:     fcoe-devel@open-fcoe.org
+L:     linux-scsi@vger.kernel.org
 W:     www.Open-FCoE.org
 S:     Supported
 F:     drivers/scsi/libfc/
@@ -6167,7 +6174,6 @@ M:        Jean Delvare <jdelvare@suse.com>
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-hwmon@vger.kernel.org
 W:     http://hwmon.wiki.kernel.org/
-T:     quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:     Maintained
 F:     Documentation/hwmon/
@@ -6605,16 +6611,6 @@ L:       linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/i2c-stub.c
 
-i386 BOOT CODE
-M:     "H. Peter Anvin" <hpa@zytor.com>
-S:     Maintained
-F:     arch/x86/boot/
-
-i386 SETUP CODE / CPU ERRATA WORKAROUNDS
-M:     "H. Peter Anvin" <hpa@zytor.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/hpa/linux-2.6-x86setup.git
-S:     Maintained
-
 IA64 (Itanium) PLATFORM
 M:     Tony Luck <tony.luck@intel.com>
 M:     Fenghua Yu <fenghua.yu@intel.com>
@@ -7760,6 +7756,7 @@ F:        security/keys/
 
 KGDB / KDB /debug_core
 M:     Jason Wessel <jason.wessel@windriver.com>
+M:     Daniel Thompson <daniel.thompson@linaro.org>
 W:     http://kgdb.wiki.kernel.org/
 L:     kgdb-bugreport@lists.sourceforge.net
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
@@ -9095,6 +9092,7 @@ F:        drivers/usb/image/microtek.*
 
 MIPS
 M:     Ralf Baechle <ralf@linux-mips.org>
+M:     James Hogan <jhogan@kernel.org>
 L:     linux-mips@linux-mips.org
 W:     http://www.linux-mips.org/
 T:     git git://git.linux-mips.org/pub/scm/ralf/linux.git
@@ -9339,9 +9337,9 @@ F:        drivers/gpu/drm/mxsfb/
 F:     Documentation/devicetree/bindings/display/mxsfb-drm.txt
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M:     Hyong-Youb Kim <hykim@myri.com>
+M:     Chris Lee <christopher.lee@cspi.com>
 L:     netdev@vger.kernel.org
-W:     https://www.myricom.com/support/downloads/myri10ge.html
+W:     https://www.cspi.com/ethernet-products/support/downloads/
 S:     Supported
 F:     drivers/net/ethernet/myricom/myri10ge/
 
@@ -9648,8 +9646,8 @@ F:        include/uapi/linux/sunrpc/
 NILFS2 FILESYSTEM
 M:     Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
 L:     linux-nilfs@vger.kernel.org
-W:     http://nilfs.sourceforge.net/
-W:     http://nilfs.osdn.jp/
+W:     https://nilfs.sourceforge.io/
+W:     https://nilfs.osdn.jp/
 T:     git git://github.com/konis/nilfs2.git
 S:     Supported
 F:     Documentation/filesystems/nilfs2.txt
@@ -9813,6 +9811,7 @@ NXP TFA9879 DRIVER
 M:     Peter Rosin <peda@axentia.se>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
+F:     Documentation/devicetree/bindings/sound/tfa9879.txt
 F:     sound/soc/codecs/tfa9879*
 
 NXP-NCI NFC DRIVER
@@ -10144,7 +10143,7 @@ F:      drivers/irqchip/irq-ompic.c
 F:     drivers/irqchip/irq-or1k-*
 
 OPENVSWITCH
-M:     Pravin Shelar <pshelar@nicira.com>
+M:     Pravin B Shelar <pshelar@ovn.org>
 L:     netdev@vger.kernel.org
 L:     dev@openvswitch.org
 W:     http://openvswitch.org
@@ -10899,6 +10898,7 @@ F:      include/linux/pm.h
 F:     include/linux/pm_*
 F:     include/linux/powercap.h
 F:     drivers/powercap/
+F:     kernel/configs/nopm.config
 
 POWER STATE COORDINATION INTERFACE (PSCI)
 M:     Mark Rutland <mark.rutland@arm.com>
@@ -11660,8 +11660,8 @@ F:      drivers/mtd/nand/r852.h
 RISC-V ARCHITECTURE
 M:     Palmer Dabbelt <palmer@sifive.com>
 M:     Albert Ou <albert@sifive.com>
-L:     patches@groups.riscv.org
-T:     git https://github.com/riscv/riscv-linux
+L:     linux-riscv@lists.infradead.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git
 S:     Supported
 F:     arch/riscv/
 K:     riscv
@@ -11785,6 +11785,18 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-deve
 S:     Maintained
 F:     drivers/net/wireless/realtek/rtl8xxxu/
 
+RXRPC SOCKETS (AF_RXRPC)
+M:     David Howells <dhowells@redhat.com>
+L:     linux-afs@lists.infradead.org
+S:     Supported
+F:     net/rxrpc/
+F:     include/keys/rxrpc-type.h
+F:     include/net/af_rxrpc.h
+F:     include/trace/events/rxrpc.h
+F:     include/uapi/linux/rxrpc.h
+F:     Documentation/networking/rxrpc.txt
+W:     https://www.infradead.org/~dhowells/kafs/
+
 S3 SAVAGE FRAMEBUFFER DRIVER
 M:     Antonino Daplas <adaplas@gmail.com>
 L:     linux-fbdev@vger.kernel.org
@@ -12230,7 +12242,7 @@ M:      Security Officers <security@kernel.org>
 S:     Supported
 
 SECURITY SUBSYSTEM
-M:     James Morris <james.l.morris@oracle.com>
+M:     James Morris <jmorris@namei.org>
 M:     "Serge E. Hallyn" <serge@hallyn.com>
 L:     linux-security-module@vger.kernel.org (suggested Cc:)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
@@ -12589,6 +12601,12 @@ F:     include/media/soc*
 F:     drivers/media/i2c/soc_camera/
 F:     drivers/media/platform/soc_camera/
 
+SOCIONEXT UNIPHIER SOUND DRIVER
+M:     Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:     Maintained
+F:     sound/soc/uniphier/
+
 SOEKRIS NET48XX LED SUPPORT
 M:     Chris Boot <bootc@bootc.net>
 S:     Maintained
@@ -12638,6 +12656,14 @@ S:     Maintained
 F:     drivers/ssb/
 F:     include/linux/ssb/
 
+SONY IMX274 SENSOR DRIVER
+M:     Leon Luo <leonl@leopardimaging.com>
+L:     linux-media@vger.kernel.org
+T:     git git://linuxtv.org/media_tree.git
+S:     Maintained
+F:     drivers/media/i2c/imx274.c
+F:     Documentation/devicetree/bindings/media/i2c/imx274.txt
+
 SONY MEMORYSTICK CARD SUPPORT
 M:     Alex Dubov <oakad@yahoo.com>
 W:     http://tifmxx.berlios.de/
@@ -13104,6 +13130,7 @@ F:      drivers/dma/dw/
 
 SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
 M:     Jie Deng <jiedeng@synopsys.com>
+M:     Jose Abreu <Jose.Abreu@synopsys.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/synopsys/
@@ -13479,6 +13506,7 @@ M:      Mika Westerberg <mika.westerberg@linux.intel.com>
 M:     Yehezkel Bernat <yehezkel.bernat@intel.com>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
 S:     Maintained
+F:     Documentation/admin-guide/thunderbolt.rst
 F:     drivers/thunderbolt/
 F:     include/linux/thunderbolt.h
 
@@ -13656,10 +13684,8 @@ F:     drivers/net/wireless/ti/
 F:     include/linux/wl12xx.h
 
 TILE ARCHITECTURE
-M:     Chris Metcalf <cmetcalf@mellanox.com>
 W:     http://www.mellanox.com/repository/solutions/tile-scm/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git
-S:     Supported
+S:     Orphan
 F:     arch/tile/
 F:     drivers/char/tile-srom.c
 F:     drivers/edac/tile_edac.c
@@ -13839,6 +13865,13 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git
 S:     Maintained
 K:     ^Subject:.*(?i)trivial
 
+TEMPO SEMICONDUCTOR DRIVERS
+M:     Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+S:     Maintained
+F:     sound/soc/codecs/tscs*.c
+F:     sound/soc/codecs/tscs*.h
+F:     Documentation/devicetree/bindings/sound/tscs*.txt
+
 TTY LAYER
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:     Jiri Slaby <jslaby@suse.com>
@@ -14847,7 +14880,7 @@ F:      net/x25/
 X86 ARCHITECTURE (32-BIT AND 64-BIT)
 M:     Thomas Gleixner <tglx@linutronix.de>
 M:     Ingo Molnar <mingo@redhat.com>
-M:     "H. Peter Anvin" <hpa@zytor.com>
+R:     "H. Peter Anvin" <hpa@zytor.com>
 M:     x86@kernel.org
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
index f761bf4..c8b8e90 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -484,26 +484,6 @@ CLANG_GCC_TC       := --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
 KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
-KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
-# Quiet clang warning: comparison of unsigned expression < 0 is always false
-KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
-# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
-# source of a reference will be _MergedGlobals and not on of the whitelisted names.
-# See modpost pattern 2
-KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
-KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
-else
-
-# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
 endif
 
 ifeq ($(config-targets),1)
@@ -716,6 +696,29 @@ ifdef CONFIG_CC_STACKPROTECTOR
 endif
 KBUILD_CFLAGS += $(stackp-flag)
 
+ifeq ($(cc-name),clang)
+KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+# Quiet clang warning: comparison of unsigned expression < 0 is always false
+KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
+# source of a reference will be _MergedGlobals and not one of the whitelisted names.
+# See modpost pattern 2
+KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+else
+
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+endif
+
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS  += -fno-omit-frame-pointer -fno-optimize-sibling-calls
 else
@@ -789,6 +792,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS  += $(call cc-option,-fno-strict-overflow)
 
+# Make sure -fstack-check isn't enabled (like gentoo apparently did)
+KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
+
 # conserve stack if available
 KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
 
index 400b9e1..a26d6f8 100644 (file)
@@ -234,8 +234,8 @@ config ARCH_HAS_FORTIFY_SOURCE
 config ARCH_HAS_SET_MEMORY
        bool
 
-# Select if arch init_task initializer is different to init/init_task.c
-config ARCH_INIT_TASK
+# Select if arch init_task must go in the __init_task_data section
+config ARCH_TASK_STRUCT_ON_STACK
        bool
 
 # Select if arch has its private alloc_task_struct() function
index 8c20c5e..807d7b9 100644 (file)
@@ -39,9 +39,6 @@ struct thread_info {
        .preempt_count  = INIT_PREEMPT_COUNT,   \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* How to get the thread information struct from C.  */
 register struct thread_info *__current_thread_info __asm__("$8");
 #define current_thread_info()  __current_thread_info
index b15bf6b..14a2e9a 100644 (file)
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += bpf_perf_event.h
index 37bd6d9..a6bdc1d 100644 (file)
@@ -102,6 +102,15 @@ sio_pci_route(void)
                                   alpha_mv.sys.sio.route_tab);
 }
 
+static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev)
+{
+       if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
+           (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
+               return false;
+
+       return true;
+}
+
 static unsigned int __init
 sio_collect_irq_levels(void)
 {
@@ -110,8 +119,7 @@ sio_collect_irq_levels(void)
 
        /* Iterate through the devices, collecting IRQ levels.  */
        for_each_pci_dev(dev) {
-               if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
-                   (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
+               if (!sio_pci_dev_irq_needs_level(dev))
                        continue;
 
                if (dev->irq)
@@ -120,8 +128,7 @@ sio_collect_irq_levels(void)
        return level_bits;
 }
 
-static void __init
-sio_fixup_irq_levels(unsigned int level_bits)
+static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset)
 {
        unsigned int old_level_bits;
 
@@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits)
         */
        old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
 
-       level_bits |= (old_level_bits & 0x71ff);
+       if (reset)
+               old_level_bits &= 0x71ff;
+
+       level_bits |= old_level_bits;
 
        outb((level_bits >> 0) & 0xff, 0x4d0);
        outb((level_bits >> 8) & 0xff, 0x4d1);
 }
 
+static inline void
+sio_fixup_irq_levels(unsigned int level_bits)
+{
+       __sio_fixup_irq_levels(level_bits, true);
+}
+
 static inline int
 noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
@@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
        int irq = COMMON_TABLE_LOOKUP, tmp;
        tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
-       return irq >= 0 ? tmp : -1;
+
+       irq = irq >= 0 ? tmp : -1;
+
+       /* Fixup IRQ level if an actual IRQ mapping is detected */
+       if (sio_pci_dev_irq_needs_level(dev) && irq >= 0)
+               __sio_fixup_irq_levels(1 << irq, false);
+
+       return irq;
 }
 
 static inline int
index 316a99a..1cfcfbb 100644 (file)
@@ -18,7 +18,7 @@
  * The algorithm for the leading and trailing quadwords remains the same,
  * however the loop has been unrolled to enable better memory throughput,
  * and the code has been replicated for each of the entry points: __memset
- * and __memsetw to permit better scheduling to eliminate the stalling
+ * and __memset16 to permit better scheduling to eliminate the stalling
  * encountered during the mask replication.
  * A future enhancement might be to put in a byte store loop for really
  * small (say < 32 bytes) memset()s.  Whether or not that change would be
@@ -34,7 +34,7 @@
        .globl memset
        .globl __memset
        .globl ___memset
-       .globl __memsetw
+       .globl __memset16
        .globl __constant_c_memset
 
        .ent ___memset
@@ -415,9 +415,9 @@ end:
         * to mask stalls.  Note that entry point names also had to change
         */
        .align 5
-       .ent __memsetw
+       .ent __memset16
 
-__memsetw:
+__memset16:
        .frame $30,0,$26,0
        .prologue 0
 
@@ -596,8 +596,8 @@ end_w:
        nop
        ret $31,($26),1         # L0 :
 
-       .end __memsetw
-       EXPORT_SYMBOL(__memsetw)
+       .end __memset16
+       EXPORT_SYMBOL(__memset16)
 
 memset = ___memset
 __memset = ___memset
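
The rename tracks the generic 16-bit fill helper. A plain C sketch of the semantics the optimized assembly above is expected to provide, assuming the generic memset16() convention of filling 'count' 16-bit elements:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Reference-only sketch: store 'v' into 'count' consecutive 16-bit
	 * slots.  The Alpha routine above does the same job with replicated
	 * quadwords and unrolled stores. */
	static void *memset16_ref(uint16_t *s, uint16_t v, size_t count)
	{
		uint16_t *p = s;

		while (count--)
			*p++ = v;
		return s;
	}

	int main(void)
	{
		uint16_t line[8];

		memset16_ref(line, 0x1234, 8);
		printf("%#x %#x\n", line[0], line[7]);	/* 0x1234 0x1234 */
		return 0;
	}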
index 4e6e9f5..dc91c66 100644 (file)
                        reg = <0x80 0x10>, <0x100 0x10>;
                        #clock-cells = <0>;
                        clocks = <&input_clk>;
+
+                       /*
+                        * Set initial core pll output frequency to 90MHz.
+                        * It will be applied at the core pll driver probing
+                        * on early boot.
+                        */
+                       assigned-clocks = <&core_clk>;
+                       assigned-clock-rates = <90000000>;
                };
 
                core_intc: archs-intc@cpu {
index 63954a8..69ff489 100644 (file)
                        reg = <0x80 0x10>, <0x100 0x10>;
                        #clock-cells = <0>;
                        clocks = <&input_clk>;
+
+                       /*
+                        * Set initial core pll output frequency to 100MHz.
+                        * It will be applied at the core pll driver probing
+                        * on early boot.
+                        */
+                       assigned-clocks = <&core_clk>;
+                       assigned-clock-rates = <100000000>;
                };
 
                core_intc: archs-intc@cpu {
index 8f627c2..006aa3d 100644 (file)
                        reg = <0x00 0x10>, <0x14B8 0x4>;
                        #clock-cells = <0>;
                        clocks = <&input_clk>;
+
+                       /*
+                        * Set initial core pll output frequency to 1GHz.
+                        * It will be applied at the core pll driver probing
+                        * on early boot.
+                        */
+                       assigned-clocks = <&core_clk>;
+                       assigned-clock-rates = <1000000000>;
                };
 
                serial: serial@5000 {
index 7b8f8fa..ac6b0ed 100644 (file)
@@ -49,10 +49,11 @@ CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
+CONFIG_DRM=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_DRM_UDL=y
 CONFIG_FB=y
-CONFIG_FB_UDL=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_HCD=y
index 2d79e52..c85947b 100644 (file)
@@ -62,9 +62,6 @@ struct thread_info {
        .addr_limit = KERNEL_DS,                \
 }
 
-#define init_thread_info    (init_thread_union.thread_info)
-#define init_stack          (init_thread_union.stack)
-
 static inline __attribute_const__ struct thread_info *current_thread_info(void)
 {
        register unsigned long sp asm("sp");
index f35974e..c9173c0 100644 (file)
@@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
                return 0;
 
        __asm__ __volatile__(
+       "       mov     lp_count, %5            \n"
        "       lp      3f                      \n"
        "1:     ldb.ab  %3, [%2, 1]             \n"
        "       breq.d  %3, 0, 3f               \n"
@@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
        "       .word   1b, 4b                  \n"
        "       .previous                       \n"
        : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
-       : "g"(-EFAULT), "l"(count)
-       : "memory");
+       : "g"(-EFAULT), "r"(count)
+       : "lp_count", "lp_start", "lp_end", "memory");
 
        return res;
 }
index fa6d0ff..170b5db 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 7ef7d9a..9d27331 100644 (file)
@@ -199,7 +199,7 @@ static void read_arc_build_cfg_regs(void)
                        unsigned int exec_ctrl;
 
                        READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
-                       cpu->extn.dual_enb = exec_ctrl & 1;
+                       cpu->extn.dual_enb = !(exec_ctrl & 1);
 
                        /* dual issue always present for this core */
                        cpu->extn.dual = 1;
index 74315f3..bf40e06 100644 (file)
@@ -163,7 +163,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
  */
 static int __print_sym(unsigned int address, void *unused)
 {
-       __print_symbol("  %s\n", address);
+       printk("  %pS\n", (void *)address);
        return 0;
 }
 
index bcd7c9f..133a4da 100644 (file)
@@ -83,6 +83,7 @@ DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
 DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR)
 DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
 DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
+DO_ERROR_INFO(SIGSEGV, "gcc generated __builtin_trap", do_trap5_error, 0)
 
 /*
  * Entry Point for Misaligned Data access Exception, for emulating in software
@@ -115,6 +116,8 @@ void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
  * Thus TRAP_S <n> can be used for specific purpose
  *  -1 used for software breakpointing (gdb)
  *  -2 used by kprobes
+ *  -5 __builtin_trap() generated by gcc (2018.03 onwards) for toggles such as
+ *     -fno-isolate-erroneous-paths-dereference
  */
 void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
 {
@@ -134,6 +137,9 @@ void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
                kgdb_trap(regs);
                break;
 
+       case 5:
+               do_trap5_error(address, regs);
+               break;
        default:
                break;
        }
@@ -155,3 +161,11 @@ void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
 
        insterror_is_error(address, regs);
 }
+
+/*
+ * abort() call generated by older gcc for __builtin_trap()
+ */
+void abort(void)
+{
+       __asm__ __volatile__("trap_s  5\n");
+}
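
For context, a tiny illustration of what the two paths above catch: on ARC, newer gcc lowers __builtin_trap() to TRAP_S 5 (reported via do_trap5_error() above), while older gcc emits a call to abort(), which the stub added above provides. The function below is illustrative only.

	/* Illustrative only.  Paths the compiler proves erroneous (for example
	 * a dereference of a known-NULL pointer under
	 * -fisolate-erroneous-paths-dereference) are also replaced with
	 * __builtin_trap(), which is why the kernel must handle it. */
	void __attribute__((noreturn)) fatal_path(void)
	{
		__builtin_trap();
	}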
index 7d8c1d6..6e9a0a9 100644 (file)
@@ -163,6 +163,9 @@ static void show_ecr_verbose(struct pt_regs *regs)
                else
                        pr_cont("Bus Error, check PRM\n");
 #endif
+       } else if (vec == ECR_V_TRAP) {
+               if (regs->ecr_param == 5)
+                       pr_cont("gcc generated __builtin_trap\n");
        } else {
                pr_cont("Check Programmer's Manual\n");
        }
index f1ac679..46544e8 100644 (file)
@@ -317,25 +317,23 @@ static void __init axs103_early_init(void)
         * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
         * of fudging the freq in DT
         */
+#define AXS103_QUAD_CORE_CPU_FREQ_HZ   50000000
+
        unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
        if (num_cores > 2) {
-               u32 freq = 50, orig;
-               /*
-                * TODO: use cpu node "cpu-freq" param instead of platform-specific
-                * "/cpu_card/core_clk" as it works only if we use fixed-clock for cpu.
-                */
+               u32 freq;
                int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk");
                const struct fdt_property *prop;
 
                prop = fdt_get_property(initial_boot_params, off,
-                                       "clock-frequency", NULL);
-               orig = be32_to_cpu(*(u32*)(prop->data)) / 1000000;
+                                       "assigned-clock-rates", NULL);
+               freq = be32_to_cpu(*(u32 *)(prop->data));
 
                /* Patching .dtb in-place with new core clock value */
-               if (freq != orig ) {
-                       freq = cpu_to_be32(freq * 1000000);
+               if (freq != AXS103_QUAD_CORE_CPU_FREQ_HZ) {
+                       freq = cpu_to_be32(AXS103_QUAD_CORE_CPU_FREQ_HZ);
                        fdt_setprop_inplace(initial_boot_params, off,
-                                           "clock-frequency", &freq, sizeof(freq));
+                                           "assigned-clock-rates", &freq, sizeof(freq));
                }
        }
 #endif
index fd0ae5e..2958aed 100644 (file)
@@ -38,42 +38,6 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
 #define CREG_PAE               (CREG_BASE + 0x180)
 #define CREG_PAE_UPDATE                (CREG_BASE + 0x194)
 
-#define CREG_CORE_IF_CLK_DIV   (CREG_BASE + 0x4B8)
-#define CREG_CORE_IF_CLK_DIV_2 0x1
-#define CGU_BASE               ARC_PERIPHERAL_BASE
-#define CGU_PLL_STATUS         (ARC_PERIPHERAL_BASE + 0x4)
-#define CGU_PLL_CTRL           (ARC_PERIPHERAL_BASE + 0x0)
-#define CGU_PLL_STATUS_LOCK    BIT(0)
-#define CGU_PLL_STATUS_ERR     BIT(1)
-#define CGU_PLL_CTRL_1GHZ      0x3A10
-#define HSDK_PLL_LOCK_TIMEOUT  500
-
-#define HSDK_PLL_LOCKED() \
-       !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
-
-#define HSDK_PLL_ERR() \
-       !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
-
-static void __init hsdk_set_cpu_freq_1ghz(void)
-{
-       u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
-
-       /*
-        * As we set cpu clock which exceeds 500MHz, the divider for the interface
-        * clock must be programmed to div-by-2.
-        */
-       iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
-
-       /* Set cpu clock to 1GHz */
-       iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
-
-       while (!HSDK_PLL_LOCKED() && timeout--)
-               cpu_relax();
-
-       if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
-               pr_err("Failed to setup CPU frequency to 1GHz!");
-}
-
 #define SDIO_BASE              (ARC_PERIPHERAL_BASE + 0xA000)
 #define SDIO_UHS_REG_EXT       (SDIO_BASE + 0x108)
 #define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
@@ -98,12 +62,6 @@ static void __init hsdk_init_early(void)
         * minimum possible div-by-2.
         */
        iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
-
-       /*
-        * Setup CPU frequency to 1GHz.
-        * TODO: remove it after smart hsdk pll driver will be introduced.
-        */
-       hsdk_set_cpu_freq_1ghz();
 }
 
 static const char *hsdk_compat[] __initconst = {
index 1b81c4e..d37f950 100644 (file)
                                reg-names = "phy";
                                status = "disabled";
                                ti,ctrl_mod = <&usb_ctrl_mod>;
+                               #phy-cells = <0>;
                        };
 
                        usb0: usb@47401000 {
                                reg-names = "phy";
                                status = "disabled";
                                ti,ctrl_mod = <&usb_ctrl_mod>;
+                               #phy-cells = <0>;
                        };
 
                        usb1: usb@47401800 {
index e5b0614..4714a59 100644 (file)
                        reg = <0x48038000 0x2000>,
                              <0x46000000 0x400000>;
                        reg-names = "mpu", "dat";
-                       interrupts = <80>, <81>;
+                       interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 8 2>,
                        reg = <0x4803C000 0x2000>,
                              <0x46400000 0x400000>;
                        reg-names = "mpu", "dat";
-                       interrupts = <82>, <83>;
+                       interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 10 2>,
index 9e92d48..3b9a94c 100644 (file)
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&spi0_pins>;
-       dmas = <&edma 16
-               &edma 17>;
+       dmas = <&edma 16 0
+               &edma 17 0>;
        dma-names = "tx0", "rx0";
 
        flash: w25q64cvzpig@0 {
index 25d2d72..678aa02 100644 (file)
        usb3_phy: usb3_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_xhci0_vbus>;
+               #phy-cells = <0>;
        };
 
        reg_xhci0_vbus: xhci0-vbus {
index e1f355f..434dc9a 100644 (file)
@@ -66,6 +66,7 @@
        usb3_1_phy: usb3_1-phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&usb3_1_vbus>;
+               #phy-cells = <0>;
        };
 
        usb3_1_vbus: usb3_1-vbus {
index 36ad571..0a3552e 100644 (file)
        usb3_0_phy: usb3_0_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_usb3_0_vbus>;
+               #phy-cells = <0>;
        };
 
        usb3_1_phy: usb3_1_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_usb3_1_vbus>;
+               #phy-cells = <0>;
        };
 
        reg_usb3_0_vbus: usb3-vbus0 {
index f503955..51b4ee6 100644 (file)
        usb2_1_phy: usb2_1_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_usb2_1_vbus>;
+               #phy-cells = <0>;
        };
 
        usb3_phy: usb3_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_usb3_vbus>;
+               #phy-cells = <0>;
        };
 
        reg_usb3_vbus: usb3-vbus {
index 45d815a..de08d90 100644 (file)
                                compatible = "aspeed,ast2400-vuart";
                                reg = <0x1e787000 0x40>;
                                reg-shift = <2>;
-                               interrupts = <10>;
+                               interrupts = <8>;
                                clocks = <&clk_uart>;
                                no-loopback-test;
                                status = "disabled";
index 5f29010..9b82cc8 100644 (file)
        jc42@18 {
                compatible = "nxp,se97b", "jedec,jc-42.4-temp";
                reg = <0x18>;
+               smbus-timeout-disable;
        };
 
        dpot: mcp4651-104@28 {
index 528b9e3..dcc55aa 100644 (file)
@@ -85,7 +85,7 @@
                timer@20200 {
                        compatible = "arm,cortex-a9-global-timer";
                        reg = <0x20200 0x100>;
-                       interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&periph_clk>;
                };
 
@@ -93,7 +93,7 @@
                        compatible = "arm,cortex-a9-twd-timer";
                        reg = <0x20600 0x20>;
                        interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
-                                                 IRQ_TYPE_LEVEL_HIGH)>;
+                                                 IRQ_TYPE_EDGE_RISING)>;
                        clocks = <&periph_clk>;
                };
 
index 013431e..dcde93c 100644 (file)
 
        usbphy: phy {
                compatible = "usb-nop-xceiv";
+               #phy-cells = <0>;
        };
 };
index 3bc5084..b8bde13 100644 (file)
        status = "okay";
 };
 
-&sata {
-       status = "okay";
-};
-
 &qspi {
        bspi-sel = <0>;
        flash: m25p80@0 {
index d94d14b..6a44b80 100644 (file)
        status = "okay";
 };
 
-&sata {
-       status = "okay";
-};
-
 &srab {
        compatible = "brcm,bcm58625-srab", "brcm,nsp-srab";
        status = "okay";
index eed89e6..a1f4d6d 100644 (file)
                                        label = "u-boot env";
                                        reg = <0 0x020000>;
                                };
-                               partition@0x020000 {
+                               partition@20000 {
                                        /* The LCDK defaults to booting from this partition */
                                        label = "u-boot";
                                        reg = <0x020000 0x080000>;
                                };
-                               partition@0x0a0000 {
+                               partition@a0000 {
                                        label = "free space";
                                        reg = <0x0a0000 0>;
                                };
index 413dbd5..81942ae 100644 (file)
         */
        battery {
                pinctrl-names = "default";
-               pintctrl-0 = <&battery_pins>;
+               pinctrl-0 = <&battery_pins>;
                compatible = "lego,ev3-battery";
                io-channels = <&adc 4>, <&adc 3>;
                io-channel-names = "voltage", "current";
        batt_volt_en {
                gpio-hog;
                gpios = <6 GPIO_ACTIVE_HIGH>;
-               output-low;
+               output-high;
        };
 };
 
index 9708157..681f548 100644 (file)
@@ -75,6 +75,7 @@
                                reg = <0x47401300 0x100>;
                                reg-names = "phy";
                                ti,ctrl_mod = <&usb_ctrl_mod>;
+                               #phy-cells = <0>;
                        };
 
                        usb0: usb@47401000 {
                                        reg = <0x1b00 0x100>;
                                        reg-names = "phy";
                                        ti,ctrl_mod = <&usb_ctrl_mod>;
+                                       #phy-cells = <0>;
                                };
                        };
 
index b2b95ff..0029ec2 100644 (file)
        status = "okay";
 };
 
+&mixer {
+       status = "okay";
+};
+
 /* eMMC flash */
 &mmc_0 {
        status = "okay";
index 589a67c..84f17f7 100644 (file)
                                clock-names = "ipg", "per";
                        };
 
-                       srtc: srtc@53fa4000 {
-                               compatible = "fsl,imx53-rtc", "fsl,imx25-rtc";
-                               reg = <0x53fa4000 0x4000>;
-                               interrupts = <24>;
-                               interrupt-parent = <&tzic>;
-                               clocks = <&clks IMX5_CLK_SRTC_GATE>;
-                               clock-names = "ipg";
-                       };
-
                        iomuxc: iomuxc@53fa8000 {
                                compatible = "fsl,imx53-iomuxc";
                                reg = <0x53fa8000 0x4000>;
index d5181f8..963e169 100644 (file)
                        clock-latency = <61036>; /* two CLK32 periods */
                        operating-points = <
                                /* kHz  uV */
+                               696000  1275000
                                528000  1175000
                                396000  1025000
                                198000  950000
                        >;
                        fsl,soc-operating-points = <
                                /* KHz  uV */
+                               696000  1275000
                                528000  1175000
                                396000  1175000
                                198000  1175000
index cf2f524..27cc913 100644 (file)
@@ -53,7 +53,8 @@
                };
 
                pinctrl: pin-controller@10000 {
-                       pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
+                       pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
+                                    &pmx_gpio_header_gpo>;
                        pinctrl-names = "default";
 
                        pmx_uart0: pmx-uart0 {
                         * ground.
                         */
                        pmx_gpio_header: pmx-gpio-header {
-                               marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28",
+                               marvell,pins = "mpp17", "mpp29", "mpp28",
                                               "mpp35", "mpp34", "mpp40";
                                marvell,function = "gpio";
                        };
 
+                       pmx_gpio_header_gpo: pxm-gpio-header-gpo {
+                               marvell,pins = "mpp7";
+                               marvell,function = "gpo";
+                       };
+
                        pmx_gpio_init: pmx-init {
                                marvell,pins = "mpp38";
                                marvell,function = "gpio";
index 38faa90..2fa5eb4 100644 (file)
@@ -72,7 +72,8 @@
 };
 
 &gpmc {
-       ranges = <1 0 0x08000000 0x1000000>;    /* CS1: 16MB for LAN9221 */
+       ranges = <0 0 0x30000000 0x1000000      /* CS0: 16MB for NAND */
+                 1 0 0x2c000000 0x1000000>;    /* CS1: 16MB for LAN9221 */
 
        ethernet@gpmc {
                pinctrl-names = "default";
index 26cce4d..29cb804 100644 (file)
        hsusb2_phy: hsusb2_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; /* gpio_4 */
+               #phy-cells = <0>;
        };
 };
 
 &gpmc {
-       ranges = <0 0 0x00000000 0x1000000>;    /* CS0: 16MB for NAND */
+       ranges = <0 0 0x30000000 0x1000000>;    /* CS0: 16MB for NAND */
 
        nand@0,0 {
                compatible = "ti,omap2-nand";
 
 &mmc3 {
        interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
-       pinctrl-0 = <&mmc3_pins>;
+       pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
        pinctrl-names = "default";
        vmmc-supply = <&wl12xx_vmmc>;
        non-removable;
        wlcore: wlcore@2 {
                compatible = "ti,wl1273";
                reg = <2>;
-               interrupt-parent = <&gpio5>;
-               interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
+               interrupt-parent = <&gpio1>;
+               interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; /* gpio 2 */
                ref-clock-frequency = <26000000>;
        };
 };
                        OMAP3_CORE1_IOPAD(0x2166, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat5.sdmmc3_dat1 */
                        OMAP3_CORE1_IOPAD(0x2168, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat2 */
                        OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat3 */
-                       OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT_PULLUP | MUX_MODE4) /* mcbsp4_clkx.gpio_152 */
-                       OMAP3_CORE1_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4)       /* sys_boot1.gpio_3 */
                        OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd */
                        OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs2.sdmmc_clk */
                >;
                        OMAP3_WKUP_IOPAD(0x2a0e, PIN_OUTPUT | MUX_MODE4)        /* sys_boot2.gpio_4 */
                >;
        };
+       wl127x_gpio: pinmux_wl127x_gpio_pin {
+               pinctrl-single,pins = <
+                       OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4)         /* sys_boot0.gpio_2 */
+                       OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4)        /* sys_boot1.gpio_3 */
+               >;
+       };
 };
 
 &omap3_pmx_core2 {
index 9408753..67b4de0 100644 (file)
                                reg = <0x2a>;
                                VDDA-supply = <&reg_3p3v>;
                                VDDIO-supply = <&reg_3p3v>;
-                               clocks = <&sys_mclk 1>;
+                               clocks = <&sys_mclk>;
                        };
                };
        };
index a8b148a..44715c8 100644 (file)
                reg = <0x0a>;
                VDDA-supply = <&reg_3p3v>;
                VDDIO-supply = <&reg_3p3v>;
-               clocks = <&sys_mclk 1>;
+               clocks = <&sys_mclk>;
        };
 };
 
index 4926133..0d9faf1 100644 (file)
                                reg = <0x7c00 0x200>;
                        };
 
-                       gpio_intc: interrupt-controller@9880 {
-                               compatible = "amlogic,meson-gpio-intc";
-                               reg = <0xc1109880 0x10>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>;
-                               status = "disabled";
-                       };
-
                        hwrng: rng@8100 {
                                compatible = "amlogic,meson-rng";
                                reg = <0x8100 0x8>;
                                status = "disabled";
                        };
 
+                       gpio_intc: interrupt-controller@9880 {
+                               compatible = "amlogic,meson-gpio-intc";
+                               reg = <0x9880 0x10>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>;
+                               status = "disabled";
+                       };
+
                        wdt: watchdog@9900 {
                                compatible = "amlogic,meson6-wdt";
                                reg = <0x9900 0x8>;
index ec2283b..1a5ae4c 100644 (file)
@@ -56,6 +56,7 @@
 
        usb_phy: usb_phy {
                compatible = "usb-nop-xceiv";
+               #phy-cells = <0>;
        };
 
        vbus_reg: vbus_reg {
index 683b96a..0349fcc 100644 (file)
@@ -90,6 +90,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>; /* gpio_147 */
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        tfp410: encoder0 {
index 4d2eaf8..3ca8991 100644 (file)
@@ -64,6 +64,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>;      /* gpio_147 */
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        sound {
index 31d5ebf..ab6003f 100644 (file)
        hsusb1_phy: hsusb1_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&hsusb1_power>;
+               #phy-cells = <0>;
        };
 
        /* HS USB Host PHY on PORT 2 */
        hsusb2_phy: hsusb2_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        ads7846reg: ads7846-reg {
index dbc3f03..ee64191 100644 (file)
@@ -29,6 +29,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio1 21 GPIO_ACTIVE_LOW>; /* gpio_21 */
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        leds {
index 4504908..3dc56fb 100644 (file)
        hsusb2_phy: hsusb2_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>;
+               #phy-cells = <0>;
        };
 
        tv0: connector {
index 667f962..ecbec23 100644 (file)
@@ -58,6 +58,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>; /* gpio_24 */
                vcc-supply = <&hsusb1_power>;
+               #phy-cells = <0>;
        };
 
        tfp410: encoder {
index e94d942..443f717 100644 (file)
@@ -37,6 +37,7 @@
        hsusb2_phy: hsusb2_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>;              /* gpio_54 */
+               #phy-cells = <0>;
        };
 };
 
index 343a36d..7ada1e9 100644 (file)
@@ -51,6 +51,7 @@
        hsusb1_phy: hsusb1_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_vcc3>;
+               #phy-cells = <0>;
        };
 };
 
index f25e158..ac141fc 100644 (file)
@@ -51,6 +51,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio6 23 GPIO_ACTIVE_LOW>;      /* gpio_183 */
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        /* Regulator to trigger the nPoweron signal of the Wifi module */
index 53e007a..cd53dc6 100644 (file)
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; /* GPIO_16 */
                vcc-supply = <&vaux2>;
+               #phy-cells = <0>;
        };
 
        /* HS USB Host VBUS supply
index 9a601d1..6f5bd02 100644 (file)
@@ -46,6 +46,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio6 2 GPIO_ACTIVE_LOW>;       /* gpio_162 */
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        sound {
index 90b5c71..bb33935 100644 (file)
                                compatible = "ti,ohci-omap3";
                                reg = <0x48064400 0x400>;
                                interrupts = <76>;
+                               remote-wakeup-connected;
                        };
 
                        usbhsehci: ehci@48064800 {
index 8b93d37..24a463f 100644 (file)
@@ -73,6 +73,7 @@
        /* HS USB Host PHY on PORT 1 */
        hsusb1_phy: hsusb1_phy {
                compatible = "usb-nop-xceiv";
+               #phy-cells = <0>;
        };
 
        /* LCD regulator from sw5 source */
index 6e6810c..eb123b2 100644 (file)
@@ -43,6 +43,7 @@
        hsusb1_phy: hsusb1_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>;      /* gpio_62 */
+               #phy-cells = <0>;
 
                pinctrl-names = "default";
                pinctrl-0 = <&hsusb1phy_pins>;
index 22c1eee..5501d1b 100644 (file)
@@ -89,6 +89,7 @@
        hsusb1_phy: hsusb1_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>;   /* gpio_62 */
+               #phy-cells = <0>;
                vcc-supply = <&hsusb1_power>;
                clocks = <&auxclk3_ck>;
                clock-names = "main_clk";
index 6500bfc..10fce28 100644 (file)
@@ -44,6 +44,7 @@
 
                reset-gpios = <&gpio6 17 GPIO_ACTIVE_LOW>; /* gpio 177 */
                vcc-supply = <&vbat>;
+               #phy-cells = <0>;
 
                clocks = <&auxclk3_ck>;
                clock-names = "main_clk";
index 1dc5a76..cc1a07a 100644 (file)
                elm: elm@48078000 {
                        compatible = "ti,am3352-elm";
                        reg = <0x48078000 0x2000>;
-                       interrupts = <4>;
+                       interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
                        ti,hwmods = "elm";
                        status = "disabled";
                };
                        usbhsohci: ohci@4a064800 {
                                compatible = "ti,ohci-omap3";
                                reg = <0x4a064800 0x400>;
-                               interrupt-parent = <&gic>;
                                interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+                               remote-wakeup-connected;
                        };
 
                        usbhsehci: ehci@4a064c00 {
                                compatible = "ti,ehci-omap";
                                reg = <0x4a064c00 0x400>;
-                               interrupt-parent = <&gic>;
                                interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
index 575ecff..1b20838 100644 (file)
                clocks = <&auxclk1_ck>;
                clock-names = "main_clk";
                clock-frequency = <19200000>;
+               #phy-cells = <0>;
        };
 
        /* HS USB Host PHY on PORT 3 */
        hsusb3_phy: hsusb3_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>; /* gpio3_79 ETH_NRESET */
+               #phy-cells = <0>;
        };
 
        tpd12s015: encoder {
index 5b172a0..5e21fb4 100644 (file)
        hsusb2_phy: hsusb2_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>; /* gpio3_76 HUB_RESET */
+               #phy-cells = <0>;
        };
 
        /* HS USB Host PHY on PORT 3 */
        hsusb3_phy: hsusb3_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; /* gpio3_83 ETH_RESET */
+               #phy-cells = <0>;
        };
 
        leds {
index 4cd0005..51a7fb3 100644 (file)
                                compatible = "ti,ohci-omap3";
                                reg = <0x4a064800 0x400>;
                                interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+                               remote-wakeup-connected;
                        };
 
                        usbhsehci: ehci@4a064c00 {
index 2f017fe..62baabd 100644 (file)
                clock-names = "extal", "usb_extal";
                #clock-cells = <2>;
                #power-domain-cells = <0>;
+               #reset-cells = <1>;
        };
 
        prr: chipid@ff000044 {
index 131f65b..3d080e0 100644 (file)
                        clock-names = "extal";
                        #clock-cells = <2>;
                        #power-domain-cells = <0>;
+                       #reset-cells = <1>;
                };
        };
 
index 58eae56..0cd1035 100644 (file)
                clock-names = "extal", "usb_extal";
                #clock-cells = <2>;
                #power-domain-cells = <0>;
+               #reset-cells = <1>;
        };
 
        rst: reset-controller@e6160000 {
index 905e50c..5643976 100644 (file)
                clock-names = "extal", "usb_extal";
                #clock-cells = <2>;
                #power-domain-cells = <0>;
+               #reset-cells = <1>;
        };
 
        rst: reset-controller@e6160000 {
index c6d92c2..d23ee6d 100644 (file)
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&vdd_arm>;
+};
+
 &i2c1 {
        status = "okay";
        clock-frequency = <400000>;
index cd24894..6102e4e 100644 (file)
        iep_mmu: iommu@ff900800 {
                compatible = "rockchip,iommu";
                reg = <0x0 0xff900800 0x0 0x40>;
-               interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
+               interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "iep_mmu";
                #iommu-cells = <0>;
                status = "disabled";
index b91300d..4f2f2ee 100644 (file)
                        reg = <0x01c16000 0x1000>;
                        interrupts = <58>;
                        clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>,
-                                <&ccu 9>,
-                                <&ccu 18>;
+                                <&ccu CLK_PLL_VIDEO0_2X>,
+                                <&ccu CLK_PLL_VIDEO1_2X>;
                        clock-names = "ahb", "mod", "pll-0", "pll-1";
                        dmas = <&dma SUN4I_DMA_NORMAL 16>,
                               <&dma SUN4I_DMA_NORMAL 16>,
 
                                        be1_out_tcon0: endpoint@0 {
                                                reg = <0>;
-                                               remote-endpoint = <&tcon1_in_be0>;
+                                               remote-endpoint = <&tcon0_in_be1>;
                                        };
 
                                        be1_out_tcon1: endpoint@1 {
index 6ae4d95..316cb8b 100644 (file)
@@ -82,8 +82,8 @@
                        reg = <0x01c16000 0x1000>;
                        interrupts = <58>;
                        clocks = <&ccu CLK_AHB_HDMI>, <&ccu CLK_HDMI>,
-                                <&ccu 9>,
-                                <&ccu 16>;
+                                <&ccu CLK_PLL_VIDEO0_2X>,
+                                <&ccu CLK_PLL_VIDEO1_2X>;
                        clock-names = "ahb", "mod", "pll-0", "pll-1";
                        dmas = <&dma SUN4I_DMA_NORMAL 16>,
                               <&dma SUN4I_DMA_NORMAL 16>,
index 8bfa12b..72d3fe4 100644 (file)
                        interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_AHB1_HDMI>, <&ccu CLK_HDMI>,
                                 <&ccu CLK_HDMI_DDC>,
-                                <&ccu 7>,
-                                <&ccu 13>;
+                                <&ccu CLK_PLL_VIDEO0_2X>,
+                                <&ccu CLK_PLL_VIDEO1_2X>;
                        clock-names = "ahb", "mod", "ddc", "pll-0", "pll-1";
                        resets = <&ccu RST_AHB1_HDMI>;
                        reset-names = "ahb";
index 68dfa82..bd0cd32 100644 (file)
                        reg = <0x01c16000 0x1000>;
                        interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>,
-                                <&ccu 9>,
-                                <&ccu 18>;
+                                <&ccu CLK_PLL_VIDEO0_2X>,
+                                <&ccu CLK_PLL_VIDEO1_2X>;
                        clock-names = "ahb", "mod", "pll-0", "pll-1";
                        dmas = <&dma SUN4I_DMA_NORMAL 16>,
                               <&dma SUN4I_DMA_NORMAL 16>,
 
                                        be1_out_tcon0: endpoint@0 {
                                                reg = <0>;
-                                               remote-endpoint = <&tcon1_in_be0>;
+                                               remote-endpoint = <&tcon0_in_be1>;
                                        };
 
                                        be1_out_tcon1: endpoint@1 {
index 9871553..a021ee6 100644 (file)
        status = "okay";
 
        axp81x: pmic@3a3 {
+               compatible = "x-powers,axp813";
                reg = <0x3a3>;
                interrupt-parent = <&r_intc>;
                interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
index 0ec1b0a..ff72a8e 100644 (file)
                        reg = <0x6e000 0x400>;
                        ranges = <0 0x6e000 0x400>;
                        interrupt-parent = <&gic>;
-                       interrupt-controller;
                        #address-cells = <1>;
                        #size-cells = <1>;
 
index 02a6227..4b8edc8 100644 (file)
                                        switch0port10: port@10 {
                                                reg = <10>;
                                                label = "dsa";
-                                               phy-mode = "xgmii";
+                                               phy-mode = "xaui";
                                                link = <&switch1port10>;
                                        };
                                };
                                        switch1port10: port@10 {
                                                reg = <10>;
                                                label = "dsa";
-                                               phy-mode = "xgmii";
+                                               phy-mode = "xaui";
                                                link = <&switch0port10>;
                                        };
                                };
 };
 
 &i2c1 {
-       at24mac602@0 {
+       at24mac602@50 {
                compatible = "atmel,24c02";
                reg = <0x50>;
                read-only;
index 5caaf97..df433ab 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=8
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_CMA=y
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CPU_FREQ=y
@@ -33,6 +34,7 @@ CONFIG_CAN_SUN4I=y
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_ATA=y
 CONFIG_AHCI_SUNXI=y
index c878145..3ab8b37 100644 (file)
 #else
 #define VTTBR_X                (5 - KVM_T0SZ)
 #endif
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  _AC(48, ULL)
 #define VTTBR_VMID_MASK(size)  (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
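The VTTBR_BADDR_MASK change above drops the off-by-one VTTBR_BADDR_SHIFT and
shifts the mask by VTTBR_X directly, so it covers the whole baddr field. A
minimal standalone sketch of the arithmetic, assuming VTTBR_X = 5 (i.e.
KVM_T0SZ = 0) purely for illustration; this is not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const unsigned vttbr_x = 5;	/* assumed value */
		uint64_t old_mask = ((1ULL << (40 - vttbr_x)) - 1) << (vttbr_x - 1);
		uint64_t new_mask = ((1ULL << (40 - vttbr_x)) - 1) << vttbr_x;

		printf("old: %#llx (bits 38:4, off by one)\n",
		       (unsigned long long)old_mask);
		printf("new: %#llx (bits 39:5, matches the baddr field)\n",
		       (unsigned long long)new_mask);
		return 0;
	}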
 
index 242151e..a9f7d3f 100644 (file)
@@ -285,6 +285,11 @@ static inline void kvm_arm_init_debug(void) {}
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu,
+                                            struct kvm_run *run)
+{
+       return false;
+}
 
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
index 2a029bc..1a7a17b 100644 (file)
@@ -221,7 +221,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 }
 #define        __HAVE_ARCH_PTE_SPECIAL
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
 #define pmd_dirty(pmd)         (pmd_isset((pmd), L_PMD_SECT_DIRTY))
 #define pud_page(pud)          pmd_page(__pmd(pud_val(pud)))
index 776757d..e71cc35 100644 (file)
@@ -75,9 +75,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,                                    \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /*
  * how to get the current stack pointer in C
  */
index 4d53de3..4d1cc18 100644 (file)
@@ -7,6 +7,7 @@ generated-y += unistd-oabi.h
 generated-y += unistd-eabi.h
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
index 7f4d80c..0f07579 100644 (file)
        mov     r2, sp
        ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [r2, #\offset + S_PC]!      @ get pc
-       tst     r1, #0xcf
+       tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [sp, #\offset + S_PC]       @ get pc
        add     sp, sp, #\offset + S_SP
-       tst     r1, #0xcf
+       tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
 
index 5cf0488..3e26c6f 100644 (file)
@@ -793,7 +793,6 @@ void abort(void)
        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
 }
-EXPORT_SYMBOL(abort);
 
 void __init trap_init(void)
 {
index 1712f13..b83fdc0 100644 (file)
                .pushsection .text.fixup,"ax"
                .align  4
 9001:          mov     r4, #-EFAULT
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+               ldr     r5, [sp, #9*4]          @ *err_ptr
+#else
                ldr     r5, [sp, #8*4]          @ *err_ptr
+#endif
                str     r4, [r5]
                ldmia   sp, {r1, r2}            @ retrieve dst, len
                add     r2, r2, r1
index 8be04ec..5ace938 100644 (file)
@@ -868,10 +868,10 @@ static const struct dma_slave_map dm365_edma_map[] = {
        { "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 17) },
        { "spi_davinci.3", "tx", EDMA_FILTER_PARAM(0, 18) },
        { "spi_davinci.3", "rx", EDMA_FILTER_PARAM(0, 19) },
-       { "dm6441-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) },
-       { "dm6441-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) },
-       { "dm6441-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) },
-       { "dm6441-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) },
+       { "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) },
+       { "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) },
+       { "da830-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) },
+       { "da830-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) },
 };
 
 static struct edma_soc_info dm365_edma_pdata = {
@@ -925,12 +925,14 @@ static struct resource edma_resources[] = {
        /* not using TC*_ERR */
 };
 
-static struct platform_device dm365_edma_device = {
-       .name                   = "edma",
-       .id                     = 0,
-       .dev.platform_data      = &dm365_edma_pdata,
-       .num_resources          = ARRAY_SIZE(edma_resources),
-       .resource               = edma_resources,
+static const struct platform_device_info dm365_edma_device __initconst = {
+       .name           = "edma",
+       .id             = 0,
+       .dma_mask       = DMA_BIT_MASK(32),
+       .res            = edma_resources,
+       .num_res        = ARRAY_SIZE(edma_resources),
+       .data           = &dm365_edma_pdata,
+       .size_data      = sizeof(dm365_edma_pdata),
 };
 
 static struct resource dm365_asp_resources[] = {
@@ -1428,13 +1430,18 @@ int __init dm365_init_video(struct vpfe_config *vpfe_cfg,
 
 static int __init dm365_init_devices(void)
 {
+       struct platform_device *edma_pdev;
        int ret = 0;
 
        if (!cpu_is_davinci_dm365())
                return 0;
 
        davinci_cfg_reg(DM365_INT_EDMA_CC);
-       platform_device_register(&dm365_edma_device);
+       edma_pdev = platform_device_register_full(&dm365_edma_device);
+       if (IS_ERR(edma_pdev)) {
+               pr_warn("%s: Failed to register eDMA\n", __func__);
+               return PTR_ERR(edma_pdev);
+       }
 
        platform_device_register(&dm365_mdio_device);
        platform_device_register(&dm365_emac_device);
index 2555f90..cad7ee8 100644 (file)
@@ -102,7 +102,7 @@ static void __init meson_smp_prepare_cpus(const char *scu_compatible,
 
        scu_base = of_iomap(node, 0);
        if (!scu_base) {
-               pr_err("Couln't map SCU registers\n");
+               pr_err("Couldn't map SCU registers\n");
                return;
        }
 
index d555791..83c6fa7 100644 (file)
@@ -68,14 +68,17 @@ void __init omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2)
 int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst,
                        u8 *idlest_reg_id)
 {
+       int ret;
        if (!cm_ll_data->split_idlest_reg) {
                WARN_ONCE(1, "cm: %s: no low-level function defined\n",
                          __func__);
                return -EINVAL;
        }
 
-       return cm_ll_data->split_idlest_reg(idlest_reg, prcm_inst,
+       ret = cm_ll_data->split_idlest_reg(idlest_reg, prcm_inst,
                                           idlest_reg_id);
+       *prcm_inst -= cm_base.offset;
+       return ret;
 }
 
 /**
@@ -337,6 +340,7 @@ int __init omap2_cm_base_init(void)
                if (mem) {
                        mem->pa = res.start + data->offset;
                        mem->va = data->mem + data->offset;
+                       mem->offset = data->offset;
                }
 
                data->np = np;
index 5ac122e..fa7f308 100644 (file)
@@ -73,6 +73,27 @@ phys_addr_t omap_secure_ram_mempool_base(void)
        return omap_secure_memblock_base;
 }
 
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
+u32 omap3_save_secure_ram(void __iomem *addr, int size)
+{
+       u32 ret;
+       u32 param[5];
+
+       if (size != OMAP3_SAVE_SECURE_RAM_SZ)
+               return OMAP3_SAVE_SECURE_RAM_SZ;
+
+       param[0] = 4;           /* Number of arguments */
+       param[1] = __pa(addr);  /* Physical address for saving */
+       param[2] = 0;
+       param[3] = 1;
+       param[4] = 1;
+
+       ret = save_secure_ram_context(__pa(param));
+
+       return ret;
+}
+#endif
+
 /**
  * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
  * @idx: The PPA API index
index bae263f..c509cde 100644 (file)
@@ -31,6 +31,8 @@
 /* Maximum Secure memory storage size */
 #define OMAP_SECURE_RAM_STORAGE        (88 * SZ_1K)
 
+#define OMAP3_SAVE_SECURE_RAM_SZ       0x803F
+
 /* Secure low power HAL API index */
 #define OMAP4_HAL_SAVESECURERAM_INDEX  0x1a
 #define OMAP4_HAL_SAVEHW_INDEX         0x1b
@@ -65,6 +67,8 @@ extern u32 omap_smc2(u32 id, u32 falg, u32 pargs);
 extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs);
 extern phys_addr_t omap_secure_ram_mempool_base(void);
 extern int omap_secure_ram_reserve_memblock(void);
+extern u32 save_secure_ram_context(u32 args_pa);
+extern u32 omap3_save_secure_ram(void __iomem *save_regs, int size);
 
 extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
                                  u32 arg1, u32 arg2, u32 arg3, u32 arg4);
index d45cbfd..f038805 100644 (file)
@@ -391,10 +391,8 @@ omap_device_copy_resources(struct omap_hwmod *oh,
        const char *name;
        int error, irq = 0;
 
-       if (!oh || !oh->od || !oh->od->pdev) {
-               error = -EINVAL;
-               goto error;
-       }
+       if (!oh || !oh->od || !oh->od->pdev)
+               return -EINVAL;
 
        np = oh->od->pdev->dev.of_node;
        if (!np) {
@@ -516,8 +514,10 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
                goto odbs_exit1;
 
        od = omap_device_alloc(pdev, &oh, 1);
-       if (IS_ERR(od))
+       if (IS_ERR(od)) {
+               ret = PTR_ERR(od);
                goto odbs_exit1;
+       }
 
        ret = platform_device_add_data(pdev, pdata, pdata_len);
        if (ret)
index d2106ae..52c9d58 100644 (file)
@@ -1646,6 +1646,7 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = {
        .main_clk       = "mmchs3_fck",
        .prcm           = {
                .omap2 = {
+                       .module_offs = CORE_MOD,
                        .prcm_reg_id = 1,
                        .module_bit = OMAP3430_EN_MMC3_SHIFT,
                        .idlest_reg_id = 1,
index b668719..8e30772 100644 (file)
@@ -81,10 +81,6 @@ extern unsigned int omap3_do_wfi_sz;
 /* ... and its pointer from SRAM after copy */
 extern void (*omap3_do_wfi_sram)(void);
 
-/* save_secure_ram_context function pointer and size, for copy to SRAM */
-extern int save_secure_ram_context(u32 *addr);
-extern unsigned int save_secure_ram_context_sz;
-
 extern void omap3_save_scratchpad_contents(void);
 
 #define PM_RTA_ERRATUM_i608            (1 << 0)
index 841ba19..36c5554 100644 (file)
@@ -48,6 +48,7 @@
 #include "prm3xxx.h"
 #include "pm.h"
 #include "sdrc.h"
+#include "omap-secure.h"
 #include "sram.h"
 #include "control.h"
 #include "vc.h"
@@ -66,7 +67,6 @@ struct power_state {
 
 static LIST_HEAD(pwrst_list);
 
-static int (*_omap_save_secure_sram)(u32 *addr);
 void (*omap3_do_wfi_sram)(void);
 
 static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
@@ -121,8 +121,8 @@ static void omap3_save_secure_ram_context(void)
                 * will hang the system.
                 */
                pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
-               ret = _omap_save_secure_sram((u32 *)(unsigned long)
-                               __pa(omap3_secure_ram_storage));
+               ret = omap3_save_secure_ram(omap3_secure_ram_storage,
+                                           OMAP3_SAVE_SECURE_RAM_SZ);
                pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
                /* Following is for error tracking, it should not happen */
                if (ret) {
@@ -434,15 +434,10 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
  *
  * The minimum set of functions is pushed to SRAM for execution:
  * - omap3_do_wfi for erratum i581 WA,
- * - save_secure_ram_context for security extensions.
  */
 void omap_push_sram_idle(void)
 {
        omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
-
-       if (omap_type() != OMAP2_DEVICE_TYPE_GP)
-               _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
-                               save_secure_ram_context_sz);
 }
 
 static void __init pm_errata_configure(void)
@@ -553,7 +548,7 @@ int __init omap3_pm_init(void)
        clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
        if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
                omap3_secure_ram_storage =
-                       kmalloc(0x803F, GFP_KERNEL);
+                       kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
                if (!omap3_secure_ram_storage)
                        pr_err("Memory allocation failed when allocating for secure sram context\n");
 
index 0592b23..0977da0 100644 (file)
@@ -528,6 +528,7 @@ struct omap_prcm_irq_setup {
 struct omap_domain_base {
        u32 pa;
        void __iomem *va;
+       s16 offset;
 };
 
 /**
index d2c5bca..ebaf80d 100644 (file)
@@ -176,17 +176,6 @@ static int am33xx_pwrdm_read_pwrst(struct powerdomain *pwrdm)
        return v;
 }
 
-static int am33xx_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
-{
-       u32 v;
-
-       v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
-       v &= AM33XX_LASTPOWERSTATEENTERED_MASK;
-       v >>= AM33XX_LASTPOWERSTATEENTERED_SHIFT;
-
-       return v;
-}
-
 static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
 {
        am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK,
@@ -357,7 +346,6 @@ struct pwrdm_ops am33xx_pwrdm_operations = {
        .pwrdm_set_next_pwrst           = am33xx_pwrdm_set_next_pwrst,
        .pwrdm_read_next_pwrst          = am33xx_pwrdm_read_next_pwrst,
        .pwrdm_read_pwrst               = am33xx_pwrdm_read_pwrst,
-       .pwrdm_read_prev_pwrst          = am33xx_pwrdm_read_prev_pwrst,
        .pwrdm_set_logic_retst          = am33xx_pwrdm_set_logic_retst,
        .pwrdm_read_logic_pwrst         = am33xx_pwrdm_read_logic_pwrst,
        .pwrdm_read_logic_retst         = am33xx_pwrdm_read_logic_retst,
index fa5fd24..22daf4e 100644 (file)
@@ -93,20 +93,13 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
 ENDPROC(enable_omap3630_toggle_l2_on_restore)
 
 /*
- * Function to call rom code to save secure ram context. This gets
- * relocated to SRAM, so it can be all in .data section. Otherwise
- * we need to initialize api_params separately.
+ * Function to call rom code to save secure ram context.
+ *
+ * r0 = physical address of the parameters
  */
-       .data
-       .align  3
 ENTRY(save_secure_ram_context)
        stmfd   sp!, {r4 - r11, lr}     @ save registers on stack
-       adr     r3, api_params          @ r3 points to parameters
-       str     r0, [r3,#0x4]           @ r0 has sdram address
-       ldr     r12, high_mask
-       and     r3, r3, r12
-       ldr     r12, sram_phy_addr_mask
-       orr     r3, r3, r12
+       mov     r3, r0                  @ physical address of parameters
        mov     r0, #25                 @ set service ID for PPA
        mov     r12, r0                 @ copy secure service ID in r12
        mov     r1, #0                  @ set task id for ROM code in r1
@@ -120,18 +113,7 @@ ENTRY(save_secure_ram_context)
        nop
        nop
        ldmfd   sp!, {r4 - r11, pc}
-       .align
-sram_phy_addr_mask:
-       .word   SRAM_BASE_P
-high_mask:
-       .word   0xffff
-api_params:
-       .word   0x4, 0x0, 0x0, 0x1, 0x1
 ENDPROC(save_secure_ram_context)
-ENTRY(save_secure_ram_context_sz)
-       .word   . - save_secure_ram_context
-
-       .text
 
 /*
  * ======================
index c199990..323a4df 100644 (file)
 
 int bpf_jit_enable __read_mostly;
 
+/*
+ * eBPF prog stack layout:
+ *
+ *                         high
+ * original ARM_SP =>     +-----+
+ *                        |     | callee saved registers
+ *                        +-----+ <= (BPF_FP + SCRATCH_SIZE)
+ *                        | ... | eBPF JIT scratch space
+ * eBPF fp register =>    +-----+
+ *   (BPF_FP)             | ... | eBPF prog stack
+ *                        +-----+
+ *                        |RSVD | JIT scratchpad
+ * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
+ *                        |     |
+ *                        | ... | Function call stack
+ *                        |     |
+ *                        +-----+
+ *                          low
+ *
+ * The callee saved registers depend on whether frame pointers are enabled.
+ * With frame pointers (to be compliant with the ABI):
+ *
+ *                                high
+ * original ARM_SP =>     +------------------+ \
+ *                        |        pc        | |
+ * current ARM_FP =>      +------------------+ } callee saved registers
+ *                        |r4-r8,r10,fp,ip,lr| |
+ *                        +------------------+ /
+ *                                low
+ *
+ * Without frame pointers:
+ *
+ *                                high
+ * original ARM_SP =>     +------------------+
+ *                        | r4-r8,r10,fp,lr  | callee saved registers
+ * current ARM_FP =>      +------------------+
+ *                                low
+ *
+ * When popping registers off the stack at the end of a BPF function, we
+ * reference them via the current ARM_FP register.
+ */
+#define CALLEE_MASK    (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
+                        1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
+                        1 << ARM_FP)
+#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
+#define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)
+
 #define STACK_OFFSET(k)        (k)
 #define TMP_REG_1      (MAX_BPF_JIT_REG + 0)   /* TEMP Register 1 */
 #define TMP_REG_2      (MAX_BPF_JIT_REG + 1)   /* TEMP Register 2 */
 #define TCALL_CNT      (MAX_BPF_JIT_REG + 2)   /* Tail Call Count */
 
-/* Flags used for JIT optimization */
-#define SEEN_CALL      (1 << 0)
-
 #define FLAG_IMM_OVERFLOW      (1 << 0)
 
 /*
@@ -95,7 +139,6 @@ static const u8 bpf2a32[][2] = {
  * idx                 :       index of current last JITed instruction.
  * prologue_bytes      :       bytes used in prologue.
  * epilogue_offset     :       offset of epilogue starting.
- * seen                        :       bit mask used for JIT optimization.
  * offsets             :       array of eBPF instruction offsets in
  *                             JITed code.
  * target              :       final JITed code.
@@ -110,7 +153,6 @@ struct jit_ctx {
        unsigned int idx;
        unsigned int prologue_bytes;
        unsigned int epilogue_offset;
-       u32 seen;
        u32 flags;
        u32 *offsets;
        u32 *target;
@@ -179,8 +221,13 @@ static void jit_fill_hole(void *area, unsigned int size)
                *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
 }
 
-/* Stack must be multiples of 16 Bytes */
-#define STACK_ALIGN(sz) (((sz) + 3) & ~3)
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+/* EABI requires the stack to be aligned to 64-bit boundaries */
+#define STACK_ALIGNMENT        8
+#else
+/* Stack must be aligned to 32-bit boundaries */
+#define STACK_ALIGNMENT        4
+#endif
 
 /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
  * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
@@ -194,7 +241,7 @@ static void jit_fill_hole(void *area, unsigned int size)
         + SCRATCH_SIZE + \
         + 4 /* extra for skb_copy_bits buffer */)
 
-#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
+#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
 
 /* Get the offset of eBPF REGISTERs stored on scratch space. */
 #define STACK_VAR(off) (STACK_SIZE-off-4)
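The STACK_SIZE definition above switches from an open-coded 4-byte round-up to
the kernel's ALIGN() helper so that, when EABI is in use, the JIT stack stays
8-byte aligned. A standalone sketch of the rounding arithmetic (the macro body
mirrors the generic helper; the byte counts are made-up examples):

	/* Round x up to the next multiple of a power-of-two a. */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	/* A 44-byte scratch area is already a multiple of 4, but has to grow
	 * to 48 bytes to satisfy the 8-byte EABI stack alignment. */
	_Static_assert(ALIGN_UP(44, 4) == 44, "4-byte alignment keeps 44");
	_Static_assert(ALIGN_UP(44, 8) == 48, "8-byte alignment rounds up to 48");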
@@ -285,16 +332,19 @@ static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
                emit_mov_i_no8m(rd, val, ctx);
 }
 
-static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
+static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
 {
-       ctx->seen |= SEEN_CALL;
-#if __LINUX_ARM_ARCH__ < 5
-       emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
-
        if (elf_hwcap & HWCAP_THUMB)
                emit(ARM_BX(tgt_reg), ctx);
        else
                emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
+}
+
+static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
+{
+#if __LINUX_ARM_ARCH__ < 5
+       emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
+       emit_bx_r(tgt_reg, ctx);
 #else
        emit(ARM_BLX_R(tgt_reg), ctx);
 #endif
@@ -354,7 +404,6 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
        }
 
        /* Call appropriate function */
-       ctx->seen |= SEEN_CALL;
        emit_mov_i(ARM_IP, op == BPF_DIV ?
                   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
        emit_blx_r(ARM_IP, ctx);
@@ -620,8 +669,6 @@ static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
        /* Do LSH operation */
        emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
        emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
-       /* As we are using ARM_LR */
-       ctx->seen |= SEEN_CALL;
        emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
        emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
        emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
@@ -656,8 +703,6 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
        /* Do the ARSH operation */
        emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
        emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
-       /* As we are using ARM_LR */
-       ctx->seen |= SEEN_CALL;
        emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
        emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
        _emit(ARM_COND_MI, ARM_B(0), ctx);
@@ -692,8 +737,6 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
        /* Do LSH operation */
        emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
        emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
-       /* As we are using ARM_LR */
-       ctx->seen |= SEEN_CALL;
        emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
        emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
        emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
@@ -828,8 +871,6 @@ static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
        /* Do Multiplication */
        emit(ARM_MUL(ARM_IP, rd, rn), ctx);
        emit(ARM_MUL(ARM_LR, rm, rt), ctx);
-       /* As we are using ARM_LR */
-       ctx->seen |= SEEN_CALL;
        emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
 
        emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
@@ -872,33 +913,53 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
 }
 
 /* dst = *(size*)(src + off) */
-static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk,
-                             const s32 off, struct jit_ctx *ctx, const u8 sz){
+static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
+                             s32 off, struct jit_ctx *ctx, const u8 sz){
        const u8 *tmp = bpf2a32[TMP_REG_1];
-       u8 rd = dstk ? tmp[1] : dst;
+       const u8 *rd = dstk ? tmp : dst;
        u8 rm = src;
+       s32 off_max;
 
-       if (off) {
+       if (sz == BPF_H)
+               off_max = 0xff;
+       else
+               off_max = 0xfff;
+
+       if (off < 0 || off > off_max) {
                emit_a32_mov_i(tmp[0], off, false, ctx);
                emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
                rm = tmp[0];
+               off = 0;
+       } else if (rd[1] == rm) {
+               emit(ARM_MOV_R(tmp[0], rm), ctx);
+               rm = tmp[0];
        }
        switch (sz) {
-       case BPF_W:
-               /* Load a Word */
-               emit(ARM_LDR_I(rd, rm, 0), ctx);
+       case BPF_B:
+               /* Load a Byte */
+               emit(ARM_LDRB_I(rd[1], rm, off), ctx);
+               emit_a32_mov_i(dst[0], 0, dstk, ctx);
                break;
        case BPF_H:
                /* Load a HalfWord */
-               emit(ARM_LDRH_I(rd, rm, 0), ctx);
+               emit(ARM_LDRH_I(rd[1], rm, off), ctx);
+               emit_a32_mov_i(dst[0], 0, dstk, ctx);
                break;
-       case BPF_B:
-               /* Load a Byte */
-               emit(ARM_LDRB_I(rd, rm, 0), ctx);
+       case BPF_W:
+               /* Load a Word */
+               emit(ARM_LDR_I(rd[1], rm, off), ctx);
+               emit_a32_mov_i(dst[0], 0, dstk, ctx);
+               break;
+       case BPF_DW:
+               /* Load a Double Word */
+               emit(ARM_LDR_I(rd[1], rm, off), ctx);
+               emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
                break;
        }
        if (dstk)
-               emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+               emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
+       if (dstk && sz == BPF_DW)
+               emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
 }
 
 /* Arithmetic Operation */
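The emit_ldx_r() rework above folds the offset into the load instruction only
when it fits the immediate field (8 bits for LDRH, 12 bits for LDR and LDRB);
larger or negative offsets are first added into a scratch register. A minimal
standalone sketch of that bound check (names are illustrative, not kernel
code):

	/* Mirrors the off/off_max decision in emit_ldx_r() above. */
	static int offset_fits_immediate(int is_halfword_load, int off)
	{
		int off_max = is_halfword_load ? 0xff : 0xfff;

		return off >= 0 && off <= off_max;
	}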
@@ -906,7 +967,6 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
                             const u8 rn, struct jit_ctx *ctx, u8 op) {
        switch (op) {
        case BPF_JSET:
-               ctx->seen |= SEEN_CALL;
                emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
                emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
                emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
@@ -945,7 +1005,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
        const u8 *tcc = bpf2a32[TCALL_CNT];
        const int idx0 = ctx->idx;
 #define cur_offset (ctx->idx - idx0)
-#define jmp_offset (out_offset - (cur_offset))
+#define jmp_offset (out_offset - (cur_offset) - 2)
        u32 off, lo, hi;
 
        /* if (index >= array->map.max_entries)
@@ -956,7 +1016,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
        emit_a32_mov_i(tmp[1], off, false, ctx);
        emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
        emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
-       /* index (64 bit) */
+       /* index is 32-bit for arrays */
        emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
        /* index >= array->map.max_entries */
        emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
@@ -997,7 +1057,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
        emit_a32_mov_i(tmp2[1], off, false, ctx);
        emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
        emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
-       emit(ARM_BX(tmp[1]), ctx);
+       emit_bx_r(tmp[1], ctx);
 
        /* out: */
        if (out_offset == -1)
@@ -1070,54 +1130,22 @@ static void build_prologue(struct jit_ctx *ctx)
        const u8 r2 = bpf2a32[BPF_REG_1][1];
        const u8 r3 = bpf2a32[BPF_REG_1][0];
        const u8 r4 = bpf2a32[BPF_REG_6][1];
-       const u8 r5 = bpf2a32[BPF_REG_6][0];
-       const u8 r6 = bpf2a32[TMP_REG_1][1];
-       const u8 r7 = bpf2a32[TMP_REG_1][0];
-       const u8 r8 = bpf2a32[TMP_REG_2][1];
-       const u8 r10 = bpf2a32[TMP_REG_2][0];
        const u8 fplo = bpf2a32[BPF_REG_FP][1];
        const u8 fphi = bpf2a32[BPF_REG_FP][0];
-       const u8 sp = ARM_SP;
        const u8 *tcc = bpf2a32[TCALL_CNT];
 
-       u16 reg_set = 0;
-
-       /*
-        * eBPF prog stack layout
-        *
-        *                         high
-        * original ARM_SP =>     +-----+ eBPF prologue
-        *                        |FP/LR|
-        * current ARM_FP =>      +-----+
-        *                        | ... | callee saved registers
-        * eBPF fp register =>    +-----+ <= (BPF_FP)
-        *                        | ... | eBPF JIT scratch space
-        *                        |     | eBPF prog stack
-        *                        +-----+
-        *                        |RSVD | JIT scratchpad
-        * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
-        *                        |     |
-        *                        | ... | Function call stack
-        *                        |     |
-        *                        +-----+
-        *                          low
-        */
-
        /* Save callee saved registers. */
-       reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
 #ifdef CONFIG_FRAME_POINTER
-       reg_set |= (1<<ARM_FP) | (1<<ARM_IP) | (1<<ARM_LR) | (1<<ARM_PC);
-       emit(ARM_MOV_R(ARM_IP, sp), ctx);
+       u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
+       emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
        emit(ARM_PUSH(reg_set), ctx);
        emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
 #else
-       /* Check if call instruction exists in BPF body */
-       if (ctx->seen & SEEN_CALL)
-               reg_set |= (1<<ARM_LR);
-       emit(ARM_PUSH(reg_set), ctx);
+       emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
+       emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
 #endif
        /* Save frame pointer for later */
-       emit(ARM_SUB_I(ARM_IP, sp, SCRATCH_SIZE), ctx);
+       emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);
 
        ctx->stack_size = imm8m(STACK_SIZE);
 
@@ -1140,33 +1168,19 @@ static void build_prologue(struct jit_ctx *ctx)
        /* end of prologue */
 }
 
+/* restore callee saved registers. */
 static void build_epilogue(struct jit_ctx *ctx)
 {
-       const u8 r4 = bpf2a32[BPF_REG_6][1];
-       const u8 r5 = bpf2a32[BPF_REG_6][0];
-       const u8 r6 = bpf2a32[TMP_REG_1][1];
-       const u8 r7 = bpf2a32[TMP_REG_1][0];
-       const u8 r8 = bpf2a32[TMP_REG_2][1];
-       const u8 r10 = bpf2a32[TMP_REG_2][0];
-       u16 reg_set = 0;
-
-       /* unwind function call stack */
-       emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
-
-       /* restore callee saved registers. */
-       reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
 #ifdef CONFIG_FRAME_POINTER
-       /* the first instruction of the prologue was: mov ip, sp */
-       reg_set |= (1<<ARM_FP) | (1<<ARM_SP) | (1<<ARM_PC);
+       /* When using frame pointers, some additional registers need to
+        * be loaded. */
+       u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
+       emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
        emit(ARM_LDM(ARM_SP, reg_set), ctx);
 #else
-       if (ctx->seen & SEEN_CALL)
-               reg_set |= (1<<ARM_PC);
        /* Restore callee saved registers. */
-       emit(ARM_POP(reg_set), ctx);
-       /* Return back to the callee function */
-       if (!(ctx->seen & SEEN_CALL))
-               emit(ARM_BX(ARM_LR), ctx);
+       emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
+       emit(ARM_POP(CALLEE_POP_MASK), ctx);
 #endif
 }
 
@@ -1394,8 +1408,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                        emit_rev32(rt, rt, ctx);
                        goto emit_bswap_uxt;
                case 64:
-                       /* Because of the usage of ARM_LR */
-                       ctx->seen |= SEEN_CALL;
                        emit_rev32(ARM_LR, rt, ctx);
                        emit_rev32(rt, rd, ctx);
                        emit(ARM_MOV_R(rd, ARM_LR), ctx);
@@ -1448,22 +1460,7 @@ exit:
                rn = sstk ? tmp2[1] : src_lo;
                if (sstk)
                        emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
-               switch (BPF_SIZE(code)) {
-               case BPF_W:
-                       /* Load a Word */
-               case BPF_H:
-                       /* Load a Half-Word */
-               case BPF_B:
-                       /* Load a Byte */
-                       emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code));
-                       emit_a32_mov_i(dst_hi, 0, dstk, ctx);
-                       break;
-               case BPF_DW:
-                       /* Load a double word */
-                       emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W);
-                       emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W);
-                       break;
-               }
+               emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
                break;
        /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
        case BPF_LD | BPF_ABS | BPF_W:
index a93339f..c9a7e9e 100644 (file)
@@ -557,7 +557,6 @@ config QCOM_QDF2400_ERRATUM_0065
 
          If unsure, say Y.
 
-
 config SOCIONEXT_SYNQUACER_PREITS
        bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
        default y
@@ -576,6 +575,17 @@ config HISILICON_ERRATUM_161600802
          a 128kB offset to be applied to the target address in these commands.
 
          If unsure, say Y.
+
+config QCOM_FALKOR_ERRATUM_E1041
+       bool "Falkor E1041: Speculative instruction fetches might cause errant memory access"
+       default y
+       help
+         Falkor CPU may speculatively fetch instructions from an improper
+         memory location when MMU translation is changed from SCTLR_ELn[M]=1
+         to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem.
+
+         If unsure, say Y.
+
 endmenu
 
 
index b35788c..b481b4a 100644 (file)
@@ -83,9 +83,6 @@ endif
 
 ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
 KBUILD_LDFLAGS_MODULE  += -T $(srctree)/arch/arm64/kernel/module.lds
-ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
-KBUILD_LDFLAGS_MODULE  += $(objtree)/arch/arm64/kernel/ftrace-mod.o
-endif
 endif
 
 # Default value
index d7c22d5..4aa50b9 100644 (file)
@@ -12,6 +12,7 @@ subdir-y += cavium
 subdir-y += exynos
 subdir-y += freescale
 subdir-y += hisilicon
+subdir-y += lg
 subdir-y += marvell
 subdir-y += mediatek
 subdir-y += nvidia
@@ -22,5 +23,4 @@ subdir-y += rockchip
 subdir-y += socionext
 subdir-y += sprd
 subdir-y += xilinx
-subdir-y += lg
 subdir-y += zte
index 45bdbfb..4a8d3f8 100644 (file)
@@ -75,6 +75,7 @@
        pinctrl-0 = <&rgmii_pins>;
        phy-mode = "rgmii";
        phy-handle = <&ext_rgmii_phy>;
+       phy-supply = <&reg_dc1sw>;
        status = "okay";
 };
 
index 806442d..604cdae 100644 (file)
@@ -77,6 +77,7 @@
        pinctrl-0 = <&rmii_pins>;
        phy-mode = "rmii";
        phy-handle = <&ext_rmii_phy1>;
+       phy-supply = <&reg_dc1sw>;
        status = "okay";
 
 };
index 0eb2ace..abe179d 100644 (file)
@@ -82,6 +82,7 @@
        pinctrl-0 = <&rgmii_pins>;
        phy-mode = "rgmii";
        phy-handle = <&ext_rgmii_phy>;
+       phy-supply = <&reg_dc1sw>;
        status = "okay";
 };
 
@@ -95,7 +96,7 @@
 &mmc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&mmc2_pins>;
-       vmmc-supply = <&reg_vcc3v3>;
+       vmmc-supply = <&reg_dcdc1>;
        vqmmc-supply = <&reg_vcc1v8>;
        bus-width = <8>;
        non-removable;
index a5da18a..43418bd 100644 (file)
 
 #include "sun50i-a64.dtsi"
 
-/ {
-       reg_vcc3v3: vcc3v3 {
-               compatible = "regulator-fixed";
-               regulator-name = "vcc3v3";
-               regulator-min-microvolt = <3300000>;
-               regulator-max-microvolt = <3300000>;
-       };
-};
-
 &mmc0 {
        pinctrl-names = "default";
        pinctrl-0 = <&mmc0_pins>;
-       vmmc-supply = <&reg_vcc3v3>;
+       vmmc-supply = <&reg_dcdc1>;
        non-removable;
        disable-wp;
        bus-width = <4>;
index b6b7a56..a42fd79 100644 (file)
@@ -71,7 +71,7 @@
        pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
        vmmc-supply = <&reg_vcc3v3>;
        bus-width = <4>;
-       cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
index 7c9bdc7..9db1931 100644 (file)
@@ -66,6 +66,7 @@
                                     <&cpu1>,
                                     <&cpu2>,
                                     <&cpu3>;
+               interrupt-parent = <&intc>;
        };
 
        psci {
index ead895a..1fb8b9d 100644 (file)
 
 &uart_B {
        clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>;
-       clock-names = "xtal", "core", "baud";
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_C {
        clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>;
-       clock-names = "xtal", "core", "baud";
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &vpu {
index 8ed981f..6524b89 100644 (file)
 
 &uart_A {
        clocks = <&xtal>, <&clkc CLKID_UART0>, <&xtal>;
-       clock-names = "xtal", "core", "baud";
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_AO {
 
 &uart_B {
        clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>;
-       clock-names = "xtal", "core", "baud";
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_C {
        clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>;
-       clock-names = "xtal", "core", "baud";
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &vpu {
index e3b64d0..9c7724e 100644 (file)
                        cpm_ethernet: ethernet@0 {
                                compatible = "marvell,armada-7k-pp22";
                                reg = <0x0 0x100000>, <0x129000 0xb000>;
-                               clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>;
-                               clock-names = "pp_clk", "gop_clk", "mg_clk";
+                               clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>,
+                                        <&cpm_clk 1 5>, <&cpm_clk 1 18>;
+                               clock-names = "pp_clk", "gop_clk",
+                                             "mg_clk","axi_clk";
                                marvell,system-controller = <&cpm_syscon0>;
                                status = "disabled";
                                dma-coherent;
                                #size-cells = <0>;
                                compatible = "marvell,orion-mdio";
                                reg = <0x12a200 0x10>;
-                               clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>;
+                               clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>,
+                                        <&cpm_clk 1 6>, <&cpm_clk 1 18>;
                                status = "disabled";
                        };
 
                                compatible = "marvell,armada-cp110-sdhci";
                                reg = <0x780000 0x300>;
                                interrupts = <ICU_GRP_NSR 27 IRQ_TYPE_LEVEL_HIGH>;
-                               clock-names = "core";
-                               clocks = <&cpm_clk 1 4>;
+                               clock-names = "core","axi";
+                               clocks = <&cpm_clk 1 4>, <&cpm_clk 1 18>;
                                dma-coherent;
                                status = "disabled";
                        };
index 0d51096..87ac68b 100644 (file)
                        cps_ethernet: ethernet@0 {
                                compatible = "marvell,armada-7k-pp22";
                                reg = <0x0 0x100000>, <0x129000 0xb000>;
-                               clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>;
-                               clock-names = "pp_clk", "gop_clk", "mg_clk";
+                               clocks = <&cps_clk 1 3>, <&cps_clk 1 9>,
+                                        <&cps_clk 1 5>, <&cps_clk 1 18>;
+                               clock-names = "pp_clk", "gop_clk",
+                                             "mg_clk", "axi_clk";
                                marvell,system-controller = <&cps_syscon0>;
                                status = "disabled";
                                dma-coherent;
                                #size-cells = <0>;
                                compatible = "marvell,orion-mdio";
                                reg = <0x12a200 0x10>;
-                               clocks = <&cps_clk 1 9>, <&cps_clk 1 5>;
+                               clocks = <&cps_clk 1 9>, <&cps_clk 1 5>,
+                                        <&cps_clk 1 6>, <&cps_clk 1 18>;
                                status = "disabled";
                        };
 
index a298df7..dbe2648 100644 (file)
 &avb {
        pinctrl-0 = <&avb_pins>;
        pinctrl-names = "default";
-       renesas,no-ether-link;
        phy-handle = <&phy0>;
        status = "okay";
 
index 0d85b31..73439cf 100644 (file)
 &avb {
        pinctrl-0 = <&avb_pins>;
        pinctrl-names = "default";
-       renesas,no-ether-link;
        phy-handle = <&phy0>;
        status = "okay";
 
index d4f8078..3890468 100644 (file)
        assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
        assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>;
        clock_in_out = "input";
+       /* shows instability at 1GBit right now */
+       max-speed = <100>;
        phy-supply = <&vcc_io>;
        phy-mode = "rgmii";
        pinctrl-names = "default";
index 41d6184..2426da6 100644 (file)
        tsadc: tsadc@ff250000 {
                compatible = "rockchip,rk3328-tsadc";
                reg = <0x0 0xff250000 0x0 0x100>;
-               interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
+               interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
                assigned-clocks = <&cru SCLK_TSADC>;
                assigned-clock-rates = <50000>;
                clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
index 910628d..1fc5060 100644 (file)
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
        };
-
-       vdd_log: vdd-log {
-               compatible = "pwm-regulator";
-               pwms = <&pwm2 0 25000 0>;
-               regulator-name = "vdd_log";
-               regulator-min-microvolt = <800000>;
-               regulator-max-microvolt = <1400000>;
-               regulator-always-on;
-               regulator-boot-on;
-               status = "okay";
-       };
 };
 
 &cpu_b0 {
index dd7193a..6bdefb2 100644 (file)
@@ -40,7 +40,6 @@
 };
 
 &ethsc {
-       interrupt-parent = <&gpio>;
        interrupts = <0 8>;
 };
 
index d99e373..254d679 100644 (file)
@@ -40,7 +40,6 @@
 };
 
 &ethsc {
-       interrupt-parent = <&gpio>;
        interrupts = <0 8>;
 };
 
index 864feeb..f9f06fc 100644 (file)
@@ -38,8 +38,7 @@
 };
 
 &ethsc {
-       interrupt-parent = <&gpio>;
-       interrupts = <0 8>;
+       interrupts = <4 8>;
 };
 
 &serial0 {
index 48e7331..0ac2ace 100644 (file)
                        gpio-controller;
                        #gpio-cells = <2>;
                        gpio-ranges = <&pinctrl 0 0 0>,
-                                     <&pinctrl 96 0 0>,
-                                     <&pinctrl 160 0 0>;
+                                     <&pinctrl 104 0 0>,
+                                     <&pinctrl 168 0 0>;
                        gpio-ranges-group-names = "gpio_range0",
                                                  "gpio_range1",
                                                  "gpio_range2";
index aef72d8..8b16828 100644 (file)
@@ -512,4 +512,14 @@ alternative_else_nop_endif
 #endif
        .endm
 
+/**
+ * Errata workaround prior to disable MMU. Insert an ISB immediately prior
+ * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
+ */
+       .macro pre_disable_mmu_workaround
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
+       isb
+#endif
+       .endm
+
 #endif /* __ASM_ASSEMBLER_H */
index 76d1cc8..9551307 100644 (file)
@@ -38,7 +38,7 @@
  *
  *     See Documentation/cachetlb.txt for more information. Please note that
  *     the implementation assumes non-aliasing VIPT D-cache and (aliasing)
- *     VIPT or ASID-tagged VIVT I-cache.
+ *     VIPT I-cache.
  *
  *     flush_cache_mm(mm)
  *
index ac67cfc..060e3a4 100644 (file)
@@ -60,6 +60,9 @@ enum ftr_type {
 #define FTR_VISIBLE    true    /* Feature visible to the user space */
 #define FTR_HIDDEN     false   /* Feature is hidden from the user */
 
+#define FTR_VISIBLE_IF_IS_ENABLED(config)              \
+       (IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
+
 struct arm64_ftr_bits {
        bool            sign;   /* Value is signed ? */
        bool            visible;
index 235e77d..cbf08d7 100644 (file)
@@ -91,6 +91,7 @@
 #define BRCM_CPU_PART_VULCAN           0x516
 
 #define QCOM_CPU_PART_FALKOR_V1                0x800
+#define QCOM_CPU_PART_FALKOR           0xC00
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
+#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 
 #ifndef __ASSEMBLY__
 
index 650344d..c4cd508 100644 (file)
@@ -132,11 +132,9 @@ static inline void efi_set_pgd(struct mm_struct *mm)
                         * Defer the switch to the current thread's TTBR0_EL1
                         * until uaccess_enable(). Restore the current
                         * thread's saved ttbr0 corresponding to its active_mm
-                        * (if different from init_mm).
                         */
                        cpu_set_reserved_ttbr0();
-                       if (current->active_mm != &init_mm)
-                               update_saved_ttbr0(current, current->active_mm);
+                       update_saved_ttbr0(current, current->active_mm);
                }
        }
 }
index 7f069ff..715d395 100644 (file)
 #define VTCR_EL2_FLAGS                 (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
 #define VTTBR_X                                (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
 
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  (UL(48))
 #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
index 674912d..ea6cb5b 100644 (file)
@@ -370,6 +370,7 @@ void kvm_arm_init_debug(void);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
index 3257895..9d155fa 100644 (file)
@@ -156,29 +156,21 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
 #define init_new_context(tsk,mm)       ({ atomic64_set(&(mm)->context.id, 0); 0; })
 
-/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm:  describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
 {
-       if (system_uses_ttbr0_pan()) {
-               BUG_ON(mm->pgd == swapper_pg_dir);
-               task_thread_info(tsk)->ttbr0 =
-                       virt_to_phys(mm->pgd) | ASID(mm) << 48;
-       }
+       u64 ttbr;
+
+       if (!system_uses_ttbr0_pan())
+               return;
+
+       if (mm == &init_mm)
+               ttbr = __pa_symbol(empty_zero_page);
+       else
+               ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+
+       task_thread_info(tsk)->ttbr0 = ttbr;
 }
 #else
 static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -187,6 +179,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 }
 #endif
 
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+       /*
+        * We don't actually care about the ttbr0 mapping, so point it at the
+        * zero page.
+        */
+       update_saved_ttbr0(tsk, &init_mm);
+}
+
 static inline void __switch_mm(struct mm_struct *next)
 {
        unsigned int cpu = smp_processor_id();
@@ -214,11 +216,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
         * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
         * value may have not been initialised yet (activate_mm caller) or the
         * ASID has changed since the last run (following the context switch
-        * of another thread of the same process). Avoid setting the reserved
-        * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+        * of another thread of the same process).
         */
-       if (next != &init_mm)
-               update_saved_ttbr0(tsk, next);
+       update_saved_ttbr0(tsk, next);
 }
 
 #define deactivate_mm(tsk,mm)  do { } while (0)
index 19bd976..4f76617 100644 (file)
@@ -32,7 +32,7 @@ struct mod_arch_specific {
        struct mod_plt_sec      init;
 
        /* for CONFIG_DYNAMIC_FTRACE */
-       void                    *ftrace_trampoline;
+       struct plt_entry        *ftrace_trampoline;
 };
 #endif
 
@@ -45,4 +45,48 @@ extern u64 module_alloc_base;
 #define module_alloc_base      ((u64)_etext - MODULES_VSIZE)
 #endif
 
+struct plt_entry {
+       /*
+        * A program that conforms to the AArch64 Procedure Call Standard
+        * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
+        * IP1 (x17) may be inserted at any branch instruction that is
+        * exposed to a relocation that supports long branches. Since that
+        * is exactly what we are dealing with here, we are free to use x16
+        * as a scratch register in the PLT veneers.
+        */
+       __le32  mov0;   /* movn x16, #0x....                    */
+       __le32  mov1;   /* movk x16, #0x...., lsl #16           */
+       __le32  mov2;   /* movk x16, #0x...., lsl #32           */
+       __le32  br;     /* br   x16                             */
+};
+
+static inline struct plt_entry get_plt_entry(u64 val)
+{
+       /*
+        * MOVK/MOVN/MOVZ opcode:
+        * +--------+------------+--------+-----------+-------------+---------+
+        * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
+        * +--------+------------+--------+-----------+-------------+---------+
+        *
+        * Rd     := 0x10 (x16)
+        * hw     := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
+        * opc    := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
+        * sf     := 1 (64-bit variant)
+        */
+       return (struct plt_entry){
+               cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
+               cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
+               cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
+               cpu_to_le32(0xd61f0200)
+       };
+}
+
+static inline bool plt_entries_equal(const struct plt_entry *a,
+                                    const struct plt_entry *b)
+{
+       return a->mov0 == b->mov0 &&
+              a->mov1 == b->mov1 &&
+              a->mov2 == b->mov2;
+}
+
 #endif /* __ASM_MODULE_H */
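The get_plt_entry() helper above packs a 64-bit branch target into a
movn/movk/movk/br veneer that clobbers only x16. A standalone sketch of how
the three 16-bit immediates are derived (the target address is made up, and
its top 16 bits are assumed to be all ones, as for kernel addresses, so the
MOVN inversion reproduces them):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t val = 0xffff000010203040ULL;	/* hypothetical target */

		/* Immediate fields as get_plt_entry() encodes them. */
		uint16_t movn_imm = ~val & 0xffff;		/* movn x16, #imm          */
		uint16_t movk16   = (val >> 16) & 0xffff;	/* movk x16, #imm, lsl #16 */
		uint16_t movk32   = (val >> 32) & 0xffff;	/* movk x16, #imm, lsl #32 */

		printf("movn %#06x, movk %#06x lsl 16, movk %#06x lsl 32\n",
		       movn_imm, movk16, movk32);
		return 0;
	}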
index 8d5cbec..f9ccc36 100644 (file)
@@ -18,6 +18,7 @@
 #define __ASM_PERF_EVENT_H
 
 #include <asm/stack_pointer.h>
+#include <asm/ptrace.h>
 
 #define        ARMV8_PMU_MAX_COUNTERS  32
 #define        ARMV8_PMU_COUNTER_MASK  (ARMV8_PMU_MAX_COUNTERS - 1)
@@ -79,6 +80,7 @@ struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs)  perf_misc_flags(regs)
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
 #endif
 
 #define perf_arch_fetch_caller_regs(regs, __ip) { \
index c9530b5..bdcc7f1 100644 (file)
@@ -42,6 +42,8 @@
 #include <asm/cmpxchg.h>
 #include <asm/fixmap.h>
 #include <linux/mmdebug.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
 
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
@@ -149,12 +151,20 @@ static inline pte_t pte_mkwrite(pte_t pte)
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-       return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
+       pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
+       pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+
+       return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-       return set_pte_bit(pte, __pgprot(PTE_DIRTY));
+       pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
+
+       if (pte_write(pte))
+               pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+
+       return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
@@ -207,9 +217,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
        }
 }
 
-struct mm_struct;
-struct vm_area_struct;
-
 extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 
 /*
@@ -238,7 +245,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
         * hardware updates of the pte (ptep_set_access_flags safely changes
         * valid ptes without going through an invalid entry).
         */
-       if (pte_valid(*ptep) && pte_valid(pte)) {
+       if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
+          (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
                VM_WARN_ONCE(!pte_young(pte),
                             "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
                             __func__, pte_val(*ptep), pte_val(pte));
@@ -345,7 +353,6 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #define pmd_thp_or_huge(pmd)   (pmd_huge(pmd) || pmd_trans_huge(pmd))
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
 
 #define pmd_mkhuge(pmd)                (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
@@ -642,28 +649,23 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
- * ptep_set_wrprotect - mark read-only while preserving the hardware update of
- * the Access Flag.
+ * ptep_set_wrprotect - mark read-only while transferring potential hardware
+ * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
  */
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
 {
        pte_t old_pte, pte;
 
-       /*
-        * ptep_set_wrprotect() is only called on CoW mappings which are
-        * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE &&
-        * PTE_RDONLY) or writable and software-dirty (PTE_WRITE &&
-        * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and
-        * protection_map[]. There is no race with the hardware update of the
-        * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM)
-        * is set.
-        */
-       VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep),
-                    "%s: potential race with hardware DBM", __func__);
        pte = READ_ONCE(*ptep);
        do {
                old_pte = pte;
+               /*
+                * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+                * clear), set the PTE_DIRTY bit.
+                */
+               if (pte_hw_dirty(pte))
+                       pte = pte_mkdirty(pte);
                pte = pte_wrprotect(pte);
                pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
                                               pte_val(old_pte), pte_val(pte));
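
The dirty-bit transfer performed in this loop is the crux of how arm64 folds the hardware DBM mechanism into the generic mm expectations: hardware records a write by clearing PTE_RDONLY while PTE_WRITE/DBM is set, and wrprotecting the entry would otherwise lose that information. The toy model below only mirrors that logic; the flag values are placeholders rather than the real arm64 PTE layout, and pte_wrprotect() is assumed to clear the write/DBM bit and set the read-only bit, as in the patched header.

#include <stdio.h>

#define PTE_RDONLY (1u << 0)	/* placeholder for AP[2]          */
#define PTE_WRITE  (1u << 1)	/* placeholder for the DBM bit    */
#define PTE_DIRTY  (1u << 2)	/* placeholder software dirty bit */

/* Mirrors the loop body above: capture hardware dirtiness in the
 * software bit, then make the entry read-only. */
static unsigned int wrprotect(unsigned int pte)
{
	if ((pte & PTE_WRITE) && !(pte & PTE_RDONLY))	/* pte_hw_dirty() */
		pte |= PTE_DIRTY;			/* pte_mkdirty()  */
	pte &= ~PTE_WRITE;				/* pte_wrprotect(): drop DBM/write */
	pte |= PTE_RDONLY;				/*                  and set read-only */
	return pte;
}

int main(void)
{
	unsigned int hw_dirty = PTE_WRITE;		/* written through by hardware: RDONLY clear */
	unsigned int clean = PTE_WRITE | PTE_RDONLY;

	printf("hw-dirty -> sw-dirty: %d\n", !!(wrprotect(hw_dirty) & PTE_DIRTY));	/* 1 */
	printf("clean    -> sw-dirty: %d\n", !!(wrprotect(clean) & PTE_DIRTY));		/* 0 */
	return 0;
}
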
index eb43128..740aa03 100644 (file)
@@ -51,8 +51,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,                                    \
 }
 
-#define init_stack             (init_thread_union.stack)
-
 #define thread_saved_pc(tsk)   \
        ((unsigned long)(tsk->thread.cpu_context.pc))
 #define thread_saved_sp(tsk)   \
diff --git a/arch/arm64/include/uapi/asm/bpf_perf_event.h b/arch/arm64/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..b551b74
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index 8265dd7..067baac 100644 (file)
@@ -61,6 +61,3 @@ extra-y                                       += $(head-y) vmlinux.lds
 ifeq ($(CONFIG_DEBUG_EFI),y)
 AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
 endif
-
-# will be included by each individual module but not by the core kernel itself
-extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o
index 65f42d2..2a752cb 100644 (file)
@@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart)
        mrs     x12, sctlr_el1
        ldr     x13, =SCTLR_ELx_FLAGS
        bic     x12, x12, x13
+       pre_disable_mmu_workaround
        msr     sctlr_el1, x12
        isb
 
index d169782..ea00124 100644 (file)
@@ -31,13 +31,13 @@ extern const struct cpu_operations cpu_psci_ops;
 
 const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
 
-static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = {
        &smp_spin_table_ops,
        &cpu_psci_ops,
        NULL,
 };
 
-static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *const acpi_supported_cpu_ops[] __initconst = {
 #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
        &acpi_parking_protocol_ops,
 #endif
@@ -47,7 +47,7 @@ static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
 
 static const struct cpu_operations * __init cpu_get_ops(const char *name)
 {
-       const struct cpu_operations **ops;
+       const struct cpu_operations *const *ops;
 
        ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
 
index c5ba009..a73a592 100644 (file)
@@ -145,7 +145,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                                  FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
        S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
        S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
index 4e6ad35..6b9736c 100644 (file)
@@ -96,6 +96,7 @@ ENTRY(entry)
        mrs     x0, sctlr_el2
        bic     x0, x0, #1 << 0 // clear SCTLR.M
        bic     x0, x0, #1 << 2 // clear SCTLR.C
+       pre_disable_mmu_workaround
        msr     sctlr_el2, x0
        isb
        b       2f
@@ -103,6 +104,7 @@ ENTRY(entry)
        mrs     x0, sctlr_el1
        bic     x0, x0, #1 << 0 // clear SCTLR.M
        bic     x0, x0, #1 << 2 // clear SCTLR.C
+       pre_disable_mmu_workaround
        msr     sctlr_el1, x0
        isb
 2:
index 143b3e7..fae81f7 100644 (file)
  *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
  *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
  */
-static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
+struct fpsimd_last_state_struct {
+       struct fpsimd_state *st;
+       bool sve_in_use;
+};
+
+static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
 
 /* Default VL for tasks that don't set it explicitly: */
 static int sve_default_vl = -1;
@@ -905,7 +910,7 @@ void fpsimd_thread_switch(struct task_struct *next)
                 */
                struct fpsimd_state *st = &next->thread.fpsimd_state;
 
-               if (__this_cpu_read(fpsimd_last_state) == st
+               if (__this_cpu_read(fpsimd_last_state.st) == st
                    && st->cpu == smp_processor_id())
                        clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
                else
@@ -991,6 +996,21 @@ void fpsimd_signal_preserve_current_state(void)
                sve_to_fpsimd(current);
 }
 
+/*
+ * Associate current's FPSIMD context with this cpu
+ * Preemption must be disabled when calling this function.
+ */
+static void fpsimd_bind_to_cpu(void)
+{
+       struct fpsimd_last_state_struct *last =
+               this_cpu_ptr(&fpsimd_last_state);
+       struct fpsimd_state *st = &current->thread.fpsimd_state;
+
+       last->st = st;
+       last->sve_in_use = test_thread_flag(TIF_SVE);
+       st->cpu = smp_processor_id();
+}
+
 /*
  * Load the userland FPSIMD state of 'current' from memory, but only if the
  * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
@@ -1004,11 +1024,8 @@ void fpsimd_restore_current_state(void)
        local_bh_disable();
 
        if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
-               struct fpsimd_state *st = &current->thread.fpsimd_state;
-
                task_fpsimd_load();
-               __this_cpu_write(fpsimd_last_state, st);
-               st->cpu = smp_processor_id();
+               fpsimd_bind_to_cpu();
        }
 
        local_bh_enable();
@@ -1026,18 +1043,14 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
 
        local_bh_disable();
 
-       if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
-               current->thread.fpsimd_state = *state;
+       current->thread.fpsimd_state.user_fpsimd = state->user_fpsimd;
+       if (system_supports_sve() && test_thread_flag(TIF_SVE))
                fpsimd_to_sve(current);
-       }
-       task_fpsimd_load();
 
-       if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
-               struct fpsimd_state *st = &current->thread.fpsimd_state;
+       task_fpsimd_load();
 
-               __this_cpu_write(fpsimd_last_state, st);
-               st->cpu = smp_processor_id();
-       }
+       if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE))
+               fpsimd_bind_to_cpu();
 
        local_bh_enable();
 }
@@ -1052,7 +1065,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
 
 static inline void fpsimd_flush_cpu_state(void)
 {
-       __this_cpu_write(fpsimd_last_state, NULL);
+       __this_cpu_write(fpsimd_last_state.st, NULL);
 }
 
 /*
@@ -1065,14 +1078,10 @@ static inline void fpsimd_flush_cpu_state(void)
 #ifdef CONFIG_ARM64_SVE
 void sve_flush_cpu_state(void)
 {
-       struct fpsimd_state *const fpstate = __this_cpu_read(fpsimd_last_state);
-       struct task_struct *tsk;
-
-       if (!fpstate)
-               return;
+       struct fpsimd_last_state_struct const *last =
+               this_cpu_ptr(&fpsimd_last_state);
 
-       tsk = container_of(fpstate, struct task_struct, thread.fpsimd_state);
-       if (test_tsk_thread_flag(tsk, TIF_SVE))
+       if (last->st && last->sve_in_use)
                fpsimd_flush_cpu_state();
 }
 #endif /* CONFIG_ARM64_SVE */
@@ -1267,7 +1276,7 @@ static inline void fpsimd_pm_init(void) { }
 #ifdef CONFIG_HOTPLUG_CPU
 static int fpsimd_cpu_dead(unsigned int cpu)
 {
-       per_cpu(fpsimd_last_state, cpu) = NULL;
+       per_cpu(fpsimd_last_state.st, cpu) = NULL;
        return 0;
 }
 
diff --git a/arch/arm64/kernel/ftrace-mod.S b/arch/arm64/kernel/ftrace-mod.S
deleted file mode 100644 (file)
index 00c4025..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-       .section        ".text.ftrace_trampoline", "ax"
-       .align          3
-0:     .quad           0
-__ftrace_trampoline:
-       ldr             x16, 0b
-       br              x16
-ENDPROC(__ftrace_trampoline)
index c13b1fc..50986e3 100644 (file)
@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-               unsigned long *trampoline;
+               struct plt_entry trampoline;
                struct module *mod;
 
                /*
@@ -104,22 +104,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                 * is added in the future, but for now, the pr_err() below
                 * deals with a theoretical issue only.
                 */
-               trampoline = (unsigned long *)mod->arch.ftrace_trampoline;
-               if (trampoline[0] != addr) {
-                       if (trampoline[0] != 0) {
+               trampoline = get_plt_entry(addr);
+               if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+                                      &trampoline)) {
+                       if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+                                              &(struct plt_entry){})) {
                                pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
                                return -EINVAL;
                        }
 
                        /* point the trampoline to our ftrace entry point */
                        module_disable_ro(mod);
-                       trampoline[0] = addr;
+                       *mod->arch.ftrace_trampoline = trampoline;
                        module_enable_ro(mod, true);
 
                        /* update trampoline before patching in the branch */
                        smp_wmb();
                }
-               addr = (unsigned long)&trampoline[1];
+               addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
 #else /* CONFIG_ARM64_MODULE_PLTS */
                return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */
index 67e86a0..e3cb9fb 100644 (file)
@@ -750,6 +750,7 @@ __primary_switch:
         * to take into account by discarding the current kernel mapping and
         * creating a new one.
         */
+       pre_disable_mmu_workaround
        msr     sctlr_el1, x20                  // disable the MMU
        isb
        bl      __create_page_tables            // recreate kernel mapping
index 749f817..74bb56f 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/smp.h>
+#include <linux/uaccess.h>
 
 #include <asm/compat.h>
 #include <asm/current.h>
@@ -36,7 +37,6 @@
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
-#include <asm/uaccess.h>
 
 /* Breakpoint currently in use for each BRP. */
 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
index d05dbe6..ea640f9 100644 (file)
 #include <linux/module.h>
 #include <linux/sort.h>
 
-struct plt_entry {
-       /*
-        * A program that conforms to the AArch64 Procedure Call Standard
-        * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
-        * IP1 (x17) may be inserted at any branch instruction that is
-        * exposed to a relocation that supports long branches. Since that
-        * is exactly what we are dealing with here, we are free to use x16
-        * as a scratch register in the PLT veneers.
-        */
-       __le32  mov0;   /* movn x16, #0x....                    */
-       __le32  mov1;   /* movk x16, #0x...., lsl #16           */
-       __le32  mov2;   /* movk x16, #0x...., lsl #32           */
-       __le32  br;     /* br   x16                             */
-};
-
 static bool in_init(const struct module *mod, void *loc)
 {
        return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
@@ -40,33 +25,14 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
        int i = pltsec->plt_num_entries;
        u64 val = sym->st_value + rela->r_addend;
 
-       /*
-        * MOVK/MOVN/MOVZ opcode:
-        * +--------+------------+--------+-----------+-------------+---------+
-        * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
-        * +--------+------------+--------+-----------+-------------+---------+
-        *
-        * Rd     := 0x10 (x16)
-        * hw     := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
-        * opc    := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
-        * sf     := 1 (64-bit variant)
-        */
-       plt[i] = (struct plt_entry){
-               cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
-               cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
-               cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
-               cpu_to_le32(0xd61f0200)
-       };
+       plt[i] = get_plt_entry(val);
 
        /*
         * Check if the entry we just created is a duplicate. Given that the
         * relocations are sorted, this will be the last entry we allocated.
         * (if one exists).
         */
-       if (i > 0 &&
-           plt[i].mov0 == plt[i - 1].mov0 &&
-           plt[i].mov1 == plt[i - 1].mov1 &&
-           plt[i].mov2 == plt[i - 1].mov2)
+       if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
                return (u64)&plt[i - 1];
 
        pltsec->plt_num_entries++;
@@ -154,6 +120,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
        unsigned long core_plts = 0;
        unsigned long init_plts = 0;
        Elf64_Sym *syms = NULL;
+       Elf_Shdr *tramp = NULL;
        int i;
 
        /*
@@ -165,6 +132,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                        mod->arch.core.plt = sechdrs + i;
                else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
                        mod->arch.init.plt = sechdrs + i;
+               else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
+                        !strcmp(secstrings + sechdrs[i].sh_name,
+                                ".text.ftrace_trampoline"))
+                       tramp = sechdrs + i;
                else if (sechdrs[i].sh_type == SHT_SYMTAB)
                        syms = (Elf64_Sym *)sechdrs[i].sh_addr;
        }
@@ -215,5 +186,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
        mod->arch.init.plt_num_entries = 0;
        mod->arch.init.plt_max_entries = init_plts;
 
+       if (tramp) {
+               tramp->sh_type = SHT_NOBITS;
+               tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+               tramp->sh_addralign = __alignof__(struct plt_entry);
+               tramp->sh_size = sizeof(struct plt_entry);
+       }
+
        return 0;
 }
index f7c9781..22e36a2 100644 (file)
@@ -1,4 +1,5 @@
 SECTIONS {
        .plt (NOLOAD) : { BYTE(0) }
        .init.plt (NOLOAD) : { BYTE(0) }
+       .text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
 }
index 9eaef51..3affca3 100644 (file)
@@ -262,12 +262,6 @@ static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
-
-       [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
-       [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
-
-       [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
-       [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
 };
 
 static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
index b2adcce..6b7dcf4 100644 (file)
@@ -314,6 +314,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
        clear_tsk_thread_flag(p, TIF_SVE);
        p->thread.sve_state = NULL;
 
+       /*
+        * In case p was allocated the same task_struct pointer as some
+        * other recently-exited task, make sure p is disassociated from
+        * any cpu that may have run that now-exited task recently.
+        * Otherwise we could erroneously skip reloading the FPSIMD
+        * registers for p.
+        */
+       fpsimd_flush_task_state(p);
+
        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->regs[0] = 0;
index ce704a4..f407e42 100644 (file)
@@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel)
        mrs     x0, sctlr_el2
        ldr     x1, =SCTLR_ELx_FLAGS
        bic     x0, x0, x1
+       pre_disable_mmu_workaround
        msr     sctlr_el2, x0
        isb
 1:
index dbadfaf..fa63b28 100644 (file)
@@ -221,3 +221,24 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
                }
        }
 }
+
+
+/*
+ * After successfully emulating an instruction, we might want to
+ * return to user space with a KVM_EXIT_DEBUG. We can only do this
+ * once the emulation is complete, though, so for userspace emulations
+ * we have to wait until we have re-entered KVM before calling this
+ * helper.
+ *
+ * Return true (and set exit_reason) to return to userspace or false
+ * if no further action is required.
+ */
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+               run->exit_reason = KVM_EXIT_DEBUG;
+               run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
+               return true;
+       }
+       return false;
+}
index b712479..e60494f 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_psci.h>
+#include <asm/debug-monitors.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -44,7 +45,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
        ret = kvm_psci_call(vcpu);
        if (ret < 0) {
-               kvm_inject_undefined(vcpu);
+               vcpu_set_reg(vcpu, 0, ~0UL);
                return 1;
        }
 
@@ -53,7 +54,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-       kvm_inject_undefined(vcpu);
+       vcpu_set_reg(vcpu, 0, ~0UL);
        return 1;
 }
 
@@ -186,6 +187,40 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
        return arm_exit_handlers[hsr_ec];
 }
 
+/*
+ * We may be single-stepping an emulated instruction. If the emulation
+ * has been completed in the kernel, we can return to userspace with a
+ * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
+ * emulation first.
+ */
+static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int handled;
+
+       /*
+        * See ARM ARM B1.14.1: "Hyp traps on instructions
+        * that fail their condition code check"
+        */
+       if (!kvm_condition_valid(vcpu)) {
+               kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+               handled = 1;
+       } else {
+               exit_handle_fn exit_handler;
+
+               exit_handler = kvm_get_exit_handler(vcpu);
+               handled = exit_handler(vcpu, run);
+       }
+
+       /*
+        * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
+        * structure if we need to return to userspace.
+        */
+       if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
+               handled = 0;
+
+       return handled;
+}
+
 /*
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to userspace.
@@ -193,8 +228,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                       int exception_index)
 {
-       exit_handle_fn exit_handler;
-
        if (ARM_SERROR_PENDING(exception_index)) {
                u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
 
@@ -220,20 +253,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                return 1;
        case ARM_EXCEPTION_EL1_SERROR:
                kvm_inject_vabt(vcpu);
-               return 1;
-       case ARM_EXCEPTION_TRAP:
-               /*
-                * See ARM ARM B1.14.1: "Hyp traps on instructions
-                * that fail their condition code check"
-                */
-               if (!kvm_condition_valid(vcpu)) {
-                       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+               /* We may still need to return for single-step */
+               if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
+                       && kvm_arm_handle_step_debug(vcpu, run))
+                       return 0;
+               else
                        return 1;
-               }
-
-               exit_handler = kvm_get_exit_handler(vcpu);
-
-               return exit_handler(vcpu, run);
+       case ARM_EXCEPTION_TRAP:
+               return handle_trap_exceptions(vcpu, run);
        case ARM_EXCEPTION_HYP_GONE:
                /*
                 * EL2 has been reset to the hyp-stub. This happens when a guest
index 3f96155..870828c 100644 (file)
@@ -151,6 +151,7 @@ reset:
        mrs     x5, sctlr_el2
        ldr     x6, =SCTLR_ELx_FLAGS
        bic     x5, x5, x6              // Clear SCTL_M and etc
+       pre_disable_mmu_workaround
        msr     sctlr_el2, x5
        isb
 
index 321c9c0..f4363d4 100644 (file)
@@ -74,6 +74,9 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
 {
        u64 reg;
 
+       /* Clear pmscr in case of early return */
+       *pmscr_el1 = 0;
+
        /* SPE present on this CPU? */
        if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
                                                  ID_AA64DFR0_PMSVER_SHIFT))
index 525c01f..f7c651f 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/fpsimd.h>
+#include <asm/debug-monitors.h>
 
 static bool __hyp_text __fpsimd_enabled_nvhe(void)
 {
@@ -269,7 +270,11 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
        return true;
 }
 
-static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
+/* Skip an instruction which has been emulated. Returns true if
+ * execution can continue or false if we need to exit hyp mode because
+ * single-step was in effect.
+ */
+static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 {
        *vcpu_pc(vcpu) = read_sysreg_el2(elr);
 
@@ -282,6 +287,14 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
        }
 
        write_sysreg_el2(*vcpu_pc(vcpu), elr);
+
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+               vcpu->arch.fault.esr_el2 =
+                       (ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
+               return false;
+       } else {
+               return true;
+       }
 }
 
 int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -342,13 +355,21 @@ again:
                        int ret = __vgic_v2_perform_cpuif_access(vcpu);
 
                        if (ret == 1) {
-                               __skip_instr(vcpu);
-                               goto again;
+                               if (__skip_instr(vcpu))
+                                       goto again;
+                               else
+                                       exit_code = ARM_EXCEPTION_TRAP;
                        }
 
                        if (ret == -1) {
-                               /* Promote an illegal access to an SError */
-                               __skip_instr(vcpu);
+                               /* Promote an illegal access to an
+                                * SError. If we would be returning
+                                * due to single-step clear the SS
+                                * bit so handle_exit knows what to
+                                * do after dealing with the error.
+                                */
+                               if (!__skip_instr(vcpu))
+                                       *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
                                exit_code = ARM_EXCEPTION_EL1_SERROR;
                        }
 
@@ -363,8 +384,10 @@ again:
                int ret = __vgic_v3_perform_cpuif_access(vcpu);
 
                if (ret == 1) {
-                       __skip_instr(vcpu);
-                       goto again;
+                       if (__skip_instr(vcpu))
+                               goto again;
+                       else
+                               exit_code = ARM_EXCEPTION_TRAP;
                }
 
                /* 0 falls through to be handled out of EL2 */
index ab9f5f0..6f40170 100644 (file)
@@ -96,12 +96,6 @@ static void flush_context(unsigned int cpu)
 
        set_reserved_asid_bits();
 
-       /*
-        * Ensure the generation bump is observed before we xchg the
-        * active_asids.
-        */
-       smp_wmb();
-
        for_each_possible_cpu(i) {
                asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
                /*
@@ -117,7 +111,10 @@ static void flush_context(unsigned int cpu)
                per_cpu(reserved_asids, i) = asid;
        }
 
-       /* Queue a TLB invalidate and flush the I-cache if necessary. */
+       /*
+        * Queue a TLB invalidation for each CPU to perform on next
+        * context-switch
+        */
        cpumask_setall(&tlb_flush_pending);
 }
 
@@ -202,11 +199,18 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
        asid = atomic64_read(&mm->context.id);
 
        /*
-        * The memory ordering here is subtle. We rely on the control
-        * dependency between the generation read and the update of
-        * active_asids to ensure that we are synchronised with a
-        * parallel rollover (i.e. this pairs with the smp_wmb() in
-        * flush_context).
+        * The memory ordering here is subtle.
+        * If our ASID matches the current generation, then we update
+        * our active_asids entry with a relaxed xchg. Racing with a
+        * concurrent rollover means that either:
+        *
+        * - We get a zero back from the xchg and end up waiting on the
+        *   lock. Taking the lock synchronises with the rollover and so
+        *   we are forced to see the updated generation.
+        *
+        * - We get a valid ASID back from the xchg, which means the
+        *   relaxed xchg in flush_context will treat us as reserved
+        *   because atomic RmWs are totally ordered for a given location.
         */
        if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
            && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
index ca74a2a..7b60d62 100644 (file)
@@ -389,7 +389,7 @@ void ptdump_check_wx(void)
                .check_wx = true,
        };
 
-       walk_pgd(&st, &init_mm, 0);
+       walk_pgd(&st, &init_mm, VA_START);
        note_page(&st, 0, 0, 0);
        if (st.wx_pages || st.uxn_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
index 22168cd..9b7f89d 100644 (file)
@@ -574,7 +574,6 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
        struct siginfo info;
        const struct fault_info *inf;
-       int ret = 0;
 
        inf = esr_to_fault_info(esr);
        pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n",
@@ -589,7 +588,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
                if (interrupts_enabled(regs))
                        nmi_enter();
 
-               ret = ghes_notify_sea();
+               ghes_notify_sea();
 
                if (interrupts_enabled(regs))
                        nmi_exit();
@@ -604,7 +603,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
                info.si_addr  = (void __user *)addr;
        arm64_notify_die("", regs, &info, esr);
 
-       return ret;
+       return 0;
 }
 
 static const struct fault_info fault_info[] = {
index 5960bef..00e7b90 100644 (file)
@@ -476,6 +476,8 @@ void __init arm64_memblock_init(void)
 
        reserve_elfcorehdr();
 
+       high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
+
        dma_contiguous_reserve(arm64_dma_phys_limit);
 
        memblock_allow_resize();
@@ -502,7 +504,6 @@ void __init bootmem_init(void)
        sparse_init();
        zone_sizes_init(min, max);
 
-       high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
        memblock_dump_all();
 }
 
index 371c5f0..051e71e 100644 (file)
@@ -26,7 +26,7 @@
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 
-static struct kmem_cache *pgd_cache;
+static struct kmem_cache *pgd_cache __ro_after_init;
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
index ba38d40..bb32f7f 100644 (file)
@@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
 /* Stack must be multiples of 16B */
 #define STACK_ALIGN(sz) (((sz) + 15) & ~15)
 
-#define PROLOGUE_OFFSET 8
+/* Tail call offset to jump into */
+#define PROLOGUE_OFFSET 7
 
 static int build_prologue(struct jit_ctx *ctx)
 {
@@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx)
        /* Initialize tail_call_cnt */
        emit(A64_MOVZ(1, tcc, 0, 0), ctx);
 
-       /* 4 byte extra for skb_copy_bits buffer */
-       ctx->stack_size = prog->aux->stack_depth + 4;
-       ctx->stack_size = STACK_ALIGN(ctx->stack_size);
-
-       /* Set up function call stack */
-       emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
-
        cur_offset = ctx->idx - idx0;
        if (cur_offset != PROLOGUE_OFFSET) {
                pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
                            cur_offset, PROLOGUE_OFFSET);
                return -1;
        }
+
+       /* 4 byte extra for skb_copy_bits buffer */
+       ctx->stack_size = prog->aux->stack_depth + 4;
+       ctx->stack_size = STACK_ALIGN(ctx->stack_size);
+
+       /* Set up function call stack */
+       emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
        return 0;
 }
 
@@ -260,11 +261,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
        emit(A64_LDR64(prg, tmp, prg), ctx);
        emit(A64_CBZ(1, prg, jmp_offset), ctx);
 
-       /* goto *(prog->bpf_func + prologue_size); */
+       /* goto *(prog->bpf_func + prologue_offset); */
        off = offsetof(struct bpf_prog, bpf_func);
        emit_a64_mov_i64(tmp, off, ctx);
        emit(A64_LDR64(tmp, prg, tmp), ctx);
        emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
+       emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
        emit(A64_BR(tmp), ctx);
 
        /* out: */
index 2966b93..a5aeab4 100644 (file)
@@ -56,8 +56,6 @@ struct thread_info {
        .cpu            = 0,                    \
        .preempt_count  = INIT_PREEMPT_COUNT,   \
 }
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
 
 /* Given a task stack pointer, you can find its corresponding
  * thread_info structure just by masking it to the THREAD_SIZE
index aa624b4..2240b38 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
index acc70c1..59a5697 100644 (file)
@@ -60,9 +60,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* get the thread information struct of current task */
 static inline __attribute__((const))
 struct thread_info *current_thread_info(void)
index 67ee896..26644e1 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 124dd5e..ee4d8b0 100644 (file)
@@ -26,13 +26,6 @@ struct task_struct;
  */
 #define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))
 
-/* THREAD_SIZE is the size of the thread_info/kernel_stack combo.
- * normally, the stack is found by doing something like p + THREAD_SIZE
- * in CRIS, a page is 8192 bytes, which seems like a sane size
- */
-#define THREAD_SIZE       PAGE_SIZE
-#define THREAD_SIZE_ORDER (0)
-
 /*
  * At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack.
  * This macro allows us to find those regs for a task.
@@ -59,8 +52,6 @@ static inline void release_thread(struct task_struct *dead_task)
         /* Nothing needs to be done.  */
 }
 
-#define init_stack      (init_thread_union.stack)
-
 #define cpu_relax()     barrier()
 
 void default_idle(void);
index 472830c..996fef3 100644 (file)
 #endif
 
 
+/* THREAD_SIZE is the size of the thread_info/kernel_stack combo.
+ * normally, the stack is found by doing something like p + THREAD_SIZE
+ * in CRIS, a page is 8192 bytes, which seems like a sane size
+ */
+#define THREAD_SIZE       PAGE_SIZE
+#define THREAD_SIZE_ORDER (0)
+
 /*
  * low level task data that entry.S needs immediate access to
  * - this struct should fit entirely inside of one cache line
@@ -56,8 +63,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,                    \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-
 #endif /* !__ASSEMBLY__ */
 
 /*
index 3687b54..3470c6e 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 6d1dbc1..9b232e0 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 
 #ifdef CONFIG_ETRAX_VMEM_SIZE
 #define __CONFIG_ETRAX_VMEM_SIZE CONFIG_ETRAX_VMEM_SIZE
index ccba3b6..0f95084 100644 (file)
@@ -64,9 +64,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* how to get the thread information struct from C */
 register struct thread_info *__current_thread_info asm("gr15");
 
index b15bf6b..14a2e9a 100644 (file)
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += bpf_perf_event.h
index 072b92c..0cdaa30 100644 (file)
@@ -46,9 +46,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
index 187aed8..2f65f78 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index b80fe1d..f41f9c6 100644 (file)
@@ -84,9 +84,6 @@ struct thread_info {
        .regs = NULL,                   \
 }
 
-#define init_thread_info        (init_thread_union.thread_info)
-#define init_stack              (init_thread_union.stack)
-
 /* Tacky preprocessor trickery */
 #define        qqstr(s) qstr(s)
 #define qstr(s) #s
index cb5df3a..41a176d 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index ec87e67..ad69d18 100644 (file)
@@ -22,6 +22,8 @@
 #include <asm/asm-offsets.h>   /*  Most of the kernel defines are here  */
 #include <asm/mem-layout.h>    /*  except for page_offset  */
 #include <asm/cache.h>         /*  and now we're pulling cache line size  */
+#include <asm/thread_info.h>   /*  and we need THREAD_SIZE too */
+
 OUTPUT_ARCH(hexagon)
 ENTRY(stext)
 
index 49583c5..315c51f 100644 (file)
@@ -43,7 +43,7 @@ config IA64
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select GENERIC_IOMAP
        select GENERIC_SMP_IDLE_THREAD
-       select ARCH_INIT_TASK
+       select ARCH_TASK_STRUCT_ON_STACK
        select ARCH_TASK_STRUCT_ALLOCATOR
        select ARCH_THREAD_STACK_ALLOCATOR
        select ARCH_CLOCKSOURCE_DATA
index c100d78..2dd7f51 100644 (file)
@@ -42,7 +42,7 @@ $(error Sorry, you need a newer version of the assember, one that is built from
 endif
 
 KBUILD_CFLAGS += $(cflags-y)
-head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
+head-y := arch/ia64/kernel/head.o
 
 libs-y                         += arch/ia64/lib/
 core-y                         += arch/ia64/kernel/ arch/ia64/mm/
index 28e02c9..762eeb0 100644 (file)
@@ -65,29 +65,30 @@ ia64_atomic_fetch_##op (int i, atomic_t *v)                         \
 ATOMIC_OPS(add, +)
 ATOMIC_OPS(sub, -)
 
-#define atomic_add_return(i,v)                                         \
+#ifdef __OPTIMIZE__
+#define __ia64_atomic_const(i) __builtin_constant_p(i) ?               \
+               ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||       \
+                (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0
+
+#define atomic_add_return(i, v)                                                \
 ({                                                                     \
-       int __ia64_aar_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
-            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
-            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
-            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
-               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
-               : ia64_atomic_add(__ia64_aar_i, v);                     \
+       int __i = (i);                                                  \
+       static const int __ia64_atomic_p = __ia64_atomic_const(i);      \
+       __ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) :      \
+                               ia64_atomic_add(__i, v);                \
 })
 
-#define atomic_sub_return(i,v)                                         \
+#define atomic_sub_return(i, v)                                                \
 ({                                                                     \
-       int __ia64_asr_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
-            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
-            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
-            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
-               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
-               : ia64_atomic_sub(__ia64_asr_i, v);                     \
+       int __i = (i);                                                  \
+       static const int __ia64_atomic_p = __ia64_atomic_const(i);      \
+       __ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) :     \
+                               ia64_atomic_sub(__i, v);                \
 })
+#else
+#define atomic_add_return(i, v)        ia64_atomic_add(i, v)
+#define atomic_sub_return(i, v)        ia64_atomic_sub(i, v)
+#endif
 
 #define atomic_fetch_add(i,v)                                          \
 ({                                                                     \
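
The rewrite above leans on __builtin_constant_p() folding to a compile-time constant when optimising, so the static const initialiser is legal and only the increments that ia64's fetchadd instruction can encode (±1, ±4, ±8, ±16) take the fast path, with everything else going through the ia64_atomic_add/sub helpers. A minimal, host-buildable illustration of that gating (plain GCC C, nothing ia64-specific; the FAST_CONST name is made up):

#include <stdio.h>

/* Same shape as __ia64_atomic_const() above: evaluates to 1 only for
 * compile-time constants that fetchadd could encode, 0 otherwise. */
#define FAST_CONST(i)							\
	(__builtin_constant_p(i) ?					\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0)

int main(int argc, char **argv)
{
	(void)argv;

	printf("%d %d %d\n", FAST_CONST(4), FAST_CONST(5), FAST_CONST(argc));
	/* prints "1 0 0": argc is not a compile-time constant */
	return 0;
}
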
index 1d172a4..64a1011 100644 (file)
@@ -12,6 +12,8 @@
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 
+#define THREAD_SIZE                    KERNEL_STACK_SIZE
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -41,8 +43,6 @@ struct thread_info {
 #endif
 };
 
-#define THREAD_SIZE                    KERNEL_STACK_SIZE
-
 #define INIT_THREAD_INFO(tsk)                  \
 {                                              \
        .task           = &tsk,                 \
index 13a97aa..f5c6967 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += kvm_para.h
index 14ad79f..0b4c65a 100644 (file)
@@ -7,7 +7,7 @@ ifdef CONFIG_DYNAMIC_FTRACE
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
-extra-y        := head.o init_task.o vmlinux.lds
+extra-y        := head.o vmlinux.lds
 
 obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o     \
         irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o          \
index 1d29b2f..1dacbf5 100644 (file)
@@ -504,6 +504,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
        if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
                return -1;
 
+       if (num_node_memblks >= NR_NODE_MEMBLKS) {
+               pr_err("NUMA: too many memblk ranges\n");
+               return -EINVAL;
+       }
+
        /* record this node in proximity bitmap */
        pxm_bit_set(pxm);
 
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
deleted file mode 100644 (file)
index 8df9245..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This is where we statically allocate and initialize the initial
- * task.
- *
- * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init_task.h>
-#include <linux/mqueue.h>
-
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-/*
- * Initial task structure.
- *
- * We need to make sure that this is properly aligned due to the way process stacks are
- * handled. This is done by having a special ".data..init_task" section...
- */
-#define init_thread_info       init_task_mem.s.thread_info
-#define init_stack             init_task_mem.stack
-
-union {
-       struct {
-               struct task_struct task;
-               struct thread_info thread_info;
-       } s;
-       unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
-} init_task_mem asm ("init_task") __init_task_data =
-       {{
-       .task =         INIT_TASK(init_task_mem.s.task),
-       .thread_info =  INIT_THREAD_INFO(init_task_mem.s.task)
-}};
-
-EXPORT_SYMBOL(init_task);
index c6ecb97..9025699 100644 (file)
@@ -88,7 +88,7 @@ void vtime_flush(struct task_struct *tsk)
        }
 
        if (ti->softirq_time) {
-               delta = cycle_to_nsec(ti->softirq_time));
+               delta = cycle_to_nsec(ti->softirq_time);
                account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
        }
 
index 58db59d..b0b2070 100644 (file)
@@ -3,6 +3,7 @@
 #include <asm/cache.h>
 #include <asm/ptrace.h>
 #include <asm/pgtable.h>
+#include <asm/thread_info.h>
 
 #include <asm-generic/vmlinux.lds.h>
 
index b3a215b..ba00f10 100644 (file)
@@ -56,9 +56,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
index 1c44d3b..451bf60 100644 (file)
@@ -1,5 +1,6 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += kvm_para.h
 generic-y += siginfo.h
index cb79fba..b88a8dd 100644 (file)
@@ -122,7 +122,6 @@ void abort(void)
        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
 }
-EXPORT_SYMBOL(abort);
 
 void __init trap_init(void)
 {
index 55e55db..3d07b1d 100644 (file)
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../uClinux-dist/romfs"
 # CONFIG_RD_BZIP2 is not set
 # CONFIG_RD_LZMA is not set
 # CONFIG_RD_XZ is not set
index 9280355..015f1ca 100644 (file)
@@ -41,8 +41,6 @@ struct thread_info {
        .preempt_count  = INIT_PREEMPT_COUNT,   \
 }
 
-#define init_stack             (init_thread_union.stack)
-
 #ifndef __ASSEMBLY__
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
@@ -58,8 +56,6 @@ static inline struct thread_info *current_thread_info(void)
 }
 #endif
 
-#define init_thread_info       (init_thread_union.thread_info)
-
 /* entry.S relies on these definitions!
  * bits 0-7 are tested at every exception exit
  * bits 8-15 are also tested at syscall exit
index 3717b64..c2e26a4 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
index 3aa571a..cf6edda 100644 (file)
@@ -45,6 +45,8 @@ SECTIONS {
        .text : {
                HEAD_TEXT
                TEXT_TEXT
+               IRQENTRY_TEXT
+               SOFTIRQENTRY_TEXT
                SCHED_TEXT
                CPUIDLE_TEXT
                LOCK_TEXT
index 89172b8..625a578 100644 (file)
@@ -16,6 +16,8 @@ SECTIONS
   .text : {
        HEAD_TEXT
        TEXT_TEXT
+       IRQENTRY_TEXT
+       SOFTIRQENTRY_TEXT
        SCHED_TEXT
        CPUIDLE_TEXT
        LOCK_TEXT
index 293990e..9868270 100644 (file)
@@ -16,6 +16,8 @@ SECTIONS
   .text : {
        HEAD_TEXT
        TEXT_TEXT
+       IRQENTRY_TEXT
+       SOFTIRQENTRY_TEXT
        SCHED_TEXT
        CPUIDLE_TEXT
        LOCK_TEXT
index 554f73a..a1a9c7f 100644 (file)
@@ -74,9 +74,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* how to get the current stack pointer from C */
 register unsigned long current_stack_pointer asm("A0StP") __used;
 
index 6ac763d..f9eaf07 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 99472d2..97559fe 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/atomic.h>
 #include <linux/mm_types.h>
+#include <linux/sched.h>
 
 #include <asm/bitops.h>
 #include <asm/mmu.h>
index e7e8954..9afe4b5 100644 (file)
@@ -86,9 +86,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
index 06609ca..2c6a6bf 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 350a990..8e0b370 100644 (file)
@@ -259,6 +259,7 @@ config BCM47XX
        select LEDS_GPIO_REGISTER
        select BCM47XX_NVRAM
        select BCM47XX_SPROM
+       select BCM47XX_SSB if !BCM47XX_BCMA
        help
         Support for BCM47XX based boards
 
@@ -389,6 +390,7 @@ config LANTIQ
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_MIPS16
        select SYS_SUPPORTS_MULTITHREADING
+       select SYS_SUPPORTS_VPE_LOADER
        select SYS_HAS_EARLY_PRINTK
        select GPIOLIB
        select SWAP_IO_SPACE
@@ -516,6 +518,7 @@ config MIPS_MALTA
        select SYS_SUPPORTS_MIPS16
        select SYS_SUPPORTS_MULTITHREADING
        select SYS_SUPPORTS_SMARTMIPS
+       select SYS_SUPPORTS_VPE_LOADER
        select SYS_SUPPORTS_ZBOOT
        select SYS_SUPPORTS_RELOCATABLE
        select USE_OF
@@ -2281,9 +2284,16 @@ config MIPSR2_TO_R6_EMULATOR
          The only reason this is a build-time option is to save ~14K from the
          final kernel image.
 
+config SYS_SUPPORTS_VPE_LOADER
+       bool
+       depends on SYS_SUPPORTS_MULTITHREADING
+       help
+         Indicates that the platform supports the VPE loader, and provides
+         physical_memsize.
+
 config MIPS_VPE_LOADER
        bool "VPE loader support."
-       depends on SYS_SUPPORTS_MULTITHREADING && MODULES
+       depends on SYS_SUPPORTS_VPE_LOADER && MODULES
        select CPU_MIPSR2_IRQ_VI
        select CPU_MIPSR2_IRQ_EI
        select MIPS_MT
index 464af5e..0749c37 100644 (file)
@@ -124,30 +124,36 @@ config SCACHE_DEBUGFS
 
          If unsure, say N.
 
-menuconfig MIPS_CPS_NS16550
+menuconfig MIPS_CPS_NS16550_BOOL
        bool "CPS SMP NS16550 UART output"
        depends on MIPS_CPS
        help
          Output debug information via an ns16550 compatible UART if exceptions
          occur early in the boot process of a secondary core.
 
-if MIPS_CPS_NS16550
+if MIPS_CPS_NS16550_BOOL
+
+config MIPS_CPS_NS16550
+       def_bool MIPS_CPS_NS16550_BASE != 0
 
 config MIPS_CPS_NS16550_BASE
        hex "UART Base Address"
        default 0x1b0003f8 if MIPS_MALTA
+       default 0
        help
          The base address of the ns16550 compatible UART on which to output
          debug information from the early stages of core startup.
 
+         This is only used if non-zero.
+
 config MIPS_CPS_NS16550_SHIFT
        int "UART Register Shift"
-       default 0 if MIPS_MALTA
+       default 0
        help
          The number of bits to shift ns16550 register indices by in order to
          form their addresses. That is, log base 2 of the span between
          adjacent ns16550 registers in the system.
 
-endif # MIPS_CPS_NS16550
+endif # MIPS_CPS_NS16550_BOOL
 
 endmenu
index 4674f1e..e1675c2 100644 (file)
@@ -575,7 +575,7 @@ static int __init ar7_register_uarts(void)
        uart_port.type          = PORT_AR7;
        uart_port.uartclk       = clk_get_rate(bus_clk) / 2;
        uart_port.iotype        = UPIO_MEM32;
-       uart_port.flags         = UPF_FIXED_TYPE;
+       uart_port.flags         = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
        uart_port.regshift      = 2;
 
        uart_port.line          = 0;
index e115634..301a902 100644 (file)
@@ -73,6 +73,7 @@ const char *get_system_type(void)
 
 void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
 {
+#ifdef CONFIG_SERIAL_8250_CONSOLE
        struct uart_port s;
 
        memset(&s, 0, sizeof(s));
@@ -85,6 +86,7 @@ void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
        s.uartclk = uartclk;
 
        early_serial_setup(&s);
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
 }
 
 int __init ath25_add_wmac(int nr, u32 base, int irq)
index 7c8aab2..b1f6669 100644 (file)
@@ -16,7 +16,6 @@ generic-y += qrwlock.h
 generic-y += qspinlock.h
 generic-y += sections.h
 generic-y += segment.h
-generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
 generic-y += user.h
index 9e9e944..1a508a7 100644 (file)
@@ -552,7 +552,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t pmd);
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return !!(pmd_val(pmd) & _PAGE_WRITE);
diff --git a/arch/mips/include/asm/serial.h b/arch/mips/include/asm/serial.h
new file mode 100644 (file)
index 0000000..1d830c6
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 MIPS Tech, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef __ASM__SERIAL_H
+#define __ASM__SERIAL_H
+
+#ifdef CONFIG_MIPS_GENERIC
+/*
+ * Generic kernels cannot know a correct value for all platforms at
+ * compile time. Set it to 0 to prevent 8250_early using it
+ */
+#define BASE_BAUD 0
+#else
+#include <asm-generic/serial.h>
+#endif
+
+#endif /* __ASM__SERIAL_H */
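Background for the BASE_BAUD 0 choice, stated as an assumption rather than something the patch spells out: under the classic 8250 convention uartclk = BASE_BAUD * 16 and divisor = uartclk / (16 * baud), a zero BASE_BAUD can never describe a real clock, so early 8250 code has nothing usable to latch onto. A small sketch of that arithmetic only; ns16550_divisor is a made-up helper, not what 8250_early actually does internally.

/* Stand-alone sketch of the conventional 8250 divisor arithmetic. */
#include <stdio.h>

static unsigned int ns16550_divisor(unsigned int base_baud, unsigned int baud)
{
    unsigned int uartclk = base_baud * 16;

    return baud ? uartclk / (16 * baud) : 0;
}

int main(void)
{
    printf("BASE_BAUD=115200 at 115200 baud -> divisor %u\n",
           ns16550_divisor(115200, 115200));    /* 1 */
    printf("BASE_BAUD=0      at 115200 baud -> divisor %u\n",
           ns16550_divisor(0, 115200));         /* 0: no usable clock info */
    return 0;
}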
index 5e8927f..4993db4 100644 (file)
@@ -49,9 +49,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* How to get the thread information struct from C.  */
 register struct thread_info *__current_thread_info __asm__("$28");
 
index a0266fe..7a4becd 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += ipcbuf.h
index c7ed260..e68e6e0 100644 (file)
@@ -235,6 +235,7 @@ LEAF(mips_cps_core_init)
        has_mt  t0, 3f
 
        .set    push
+       .set    MIPS_ISA_LEVEL_RAW
        .set    mt
 
        /* Only allow 1 TC per VPE to execute... */
@@ -388,6 +389,7 @@ LEAF(mips_cps_boot_vpes)
 #elif defined(CONFIG_MIPS_MT)
 
        .set    push
+       .set    MIPS_ISA_LEVEL_RAW
        .set    mt
 
        /* If the core doesn't support MT then return */
index dd5567b..8f5bd04 100644 (file)
@@ -292,7 +292,6 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
                                  *this_cpu_ptr(&cm_core_lock_flags));
        } else {
                WARN_ON(cluster != 0);
-               WARN_ON(vp != 0);
                WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
                /*
index 45d0b6b..57028d4 100644 (file)
@@ -705,6 +705,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
        struct task_struct *t;
        int max_users;
 
+       /* If nothing to change, return right away, successfully.  */
+       if (value == mips_get_process_fp_mode(task))
+               return 0;
+
+       /* Only accept a mode change if 64-bit FP enabled for o32.  */
+       if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+               return -EOPNOTSUPP;
+
+       /* And only for o32 tasks.  */
+       if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
+               return -EOPNOTSUPP;
+
        /* Check the value is valid */
        if (value & ~known_bits)
                return -EOPNOTSUPP;
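The added checks order the cheap, state-free tests first: a no-op request succeeds immediately, and unsupported configurations are rejected before any thread state is touched. A simplified user-space analogue; names and bit values are illustrative, and the two kernel-support checks are collapsed into a single flag.

/* User-space analogue of the validation order added above.  Not kernel code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned int current_mode = 0x1;
static const unsigned int known_bits = 0x3;

static int set_fp_mode(unsigned int value, bool o32_fp64_supported)
{
    if (value == current_mode)      /* nothing to change: report success */
        return 0;
    if (!o32_fp64_supported)        /* mode changes need kernel support */
        return -EOPNOTSUPP;
    if (value & ~known_bits)        /* unknown bits are rejected */
        return -EOPNOTSUPP;
    current_mode = value;
    return 0;
}

int main(void)
{
    printf("%d %d %d\n",
           set_fp_mode(0x1, false),     /* 0: no-op always succeeds */
           set_fp_mode(0x2, false),     /* -EOPNOTSUPP */
           set_fp_mode(0x2, true));     /* 0: accepted */
    return 0;
}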
index efbd8df..0b23b1a 100644 (file)
@@ -419,63 +419,160 @@ static int gpr64_set(struct task_struct *target,
 
 #endif /* CONFIG_64BIT */
 
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
+ * correspond 1:1 to buffer slots.  Only general registers are copied.
+ */
+static int fpr_get_fpa(struct task_struct *target,
+                      unsigned int *pos, unsigned int *count,
+                      void **kbuf, void __user **ubuf)
+{
+       return user_regset_copyout(pos, count, kbuf, ubuf,
+                                  &target->thread.fpu,
+                                  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
+ * general register slots are copied to buffer slots.  Only general
+ * registers are copied.
+ */
+static int fpr_get_msa(struct task_struct *target,
+                      unsigned int *pos, unsigned int *count,
+                      void **kbuf, void __user **ubuf)
+{
+       unsigned int i;
+       u64 fpr_val;
+       int err;
+
+       BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+               err = user_regset_copyout(pos, count, kbuf, ubuf,
+                                         &fpr_val, i * sizeof(elf_fpreg_t),
+                                         (i + 1) * sizeof(elf_fpreg_t));
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ */
 static int fpr_get(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
 {
-       unsigned i;
+       const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
        int err;
-       u64 fpr_val;
 
-       /* XXX fcr31  */
+       if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+               err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
+       else
+               err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
+       if (err)
+               return err;
 
-       if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
-               return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                          &target->thread.fpu,
-                                          0, sizeof(elf_fpregset_t));
+       err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 &target->thread.fpu.fcr31,
+                                 fcr31_pos, fcr31_pos + sizeof(u32));
 
-       for (i = 0; i < NUM_FPU_REGS; i++) {
-               fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
-               err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                         &fpr_val, i * sizeof(elf_fpreg_t),
-                                         (i + 1) * sizeof(elf_fpreg_t));
+       return err;
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * !CONFIG_CPU_HAS_MSA variant.   Buffer slots correspond 1:1 to FP
+ * context's general register slots.  Only general registers are copied.
+ */
+static int fpr_set_fpa(struct task_struct *target,
+                      unsigned int *pos, unsigned int *count,
+                      const void **kbuf, const void __user **ubuf)
+{
+       return user_regset_copyin(pos, count, kbuf, ubuf,
+                                 &target->thread.fpu,
+                                 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
+ * bits only of FP context's general register slots.  Only general
+ * registers are copied.
+ */
+static int fpr_set_msa(struct task_struct *target,
+                      unsigned int *pos, unsigned int *count,
+                      const void **kbuf, const void __user **ubuf)
+{
+       unsigned int i;
+       u64 fpr_val;
+       int err;
+
+       BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+       for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+               err = user_regset_copyin(pos, count, kbuf, ubuf,
+                                        &fpr_val, i * sizeof(elf_fpreg_t),
+                                        (i + 1) * sizeof(elf_fpreg_t));
                if (err)
                        return err;
+               set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
        }
 
        return 0;
 }
 
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ *
+ * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
+ * which is supposed to have been guaranteed by the kernel before
+ * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
+ * so that we can safely avoid preinitializing temporaries for
+ * partial register writes.
+ */
 static int fpr_set(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
 {
-       unsigned i;
+       const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+       u32 fcr31;
        int err;
-       u64 fpr_val;
 
-       /* XXX fcr31  */
+       BUG_ON(count % sizeof(elf_fpreg_t));
+
+       if (pos + count > sizeof(elf_fpregset_t))
+               return -EIO;
 
        init_fp_ctx(target);
 
-       if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
-               return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                         &target->thread.fpu,
-                                         0, sizeof(elf_fpregset_t));
+       if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+               err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
+       else
+               err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
+       if (err)
+               return err;
 
-       BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
-       for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
+       if (count > 0) {
                err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                        &fpr_val, i * sizeof(elf_fpreg_t),
-                                        (i + 1) * sizeof(elf_fpreg_t));
+                                        &fcr31,
+                                        fcr31_pos, fcr31_pos + sizeof(u32));
                if (err)
                        return err;
-               set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+
+               ptrace_setfcr31(target, fcr31);
        }
 
-       return 0;
+       return err;
 }
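The split helpers encode one layout: NUM_FPU_REGS general-register slots of elf_fpreg_t size, then FCSR, with the MSA variant contributing only the low 64 bits of each 128-bit register. A stand-alone sketch of that buffer layout, not kernel code.

/* Sketch of the NT_PRFPREG layout: 32 64-bit slots followed by an FCSR slot. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_FPU_REGS 32

struct msa_reg { uint64_t lo, hi; };    /* 128-bit vector register, split */

static void fill_prfpreg_from_msa(const struct msa_reg *fpr, uint32_t fcr31,
                                  uint64_t out[NUM_FPU_REGS + 1])
{
    for (int i = 0; i < NUM_FPU_REGS; i++)
        out[i] = fpr[i].lo;             /* lower 64 bits only */

    out[NUM_FPU_REGS] = 0;
    memcpy(&out[NUM_FPU_REGS], &fcr31, sizeof(fcr31));  /* FCSR slot */
}

int main(void)
{
    struct msa_reg fpr[NUM_FPU_REGS] = {
        [0] = { 0x1122334455667788ull, ~0ull },
    };
    uint64_t buf[NUM_FPU_REGS + 1];
    uint32_t fcsr;

    fill_prfpreg_from_msa(fpr, 0x00c00000, buf);
    memcpy(&fcsr, &buf[NUM_FPU_REGS], sizeof(fcsr));
    printf("slot0=%016llx fcsr=%08x\n", (unsigned long long)buf[0], fcsr);
    return 0;
}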
 
 enum mips_regset {
index d535edc..75fdeaa 100644 (file)
@@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int r = -EINTR;
-       sigset_t sigsaved;
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
@@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        local_irq_enable();
 
 out:
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
index 78c2aff..e84e126 100644 (file)
@@ -16,4 +16,5 @@ obj-$(CONFIG_CPU_R3000)               += r3k_dump_tlb.o
 obj-$(CONFIG_CPU_TX39XX)       += r3k_dump_tlb.o
 
 # libgcc-style stuff needed in the kernel
-obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o ucmpdi2.o
+obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \
+        ucmpdi2.o
index 28002ed..199a7f9 100644 (file)
@@ -10,10 +10,18 @@ typedef int word_type __attribute__ ((mode (__word__)));
 struct DWstruct {
        int high, low;
 };
+
+struct TWstruct {
+       long long high, low;
+};
 #elif defined(__LITTLE_ENDIAN)
 struct DWstruct {
        int low, high;
 };
+
+struct TWstruct {
+       long long low, high;
+};
 #else
 #error I feel sick.
 #endif
@@ -23,4 +31,13 @@ typedef union {
        long long ll;
 } DWunion;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6)
+typedef int ti_type __attribute__((mode(TI)));
+
+typedef union {
+       struct TWstruct s;
+       ti_type ti;
+} TWunion;
+#endif
+
 #endif /* __ASM_LIBGCC_H */
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
new file mode 100644 (file)
index 0000000..111ad47
--- /dev/null
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+
+#include "libgcc.h"
+
+/*
+ * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
+ * specific case only we'll implement it here.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
+ */
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+
+/* multiply 64-bit values, low 64-bits returned */
+static inline long long notrace dmulu(long long a, long long b)
+{
+       long long res;
+
+       asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+       return res;
+}
+
+/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */
+static inline long long notrace dmuhu(long long a, long long b)
+{
+       long long res;
+
+       asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+       return res;
+}
+
+/* multiply 128-bit values, low 128-bits returned */
+ti_type notrace __multi3(ti_type a, ti_type b)
+{
+       TWunion res, aa, bb;
+
+       aa.ti = a;
+       bb.ti = b;
+
+       /*
+        * a * b =           (a.lo * b.lo)
+        *         + 2^64  * (a.hi * b.lo + a.lo * b.hi)
+        *        [+ 2^128 * (a.hi * b.hi)]
+        */
+       res.s.low = dmulu(aa.s.low, bb.s.low);
+       res.s.high = dmuhu(aa.s.low, bb.s.low);
+       res.s.high += dmulu(aa.s.high, bb.s.low);
+       res.s.high += dmulu(aa.s.low, bb.s.high);
+
+       return res.ti;
+}
+EXPORT_SYMBOL(__multi3);
+
+#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
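The splitting identity in the comment is easy to verify one level down, building the low 64 bits of a 64x64 product from 32-bit halves; the 2^64 term of the 128-bit case corresponds to the 2^32 shift here. A self-contained check:

/* Verify: a * b = a.lo*b.lo + 2^32*(a.hi*b.lo + a.lo*b.hi)  (mod 2^64). */
#include <stdint.h>
#include <stdio.h>

static uint64_t mul64_lo_from_halves(uint64_t a, uint64_t b)
{
    uint64_t alo = (uint32_t)a, ahi = a >> 32;
    uint64_t blo = (uint32_t)b, bhi = b >> 32;

    /* The a.hi*b.hi term only affects bits >= 64, so it is dropped. */
    return alo * blo + ((ahi * blo + alo * bhi) << 32);
}

int main(void)
{
    uint64_t a = 0xdeadbeefcafebabeull, b = 0x0123456789abcdefull;

    printf("split:  %016llx\n", (unsigned long long)mul64_lo_from_halves(a, b));
    printf("direct: %016llx\n", (unsigned long long)(a * b));
    return 0;
}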
index cdb5a19..9bb6baa 100644 (file)
@@ -40,7 +40,7 @@
 
 #include "uasm.c"
 
-static const struct insn const insn_table_MM[insn_invalid] = {
+static const struct insn insn_table_MM[insn_invalid] = {
        [insn_addu]     = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD},
        [insn_addiu]    = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
        [insn_and]      = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD},
index d4469b2..4f46a45 100644 (file)
@@ -109,9 +109,9 @@ static int rt_timer_probe(struct platform_device *pdev)
        }
 
        rt->irq = platform_get_irq(pdev, 0);
-       if (!rt->irq) {
+       if (rt->irq < 0) {
                dev_err(&pdev->dev, "failed to load irq\n");
-               return -ENOENT;
+               return rt->irq;
        }
 
        rt->membase = devm_ioremap_resource(&pdev->dev, res);
index efdecdb..8186afc 100644 (file)
@@ -2,4 +2,6 @@
 # Makefile for the RB532 board specific parts of the kernel
 #
 
-obj-y   += irq.o time.o setup.o serial.o prom.o gpio.o devices.o
+obj-$(CONFIG_SERIAL_8250_CONSOLE) += serial.o
+
+obj-y   += irq.o time.o setup.o prom.o gpio.o devices.o
index 32ea3e6..354d258 100644 (file)
@@ -310,6 +310,8 @@ static int __init plat_setup_devices(void)
        return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
 }
 
+#ifdef CONFIG_NET
+
 static int __init setup_kmac(char *s)
 {
        printk(KERN_INFO "korina mac = %s\n", s);
@@ -322,4 +324,6 @@ static int __init setup_kmac(char *s)
 
 __setup("kmac=", setup_kmac);
 
+#endif /* CONFIG_NET */
+
 arch_initcall(plat_setup_devices);
index f5f90bb..1748a7b 100644 (file)
@@ -79,8 +79,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
 #define init_uregs                                                     \
        ((struct pt_regs *)                                             \
         ((unsigned long) init_stack + THREAD_SIZE - sizeof(struct pt_regs)))
index c94ee54..81271d3 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y      += bpf_perf_event.h
 generic-y      += siginfo.h
index d69c338..7349a4f 100644 (file)
@@ -63,9 +63,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
index ffca24d..13a3d77 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 396d8f3..af31a9f 100644 (file)
@@ -84,8 +84,6 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
 void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
-#define init_stack      (init_thread_union.stack)
-
 #define cpu_relax()     barrier()
 
 #endif /* __ASSEMBLY__ */
index c229aa6..5c15dfa 100644 (file)
@@ -79,8 +79,6 @@ struct thread_info {
        .ksp            = 0,                            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-
 /* how to get the thread information struct from C */
 register struct thread_info *current_thread_info_reg asm("r10");
 #define current_thread_info()   (current_thread_info_reg)
index 62286db..130c16c 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 00ddb78..953bdcd 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <asm/page.h>
 #include <asm/cache.h>
+#include <asm/thread_info.h>
 #include <asm-generic/vmlinux.lds.h>
 
 #ifdef __OR1K__
index 9345b44..f57118e 100644 (file)
@@ -123,8 +123,8 @@ int puts(const char *s)
        while ((nuline = strchr(s, '\n')) != NULL) {
                if (nuline != s)
                        pdc_iodc_print(s, nuline - s);
-                       pdc_iodc_print("\r\n", 2);
-                       s = nuline + 1;
+               pdc_iodc_print("\r\n", 2);
+               s = nuline + 1;
        }
        if (*s != '\0')
                pdc_iodc_print(s, strlen(s));
index dd5a08a..3eb4bfc 100644 (file)
@@ -12,6 +12,7 @@
    for the semaphore.  */
 
 #define __PA_LDCW_ALIGNMENT    16
+#define __PA_LDCW_ALIGN_ORDER  4
 #define __ldcw_align(a) ({                                     \
        unsigned long __ret = (unsigned long) &(a)->lock[0];    \
        __ret = (__ret + __PA_LDCW_ALIGNMENT - 1)               \
@@ -29,6 +30,7 @@
    ldcd). */
 
 #define __PA_LDCW_ALIGNMENT    4
+#define __PA_LDCW_ALIGN_ORDER  2
 #define __ldcw_align(a) (&(a)->slock)
 #define __LDCW "ldcw,co"
 
index c980a02..2857575 100644 (file)
@@ -25,9 +25,6 @@ struct thread_info {
        .preempt_count  = INIT_PREEMPT_COUNT,   \
 }
 
-#define init_thread_info        (init_thread_union.thread_info)
-#define init_stack              (init_thread_union.stack)
-
 /* how to get the thread information struct from C */
 #define current_thread_info()  ((struct thread_info *)mfctl(30))
 
@@ -35,7 +32,12 @@ struct thread_info {
 
 /* thread information allocation */
 
+#ifdef CONFIG_IRQSTACKS
+#define THREAD_SIZE_ORDER      2 /* PA-RISC requires at least 16k stack */
+#else
 #define THREAD_SIZE_ORDER      3 /* PA-RISC requires at least 32k stack */
+#endif
+
 /* Be sure to hunt all references to this down when you change the size of
  * the kernel stack */
 #define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
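For the resulting stack sizes, assuming the common 4 KiB parisc page size (PAGE_SIZE is configurable, so treat this as illustrative arithmetic only):

/* THREAD_SIZE = PAGE_SIZE << THREAD_SIZE_ORDER, for the two orders above. */
#include <stdio.h>

int main(void)
{
    unsigned long page_size = 4096;

    printf("THREAD_SIZE_ORDER 2 -> %lu KiB\n", (page_size << 2) >> 10);  /* 16 */
    printf("THREAD_SIZE_ORDER 3 -> %lu KiB\n", (page_size << 3) >> 10);  /* 32 */
    return 0;
}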
index 196d2a4..286ef5a 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
+generic-y += bpf_perf_event.h
 generic-y += kvm_para.h
 generic-y += param.h
 generic-y += poll.h
index d8f7735..29b99b8 100644 (file)
@@ -870,7 +870,7 @@ static void print_parisc_device(struct parisc_device *dev)
        static int count;
 
        print_pa_hwpath(dev, hw_path);
-       printk(KERN_INFO "%d. %s at 0x%p [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+       printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
                ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
                dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
 
index a4fd296..e95207c 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/pgtable.h>
 #include <asm/signal.h>
 #include <asm/unistd.h>
+#include <asm/ldcw.h>
 #include <asm/thread_info.h>
 
 #include <linux/linkage.h>
 #endif
 
        .import         pa_tlb_lock,data
+       .macro  load_pa_tlb_lock reg
+#if __PA_LDCW_ALIGNMENT > 4
+       load32  PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
+       depi    0,31,__PA_LDCW_ALIGN_ORDER, \reg
+#else
+       load32  PA(pa_tlb_lock), \reg
+#endif
+       .endm
 
        /* space_to_prot macro creates a prot id from a space id */
 
        .macro          tlb_lock        spc,ptp,pte,tmp,tmp1,fault
 #ifdef CONFIG_SMP
        cmpib,COND(=),n 0,\spc,2f
-       load32          PA(pa_tlb_lock),\tmp
+       load_pa_tlb_lock \tmp
 1:     LDCW            0(\tmp),\tmp1
        cmpib,COND(=)   0,\tmp1,1b
        nop
        /* Release pa_tlb_lock lock. */
        .macro          tlb_unlock1     spc,tmp
 #ifdef CONFIG_SMP
-       load32          PA(pa_tlb_lock),\tmp
+       load_pa_tlb_lock \tmp
        tlb_unlock0     \spc,\tmp
 #endif
        .endm
@@ -878,9 +887,6 @@ ENTRY_CFI(syscall_exit_rfi)
        STREG   %r19,PT_SR7(%r16)
 
 intr_return:
-       /* NOTE: Need to enable interrupts incase we schedule. */
-       ssm     PSW_SM_I, %r0
-
        /* check for reschedule */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19      /* sched.h: TIF_NEED_RESCHED */
@@ -907,6 +913,11 @@ intr_check_sig:
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=),n 0,%r20,intr_restore /* backward */
 
+       /* NOTE: We need to enable interrupts if we have to deliver
+        * signals. We used to do this earlier but it caused kernel
+        * stack overflows. */
+       ssm     PSW_SM_I, %r0
+
        copy    %r0, %r25                       /* long in_syscall = 0 */
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
@@ -958,6 +969,10 @@ intr_do_resched:
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop
 
+       /* NOTE: We need to enable interrupts if we schedule.  We used
+        * to do this earlier but it caused kernel stack overflows. */
+       ssm     PSW_SM_I, %r0
+
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
 #endif
index e3a8e5e..8d072c4 100644 (file)
@@ -305,6 +305,7 @@ ENDPROC_CFI(os_hpmc)
 
 
        __INITRODATA
+       .align 4
        .export os_hpmc_size
 os_hpmc_size:
        .word .os_hpmc_end-.os_hpmc
index adf7187..2d40c4f 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/assembly.h>
 #include <asm/pgtable.h>
 #include <asm/cache.h>
+#include <asm/ldcw.h>
 #include <linux/linkage.h>
 
        .text
@@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)
 
        .macro  tlb_lock        la,flags,tmp
 #ifdef CONFIG_SMP
-       ldil            L%pa_tlb_lock,%r1
-       ldo             R%pa_tlb_lock(%r1),\la
+#if __PA_LDCW_ALIGNMENT > 4
+       load32          pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
+       depi            0,31,__PA_LDCW_ALIGN_ORDER, \la
+#else
+       load32          pa_tlb_lock, \la
+#endif
        rsm             PSW_SM_I,\flags
 1:     LDCW            0(\la),\tmp
        cmpib,<>,n      0,\tmp,3f
index 30f9239..cad3e86 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/personality.h>
 #include <linux/ptrace.h>
@@ -183,6 +184,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
        return 1;
 }
 
+/*
+ * Idle thread support
+ *
+ * Detect when running on QEMU with SeaBIOS PDC Firmware and let
+ * QEMU idle the host too.
+ */
+
+int running_on_qemu __read_mostly;
+
+void __cpuidle arch_cpu_idle_dead(void)
+{
+       /* nop on real hardware, qemu will offline CPU. */
+       asm volatile("or %%r31,%%r31,%%r31\n":::);
+}
+
+void __cpuidle arch_cpu_idle(void)
+{
+       local_irq_enable();
+
+       /* nop on real hardware, qemu will idle sleep. */
+       asm volatile("or %%r10,%%r10,%%r10\n":::);
+}
+
+static int __init parisc_idle_init(void)
+{
+       const char *marker;
+
+       /* check QEMU/SeaBIOS marker in PAGE0 */
+       marker = (char *) &PAGE0->pad0;
+       running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
+
+       if (!running_on_qemu)
+               cpu_idle_poll_ctrl(1);
+
+       return 0;
+}
+arch_initcall(parisc_idle_init);
+
 /*
  * Copy architecture-specific thread state
  */
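The probe added above keys off an 8-byte signature ("SeaBIOS" plus its terminating NUL) in PAGE0. A minimal user-space analogue of just the comparison; the PAGE0 access itself is obviously kernel-only.

/* Compare a fixed 8-byte identification field against the QEMU marker. */
#include <stdio.h>
#include <string.h>

static int running_on_qemu(const char *pad0)
{
    return memcmp(pad0, "SeaBIOS", 8) == 0;
}

int main(void)
{
    char pad0[8] = "SeaBIOS";       /* as the QEMU firmware leaves it */

    printf("running_on_qemu: %d\n", running_on_qemu(pad0));
    return 0;
}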
index 5a65798..143f90e 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/kallsyms.h>
 #include <linux/sort.h>
-#include <linux/sched.h>
 
 #include <linux/uaccess.h>
 #include <asm/assembly.h>
index 7eab4bb..66e5065 100644 (file)
@@ -16,9 +16,7 @@
 #include <linux/preempt.h>
 #include <linux/init.h>
 
-#include <asm/processor.h>
 #include <asm/delay.h>
-
 #include <asm/special_insns.h>    /* for mfctl() */
 #include <asm/processor.h> /* for boot_cpu_data */
 
index 13f7854..48f4139 100644 (file)
@@ -631,11 +631,11 @@ void __init mem_init(void)
        mem_init_print_info(NULL);
 #ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
        printk("virtual kernel memory layout:\n"
-              "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
-              "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
-              "      .init : 0x%p - 0x%p   (%4ld kB)\n"
-              "      .data : 0x%p - 0x%p   (%4ld kB)\n"
-              "      .text : 0x%p - 0x%p   (%4ld kB)\n",
+              "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
+              "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
+              "      .init : 0x%px - 0x%px   (%4ld kB)\n"
+              "      .data : 0x%px - 0x%px   (%4ld kB)\n"
+              "      .text : 0x%px - 0x%px   (%4ld kB)\n",
 
               (void*)VMALLOC_START, (void*)VMALLOC_END,
               (VMALLOC_END - VMALLOC_START) >> 20,
index c51e6ce..2ed525a 100644 (file)
@@ -166,6 +166,7 @@ config PPC
        select GENERIC_CLOCKEVENTS_BROADCAST    if SMP
        select GENERIC_CMOS_UPDATE
        select GENERIC_CPU_AUTOPROBE
+       select GENERIC_CPU_VULNERABILITIES      if PPC_BOOK3S_64
        select GENERIC_IRQ_SHOW
        select GENERIC_IRQ_SHOW_LEVEL
        select GENERIC_SMP_IDLE_THREAD
index 9a677cd..4469781 100644 (file)
@@ -1005,7 +1005,6 @@ static inline int pmd_protnone(pmd_t pmd)
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
 #define __pmd_write(pmd)       __pte_write(pmd_pte(pmd))
 #define pmd_savedwrite(pmd)    pte_savedwrite(pmd_pte(pmd))
index a703452..555e22d 100644 (file)
@@ -209,5 +209,11 @@ exc_##label##_book3e:
        ori     r3,r3,vector_offset@l;          \
        mtspr   SPRN_IVOR##vector_number,r3;
 
+#define RFI_TO_KERNEL                                                  \
+       rfi
+
+#define RFI_TO_USER                                                    \
+       rfi
+
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 
index b272052..7197b17 100644 (file)
  */
 #define EX_R3          EX_DAR
 
+/*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+ * The nop instructions allow us to insert one or more instructions to flush the
+ * L1-D cache when returning to userspace or a guest.
+ */
+#define RFI_FLUSH_SLOT                                                 \
+       RFI_FLUSH_FIXUP_SECTION;                                        \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define RFI_TO_KERNEL                                                  \
+       rfid
+
+#define RFI_TO_USER                                                    \
+       RFI_FLUSH_SLOT;                                                 \
+       rfid;                                                           \
+       b       rfi_flush_fallback
+
+#define RFI_TO_USER_OR_KERNEL                                          \
+       RFI_FLUSH_SLOT;                                                 \
+       rfid;                                                           \
+       b       rfi_flush_fallback
+
+#define RFI_TO_GUEST                                                   \
+       RFI_FLUSH_SLOT;                                                 \
+       rfid;                                                           \
+       b       rfi_flush_fallback
+
+#define HRFI_TO_KERNEL                                                 \
+       hrfid
+
+#define HRFI_TO_USER                                                   \
+       RFI_FLUSH_SLOT;                                                 \
+       hrfid;                                                          \
+       b       hrfi_flush_fallback
+
+#define HRFI_TO_USER_OR_KERNEL                                         \
+       RFI_FLUSH_SLOT;                                                 \
+       hrfid;                                                          \
+       b       hrfi_flush_fallback
+
+#define HRFI_TO_GUEST                                                  \
+       RFI_FLUSH_SLOT;                                                 \
+       hrfid;                                                          \
+       b       hrfi_flush_fallback
+
+#define HRFI_TO_UNKNOWN                                                        \
+       RFI_FLUSH_SLOT;                                                 \
+       hrfid;                                                          \
+       b       hrfi_flush_fallback
+
 #ifdef CONFIG_RELOCATABLE
 #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)                   \
        mfspr   r11,SPRN_##h##SRR0;     /* save SRR0 */                 \
@@ -218,7 +271,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
        mtspr   SPRN_##h##SRR0,r12;                                     \
        mfspr   r12,SPRN_##h##SRR1;     /* and SRR1 */                  \
        mtspr   SPRN_##h##SRR1,r10;                                     \
-       h##rfid;                                                        \
+       h##RFI_TO_KERNEL;                                               \
        b       .       /* prevent speculative execution */
 #define EXCEPTION_PROLOG_PSERIES_1(label, h)                           \
        __EXCEPTION_PROLOG_PSERIES_1(label, h)
@@ -232,7 +285,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
        mtspr   SPRN_##h##SRR0,r12;                                     \
        mfspr   r12,SPRN_##h##SRR1;     /* and SRR1 */                  \
        mtspr   SPRN_##h##SRR1,r10;                                     \
-       h##rfid;                                                        \
+       h##RFI_TO_KERNEL;                                               \
        b       .       /* prevent speculative execution */
 
 #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h)                      \
index 8f88f77..1e82eb3 100644 (file)
@@ -187,7 +187,20 @@ label##3:                                          \
        FTR_ENTRY_OFFSET label##1b-label##3b;           \
        .popsection;
 
+#define RFI_FLUSH_FIXUP_SECTION                                \
+951:                                                   \
+       .pushsection __rfi_flush_fixup,"a";             \
+       .align 2;                                       \
+952:                                                   \
+       FTR_ENTRY_OFFSET 951b-952b;                     \
+       .popsection;
+
+
 #ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+
 void apply_feature_fixups(void);
 void setup_feature_keys(void);
 #endif
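RFI_FLUSH_FIXUP_SECTION records the location of each patchable slot in __rfi_flush_fixup so do_rfi_flush_fixups() can rewrite the nops later. A toy model of that record-then-patch pattern, with an array standing in for kernel text; only the ppc nop encoding is real, everything else is illustrative.

/* Record-then-patch sketch: fixup_table names the slots, the pass rewrites them. */
#include <stdio.h>

#define NOP   0x60000000u   /* ppc "nop" (ori 0,0,0) */
#define FLUSH 0xdeadbeefu   /* stand-in for a flush instruction */

static unsigned int code[8] = { 1, NOP, NOP, NOP, 2, NOP, NOP, NOP };
static const int fixup_table[] = { 1, 5 };   /* offset of each slot's first nop */

static void patch_rfi_flush_slots(int enable)
{
    for (unsigned long i = 0; i < sizeof(fixup_table) / sizeof(fixup_table[0]); i++)
        for (int j = 0; j < 3; j++)     /* three patchable words per slot */
            code[fixup_table[i] + j] = enable ? FLUSH : NOP;
}

int main(void)
{
    patch_rfi_flush_slots(1);
    printf("code[1]=%08x code[5]=%08x\n", code[1], code[5]);
    return 0;
}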
index a409177..eca3f9c 100644 (file)
 #define H_GET_HCA_INFO          0x1B8
 #define H_GET_PERF_COUNT        0x1BC
 #define H_MANAGE_TRACE          0x1C0
+#define H_GET_CPU_CHARACTERISTICS 0x1C8
 #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
 #define H_QUERY_INT_STATE       0x1E4
 #define H_POLL_PENDING         0x1D8
 #define H_SIGNAL_SYS_RESET_ALL_OTHERS          -2
 /* >= 0 values are CPU number */
 
+/* H_GET_CPU_CHARACTERISTICS return values */
+#define H_CPU_CHAR_SPEC_BAR_ORI31      (1ull << 63) // IBM bit 0
+#define H_CPU_CHAR_BCCTRL_SERIALISED   (1ull << 62) // IBM bit 1
+#define H_CPU_CHAR_L1D_FLUSH_ORI30     (1ull << 61) // IBM bit 2
+#define H_CPU_CHAR_L1D_FLUSH_TRIG2     (1ull << 60) // IBM bit 3
+#define H_CPU_CHAR_L1D_THREAD_PRIV     (1ull << 59) // IBM bit 4
+
+#define H_CPU_BEHAV_FAVOUR_SECURITY    (1ull << 63) // IBM bit 0
+#define H_CPU_BEHAV_L1D_FLUSH_PR       (1ull << 62) // IBM bit 1
+#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR  (1ull << 61) // IBM bit 2
+
 /* Flag values used in H_REGISTER_PROC_TBL hcall */
 #define PROC_TABLE_OP_MASK     0x18
 #define PROC_TABLE_DEREG       0x10
 #define PROC_TABLE_GTSE                0x01
 
 #ifndef __ASSEMBLY__
+#include <linux/types.h>
 
 /**
  * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
@@ -436,6 +449,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc)
        }
 }
 
+struct h_cpu_char_result {
+       u64 character;
+       u64 behaviour;
+};
+
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HVCALL_H */
index 96753f3..941c2a3 100644 (file)
@@ -180,6 +180,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp);
 extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
 extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
+extern void kvmppc_setup_partition_table(struct kvm *kvm);
 
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                struct kvm_create_spapr_tce_64 *args);
index 73b9201..cd2fc1c 100644 (file)
@@ -76,6 +76,7 @@ struct machdep_calls {
 
        void __noreturn (*restart)(char *cmd);
        void __noreturn (*halt)(void);
+       void            (*panic)(char *str);
        void            (*cpu_die)(void);
 
        long            (*time_init)(void); /* Optional, may be NULL */
index 6177d43..e2a2b84 100644 (file)
@@ -160,9 +160,10 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 #endif
 }
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-                                struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+                               struct mm_struct *mm)
 {
+       return 0;
 }
 
 #ifndef CONFIG_PPC_BOOK3S_64
index 3892db9..23ac7fc 100644 (file)
@@ -232,6 +232,16 @@ struct paca_struct {
        struct sibling_subcore_state *sibling_subcore_state;
 #endif
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+       /*
+        * rfi fallback flush must be in its own cacheline to prevent
+        * other paca data leaking into the L1d
+        */
+       u64 exrfi[EX_SIZE] __aligned(0x80);
+       void *rfi_flush_fallback_area;
+       u64 l1d_flush_congruence;
+       u64 l1d_flush_sets;
+#endif
 };
 
 extern void copy_mm_to_paca(struct mm_struct *mm);
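The __aligned(0x80) on exrfi is what keeps the fallback-flush scratch area on cache lines of its own, so the saves and restores done during the flush cannot drag neighbouring paca fields back into the L1D. A small illustration of the alignment effect; field sizes here are made up, not the real paca layout.

/* Aligning a member to the 128-byte line size puts it at the start of a line. */
#include <stddef.h>
#include <stdio.h>

struct paca_like {
    unsigned long other_state[3];
    unsigned long exrfi[13] __attribute__((aligned(0x80)));
};

int main(void)
{
    size_t off = offsetof(struct paca_like, exrfi);

    printf("exrfi offset %zu, starts on its own 128-byte line: %s\n",
           off, off % 128 == 0 ? "yes" : "no");
    return 0;
}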
index 7f01b22..55eddf5 100644 (file)
@@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu)
        return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
 }
 
+static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
+{
+       unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+       long rc;
+
+       rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
+       if (rc == H_SUCCESS) {
+               p->character = retbuf[0];
+               p->behaviour = retbuf[1];
+       }
+
+       return rc;
+}
+
 #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
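A hypothetical caller of plpar_get_cpu_characteristics() would test the returned character bits to decide which flush types to enable. The constants below are copied from the hvcall.h hunk, but the mapping itself is only a sketch, not the pseries setup code (which is not shown in this hunk).

/* Translate H_GET_CPU_CHARACTERISTICS character bits into flush-type flags. */
#include <stdint.h>
#include <stdio.h>

#define H_CPU_CHAR_L1D_FLUSH_ORI30   (1ull << 61)
#define H_CPU_CHAR_L1D_FLUSH_TRIG2   (1ull << 60)

enum l1d_flush_type {
    L1D_FLUSH_NONE     = 0x1,
    L1D_FLUSH_FALLBACK = 0x2,
    L1D_FLUSH_ORI      = 0x4,
    L1D_FLUSH_MTTRIG   = 0x8,
};

static unsigned int pick_flush_types(uint64_t character)
{
    unsigned int types = L1D_FLUSH_FALLBACK;    /* always available */

    if (character & H_CPU_CHAR_L1D_FLUSH_ORI30)
        types |= L1D_FLUSH_ORI;
    if (character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
        types |= L1D_FLUSH_MTTRIG;
    return types;
}

int main(void)
{
    printf("types=0x%x\n", pick_flush_types(H_CPU_CHAR_L1D_FLUSH_ORI30));
    return 0;
}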
index 257d23d..469b7fd 100644 (file)
@@ -24,6 +24,7 @@ extern void reloc_got2(unsigned long);
 
 void check_for_initrd(void);
 void initmem_init(void);
+void setup_panic(void);
 #define ARCH_PANIC_TIMEOUT 180
 
 #ifdef CONFIG_PPC_PSERIES
@@ -38,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {}
 static inline void pseries_little_endian_exceptions(void) {}
 #endif /* CONFIG_PPC_PSERIES */
 
+void rfi_flush_enable(bool enable);
+
+/* These are bit flags */
+enum l1d_flush_type {
+       L1D_FLUSH_NONE          = 0x1,
+       L1D_FLUSH_FALLBACK      = 0x2,
+       L1D_FLUSH_ORI           = 0x4,
+       L1D_FLUSH_MTTRIG        = 0x8,
+};
+
+void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
+void do_rfi_flush_fixups(enum l1d_flush_type types);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_SETUP_H */
index a264c3a..4a12c00 100644 (file)
@@ -58,9 +58,6 @@ struct thread_info {
        .flags =        0,                      \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 #define THREAD_SIZE_ORDER      (THREAD_SHIFT - PAGE_SHIFT)
 
 /* how to get the thread information struct from C */
index 0d960ef..1a6ed59 100644 (file)
@@ -1,6 +1,7 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += param.h
 generic-y += poll.h
 generic-y += resource.h
index 61d6049..637b726 100644 (file)
@@ -443,6 +443,31 @@ struct kvm_ppc_rmmu_info {
        __u32   ap_encodings[8];
 };
 
+/* For KVM_PPC_GET_CPU_CHAR */
+struct kvm_ppc_cpu_char {
+       __u64   character;              /* characteristics of the CPU */
+       __u64   behaviour;              /* recommended software behaviour */
+       __u64   character_mask;         /* valid bits in character */
+       __u64   behaviour_mask;         /* valid bits in behaviour */
+};
+
+/*
+ * Values for character and character_mask.
+ * These are identical to the values used by H_GET_CPU_CHARACTERISTICS.
+ */
+#define KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31                (1ULL << 63)
+#define KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED     (1ULL << 62)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30       (1ULL << 61)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2       (1ULL << 60)
+#define KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV       (1ULL << 59)
+#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED      (1ULL << 58)
+#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF     (1ULL << 57)
+#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS       (1ULL << 56)
+
+#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY      (1ULL << 63)
+#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR         (1ULL << 62)
+#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR    (1ULL << 61)
+
 /* Per-vcpu XICS interrupt controller state */
 #define KVM_REG_PPC_ICP_STATE  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
 
index 6b95841..f390d57 100644 (file)
@@ -237,6 +237,11 @@ int main(void)
        OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
        OFFSET(PACA_IN_MCE, paca_struct, in_mce);
        OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
+       OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
+       OFFSET(PACA_EXRFI, paca_struct, exrfi);
+       OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
+       OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
+
 #endif
        OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
        OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
index 610955f..679bbe7 100644 (file)
@@ -102,6 +102,7 @@ _GLOBAL(__setup_cpu_power9)
        li      r0,0
        mtspr   SPRN_PSSCR,r0
        mtspr   SPRN_LPID,r0
+       mtspr   SPRN_PID,r0
        mfspr   r3,SPRN_LPCR
        LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE  | LPCR_HEIC)
        or      r3, r3, r4
@@ -126,6 +127,7 @@ _GLOBAL(__restore_cpu_power9)
        li      r0,0
        mtspr   SPRN_PSSCR,r0
        mtspr   SPRN_LPID,r0
+       mtspr   SPRN_PID,r0
        mfspr   r3,SPRN_LPCR
        LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
        or      r3, r3, r4
index 3320bca..2748584 100644 (file)
 #include <asm/tm.h>
 #include <asm/ppc-opcode.h>
 #include <asm/export.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/exception-64s.h>
+#else
+#include <asm/exception-64e.h>
+#endif
 
 /*
  * System calls.
@@ -262,13 +267,23 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
        ld      r13,GPR13(r1)   /* only restore r13 if returning to usermode */
+       ld      r2,GPR2(r1)
+       ld      r1,GPR1(r1)
+       mtlr    r4
+       mtcr    r5
+       mtspr   SPRN_SRR0,r7
+       mtspr   SPRN_SRR1,r8
+       RFI_TO_USER
+       b       .       /* prevent speculative execution */
+
+       /* exit to kernel */
 1:     ld      r2,GPR2(r1)
        ld      r1,GPR1(r1)
        mtlr    r4
        mtcr    r5
        mtspr   SPRN_SRR0,r7
        mtspr   SPRN_SRR1,r8
-       RFI
+       RFI_TO_KERNEL
        b       .       /* prevent speculative execution */
 
 .Lsyscall_error:
@@ -397,8 +412,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        mtmsrd  r10, 1
        mtspr   SPRN_SRR0, r11
        mtspr   SPRN_SRR1, r12
-
-       rfid
+       RFI_TO_USER
        b       .       /* prevent speculative execution */
 #endif
 _ASM_NOKPROBE_SYMBOL(system_call_common);
@@ -878,7 +892,7 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
        REST_GPR(13, r1)
-1:
+
        mtspr   SPRN_SRR1,r3
 
        ld      r2,_CCR(r1)
@@ -891,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        ld      r3,GPR3(r1)
        ld      r4,GPR4(r1)
        ld      r1,GPR1(r1)
+       RFI_TO_USER
+       b       .       /* prevent speculative execution */
 
-       rfid
+1:     mtspr   SPRN_SRR1,r3
+
+       ld      r2,_CCR(r1)
+       mtcrf   0xFF,r2
+       ld      r2,_NIP(r1)
+       mtspr   SPRN_SRR0,r2
+
+       ld      r0,GPR0(r1)
+       ld      r2,GPR2(r1)
+       ld      r3,GPR3(r1)
+       ld      r4,GPR4(r1)
+       ld      r1,GPR1(r1)
+       RFI_TO_KERNEL
        b       .       /* prevent speculative execution */
 
 #endif /* CONFIG_PPC_BOOK3E */
@@ -1073,7 +1101,7 @@ __enter_rtas:
        
        mtspr   SPRN_SRR0,r5
        mtspr   SPRN_SRR1,r6
-       rfid
+       RFI_TO_KERNEL
        b       .       /* prevent speculative execution */
 
 rtas_return_loc:
@@ -1098,7 +1126,7 @@ rtas_return_loc:
 
        mtspr   SPRN_SRR0,r3
        mtspr   SPRN_SRR1,r4
-       rfid
+       RFI_TO_KERNEL
        b       .       /* prevent speculative execution */
 _ASM_NOKPROBE_SYMBOL(__enter_rtas)
 _ASM_NOKPROBE_SYMBOL(rtas_return_loc)
@@ -1171,7 +1199,7 @@ _GLOBAL(enter_prom)
        LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
        andc    r11,r11,r12
        mtsrr1  r11
-       rfid
+       RFI_TO_KERNEL
 #endif /* CONFIG_PPC_BOOK3E */
 
 1:     /* Return from OF */
index e441b46..2dc10bf 100644 (file)
@@ -256,7 +256,7 @@ BEGIN_FTR_SECTION
        LOAD_HANDLER(r12, machine_check_handle_early)
 1:     mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r11
-       rfid
+       RFI_TO_KERNEL
        b       .       /* prevent speculative execution */
 2:
        /* Stack overflow. Stay on emergency stack and panic.
@@ -445,7 +445,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
        li      r3,MSR_ME
        andc    r10,r10,r3              /* Turn off MSR_ME */
        mtspr   SPRN_SRR1,r10
-       rfid
+       RFI_TO_KERNEL
        b       .
 2:
        /*
@@ -463,7 +463,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
         */
        bl      machine_check_queue_event
        MACHINE_CHECK_HANDLER_WINDUP
-       rfid
+       RFI_TO_USER_OR_KERNEL
 9:
        /* Deliver the machine check to host kernel in V mode. */
        MACHINE_CHECK_HANDLER_WINDUP
@@ -598,6 +598,9 @@ EXC_COMMON_BEGIN(slb_miss_common)
        stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
        std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
 
+       andi.   r9,r11,MSR_PR   // Check for exception from userspace
+       cmpdi   cr4,r9,MSR_PR   // And save the result in CR4 for later
+
        /*
         * Test MSR_RI before calling slb_allocate_realmode, because the
         * MSR in r11 gets clobbered. However we still want to allocate
@@ -624,9 +627,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 
        /* All done -- return from exception. */
 
+       bne     cr4,1f          /* returning to kernel */
+
 .machine       push
 .machine       "power4"
        mtcrf   0x80,r9
+       mtcrf   0x08,r9         /* MSR[PR] indication is in cr4 */
        mtcrf   0x04,r9         /* MSR[RI] indication is in cr5 */
        mtcrf   0x02,r9         /* I/D indication is in cr6 */
        mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
@@ -640,9 +646,30 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
        ld      r11,PACA_EXSLB+EX_R11(r13)
        ld      r12,PACA_EXSLB+EX_R12(r13)
        ld      r13,PACA_EXSLB+EX_R13(r13)
-       rfid
+       RFI_TO_USER
+       b       .       /* prevent speculative execution */
+1:
+.machine       push
+.machine       "power4"
+       mtcrf   0x80,r9
+       mtcrf   0x08,r9         /* MSR[PR] indication is in cr4 */
+       mtcrf   0x04,r9         /* MSR[RI] indication is in cr5 */
+       mtcrf   0x02,r9         /* I/D indication is in cr6 */
+       mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
+.machine       pop
+
+       RESTORE_CTR(r9, PACA_EXSLB)
+       RESTORE_PPR_PACA(PACA_EXSLB, r9)
+       mr      r3,r12
+       ld      r9,PACA_EXSLB+EX_R9(r13)
+       ld      r10,PACA_EXSLB+EX_R10(r13)
+       ld      r11,PACA_EXSLB+EX_R11(r13)
+       ld      r12,PACA_EXSLB+EX_R12(r13)
+       ld      r13,PACA_EXSLB+EX_R13(r13)
+       RFI_TO_KERNEL
        b       .       /* prevent speculative execution */
 
+
 2:     std     r3,PACA_EXSLB+EX_DAR(r13)
        mr      r3,r12
        mfspr   r11,SPRN_SRR0
@@ -651,7 +678,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
        mtspr   SPRN_SRR0,r10
        ld      r10,PACAKMSR(r13)
        mtspr   SPRN_SRR1,r10
-       rfid
+       RFI_TO_KERNEL
        b       .
 
 8:     std     r3,PACA_EXSLB+EX_DAR(r13)
@@ -662,7 +689,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
        mtspr   SPRN_SRR0,r10
        ld      r10,PACAKMSR(r13)
        mtspr   SPRN_SRR1,r10
-       rfid
+       RFI_TO_KERNEL
        b       .
 
 EXC_COMMON_BEGIN(unrecov_slb)
@@ -901,7 +928,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
        mtspr   SPRN_SRR0,r10 ;                                 \
        ld      r10,PACAKMSR(r13) ;                             \
        mtspr   SPRN_SRR1,r10 ;                                 \
-       rfid ;                                                  \
+       RFI_TO_KERNEL ;                                         \
        b       . ;     /* prevent speculative execution */
 
 #ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
@@ -917,7 +944,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                              \
        xori    r12,r12,MSR_LE ;                                \
        mtspr   SPRN_SRR1,r12 ;                                 \
        mr      r13,r9 ;                                        \
-       rfid ;          /* return to userspace */               \
+       RFI_TO_USER ;   /* return to userspace */               \
        b       . ;     /* prevent speculative execution */
 #else
 #define SYSCALL_FASTENDIAN_TEST
@@ -1063,7 +1090,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
        mtcr    r11
        REST_GPR(11, r1)
        ld      r1,GPR1(r1)
-       hrfid
+       HRFI_TO_USER_OR_KERNEL
 
 1:     mtcr    r11
        REST_GPR(11, r1)
@@ -1314,7 +1341,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        ld      r11,PACA_EXGEN+EX_R11(r13)
        ld      r12,PACA_EXGEN+EX_R12(r13)
        ld      r13,PACA_EXGEN+EX_R13(r13)
-       HRFID
+       HRFI_TO_UNKNOWN
        b       .
 #endif
 
@@ -1418,10 +1445,94 @@ masked_##_H##interrupt:                                 \
        ld      r10,PACA_EXGEN+EX_R10(r13);             \
        ld      r11,PACA_EXGEN+EX_R11(r13);             \
        /* returns to kernel where r13 must be set up, so don't restore it */ \
-       ##_H##rfid;                                     \
+       ##_H##RFI_TO_KERNEL;                            \
        b       .;                                      \
        MASKED_DEC_HANDLER(_H)
 
+TRAMP_REAL_BEGIN(rfi_flush_fallback)
+       SET_SCRATCH0(r13);
+       GET_PACA(r13);
+       std     r9,PACA_EXRFI+EX_R9(r13)
+       std     r10,PACA_EXRFI+EX_R10(r13)
+       std     r11,PACA_EXRFI+EX_R11(r13)
+       std     r12,PACA_EXRFI+EX_R12(r13)
+       std     r8,PACA_EXRFI+EX_R13(r13)
+       mfctr   r9
+       ld      r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+       ld      r11,PACA_L1D_FLUSH_SETS(r13)
+       ld      r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
+       /*
+        * The load addresses are at staggered offsets within cachelines,
+        * which suits some pipelines better (on others it should not
+        * hurt).
+        */
+       addi    r12,r12,8
+       mtctr   r11
+       DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+       /* order ld/st prior to dcbt stop all streams with flushing */
+       sync
+1:     li      r8,0
+       .rept   8 /* 8-way set associative */
+       ldx     r11,r10,r8
+       add     r8,r8,r12
+       xor     r11,r11,r11     // Ensure r11 is 0 even if fallback area is not
+       add     r8,r8,r11       // Add 0, this creates a dependency on the ldx
+       .endr
+       addi    r10,r10,128 /* 128 byte cache line */
+       bdnz    1b
+
+       mtctr   r9
+       ld      r9,PACA_EXRFI+EX_R9(r13)
+       ld      r10,PACA_EXRFI+EX_R10(r13)
+       ld      r11,PACA_EXRFI+EX_R11(r13)
+       ld      r12,PACA_EXRFI+EX_R12(r13)
+       ld      r8,PACA_EXRFI+EX_R13(r13)
+       GET_SCRATCH0(r13);
+       rfid
+
+TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+       SET_SCRATCH0(r13);
+       GET_PACA(r13);
+       std     r9,PACA_EXRFI+EX_R9(r13)
+       std     r10,PACA_EXRFI+EX_R10(r13)
+       std     r11,PACA_EXRFI+EX_R11(r13)
+       std     r12,PACA_EXRFI+EX_R12(r13)
+       std     r8,PACA_EXRFI+EX_R13(r13)
+       mfctr   r9
+       ld      r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+       ld      r11,PACA_L1D_FLUSH_SETS(r13)
+       ld      r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
+       /*
+        * The load addresses are at staggered offsets within cachelines,
+        * which suits some pipelines better (on others it should not
+        * hurt).
+        */
+       addi    r12,r12,8
+       mtctr   r11
+       DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+       /* order ld/st prior to dcbt stop all streams with flushing */
+       sync
+1:     li      r8,0
+       .rept   8 /* 8-way set associative */
+       ldx     r11,r10,r8
+       add     r8,r8,r12
+       xor     r11,r11,r11     // Ensure r11 is 0 even if fallback area is not
+       add     r8,r8,r11       // Add 0, this creates a dependency on the ldx
+       .endr
+       addi    r10,r10,128 /* 128 byte cache line */
+       bdnz    1b
+
+       mtctr   r9
+       ld      r9,PACA_EXRFI+EX_R9(r13)
+       ld      r10,PACA_EXRFI+EX_R10(r13)
+       ld      r11,PACA_EXRFI+EX_R11(r13)
+       ld      r12,PACA_EXRFI+EX_R12(r13)
+       ld      r8,PACA_EXRFI+EX_R13(r13)
+       GET_SCRATCH0(r13);
+       hrfid
+
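The fallback handlers implement a displacement flush: walk a dedicated buffer so every set and way of the L1D is refilled with harmless data. A user-space sketch of the same walk, ignoring the staggered offsets and using the geometry from the comments (128-byte lines, 8 ways, buffer sized at twice the L1D for prefetch runoff); the cache-size constant is an assumed example value.

/* Displacement-flush walk over a buffer, touching one line per set per way. */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define L1D_SIZE   (32 * 1024)      /* example size, not read from hardware */
#define LINE_SIZE  128
#define WAYS       8

static void displacement_flush(const volatile uint8_t *area)
{
    size_t way_size = L1D_SIZE / WAYS;
    uint64_t sink = 0;

    for (size_t set = 0; set < way_size; set += LINE_SIZE)
        for (size_t way = 0; way < WAYS; way++)
            sink += area[way * way_size + set];
    (void)sink;
}

int main(void)
{
    uint8_t *area = calloc(2, L1D_SIZE);    /* 2x L1D, as in the fallback */

    if (!area)
        return 1;
    displacement_flush(area);
    puts("flush walk done");
    free(area);
    return 0;
}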
 /*
  * Real mode exceptions actually use this too, but alternate
  * instruction code patches (which end up in the common .text area)
@@ -1441,7 +1552,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
        addi    r13, r13, 4
        mtspr   SPRN_SRR0, r13
        GET_SCRATCH0(r13)
-       rfid
+       RFI_TO_KERNEL
        b       .
 
 TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
@@ -1453,7 +1564,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
        addi    r13, r13, 4
        mtspr   SPRN_HSRR0, r13
        GET_SCRATCH0(r13)
-       hrfid
+       HRFI_TO_KERNEL
        b       .
 #endif
 
index 04ea5c0..3c2c268 100644 (file)
@@ -1462,25 +1462,6 @@ static void fadump_init_files(void)
        return;
 }
 
-static int fadump_panic_event(struct notifier_block *this,
-                             unsigned long event, void *ptr)
-{
-       /*
-        * If firmware-assisted dump has been registered then trigger
-        * firmware-assisted dump and let firmware handle everything
-        * else. If this returns, then fadump was not registered, so
-        * go through the rest of the panic path.
-        */
-       crash_fadump(NULL, ptr);
-
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block fadump_panic_block = {
-       .notifier_call = fadump_panic_event,
-       .priority = INT_MIN /* may not return; must be done last */
-};
-
 /*
  * Prepare for firmware-assisted dump.
  */
@@ -1513,9 +1494,6 @@ int __init setup_fadump(void)
                init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
        fadump_init_files();
 
-       atomic_notifier_chain_register(&panic_notifier_list,
-                                       &fadump_panic_block);
-
        return 1;
 }
 subsys_initcall(setup_fadump);
index 8ac0bd2..3280953 100644 (file)
@@ -623,7 +623,9 @@ BEGIN_FTR_SECTION
         * NOTE, we rely on r0 being 0 from above.
         */
        mtspr   SPRN_IAMR,r0
+BEGIN_FTR_SECTION_NESTED(42)
        mtspr   SPRN_AMOR,r0
+END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
        /* save regs for local vars on new stack.
index bfdd783..72be0c3 100644 (file)
@@ -1403,7 +1403,7 @@ void show_regs(struct pt_regs * regs)
 
        printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
-       printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
+       printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR:  "REG" ", regs->msr);
        print_msr_bits(regs->msr);
@@ -1569,16 +1569,22 @@ void arch_release_task_struct(struct task_struct *t)
  */
 int set_thread_tidr(struct task_struct *t)
 {
+       int rc;
+
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return -EINVAL;
 
        if (t != current)
                return -EINVAL;
 
-       t->thread.tidr = assign_thread_tidr();
-       if (t->thread.tidr < 0)
-               return t->thread.tidr;
+       if (t->thread.tidr)
+               return 0;
+
+       rc = assign_thread_tidr();
+       if (rc < 0)
+               return rc;
 
+       t->thread.tidr = rc;
        mtspr(SPRN_TIDR, t->thread.tidr);
 
        return 0;
index 2075322..8fd3a70 100644 (file)
@@ -242,14 +242,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        unsigned short maj;
        unsigned short min;
 
-       /* We only show online cpus: disable preempt (overzealous, I
-        * knew) to prevent cpu going down. */
-       preempt_disable();
-       if (!cpu_online(cpu_id)) {
-               preempt_enable();
-               return 0;
-       }
-
 #ifdef CONFIG_SMP
        pvr = per_cpu(cpu_pvr, cpu_id);
 #else
@@ -358,9 +350,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #ifdef CONFIG_SMP
        seq_printf(m, "\n");
 #endif
-
-       preempt_enable();
-
        /* If this is the last cpu, print the summary */
        if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
                show_cpuinfo_summary(m);
@@ -704,6 +693,30 @@ int check_legacy_ioport(unsigned long base_port)
 }
 EXPORT_SYMBOL(check_legacy_ioport);
 
+static int ppc_panic_event(struct notifier_block *this,
+                             unsigned long event, void *ptr)
+{
+       /*
+        * If firmware-assisted dump has been registered then trigger
+        * firmware-assisted dump and let firmware handle everything else.
+        */
+       crash_fadump(NULL, ptr);
+       ppc_md.panic(ptr);  /* May not return */
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_panic_block = {
+       .notifier_call = ppc_panic_event,
+       .priority = INT_MIN /* may not return; must be done last */
+};
+
+void __init setup_panic(void)
+{
+       if (!ppc_md.panic)
+               return;
+       atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
+}
+
 #ifdef CONFIG_CHECK_CACHE_COHERENCY
 /*
  * For platforms that have configurable cache-coherency.  This function
@@ -848,6 +861,9 @@ void __init setup_arch(char **cmdline_p)
        /* Probe the machine type, establish ppc_md. */
        probe_machine();
 
+       /* Setup panic notifier if requested by the platform. */
+       setup_panic();
+
        /*
         * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
         * it from their respective probe() function.
index 8956a98..e67413f 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/memory.h>
 #include <linux/nmi.h>
 
+#include <asm/debugfs.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -801,3 +802,141 @@ static int __init disable_hardlockup_detector(void)
        return 0;
 }
 early_initcall(disable_hardlockup_detector);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+static enum l1d_flush_type enabled_flush_types;
+static void *l1d_flush_fallback_area;
+static bool no_rfi_flush;
+bool rfi_flush;
+
+static int __init handle_no_rfi_flush(char *p)
+{
+       pr_info("rfi-flush: disabled on command line.");
+       no_rfi_flush = true;
+       return 0;
+}
+early_param("no_rfi_flush", handle_no_rfi_flush);
+
+/*
+ * The RFI flush is not KPTI, but because users will see documentation that
+ * says to use nopti, we hijack that option here to also disable the RFI flush.
+ */
+static int __init handle_no_pti(char *p)
+{
+       pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
+       handle_no_rfi_flush(NULL);
+       return 0;
+}
+early_param("nopti", handle_no_pti);
+
+static void do_nothing(void *unused)
+{
+       /*
+        * We don't need to do the flush explicitly, just enter+exit kernel is
+        * sufficient, the RFI exit handlers will do the right thing.
+        */
+}
+
+void rfi_flush_enable(bool enable)
+{
+       if (rfi_flush == enable)
+               return;
+
+       if (enable) {
+               do_rfi_flush_fixups(enabled_flush_types);
+               on_each_cpu(do_nothing, NULL, 1);
+       } else
+               do_rfi_flush_fixups(L1D_FLUSH_NONE);
+
+       rfi_flush = enable;
+}
+
+static void init_fallback_flush(void)
+{
+       u64 l1d_size, limit;
+       int cpu;
+
+       l1d_size = ppc64_caches.l1d.size;
+       limit = min(safe_stack_limit(), ppc64_rma_size);
+
+       /*
+        * Align to L1d size, and size it at 2x L1d size, to catch possible
+        * hardware prefetch runoff. We don't have a recipe for load patterns to
+        * reliably avoid the prefetcher.
+        */
+       l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
+       memset(l1d_flush_fallback_area, 0, l1d_size * 2);
+
+       for_each_possible_cpu(cpu) {
+               /*
+                * The fallback flush is currently coded for 8-way
+                * associativity. Different associativity is possible, but it
+                * will be treated as 8-way and may not evict the lines as
+                * effectively.
+                *
+                * 128 byte lines are mandatory.
+                */
+               u64 c = l1d_size / 8;
+
+               paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
+               paca[cpu].l1d_flush_congruence = c;
+               paca[cpu].l1d_flush_sets = c / 128;
+       }
+}
+
+void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
+{
+       if (types & L1D_FLUSH_FALLBACK) {
+               pr_info("rfi-flush: Using fallback displacement flush\n");
+               init_fallback_flush();
+       }
+
+       if (types & L1D_FLUSH_ORI)
+               pr_info("rfi-flush: Using ori type flush\n");
+
+       if (types & L1D_FLUSH_MTTRIG)
+               pr_info("rfi-flush: Using mttrig type flush\n");
+
+       enabled_flush_types = types;
+
+       if (!no_rfi_flush)
+               rfi_flush_enable(enable);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int rfi_flush_set(void *data, u64 val)
+{
+       if (val == 1)
+               rfi_flush_enable(true);
+       else if (val == 0)
+               rfi_flush_enable(false);
+       else
+               return -EINVAL;
+
+       return 0;
+}
+
+static int rfi_flush_get(void *data, u64 *val)
+{
+       *val = rfi_flush ? 1 : 0;
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+
+static __init int rfi_flush_debugfs_init(void)
+{
+       debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
+       return 0;
+}
+device_initcall(rfi_flush_debugfs_init);
+#endif
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       if (rfi_flush)
+               return sprintf(buf, "Mitigation: RFI Flush\n");
+
+       return sprintf(buf, "Vulnerable\n");
+}
+#endif /* CONFIG_PPC_BOOK3S_64 */
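
    The block above exposes two userspace-visible handles on the flush state: the
    cpu_show_meltdown() hook behind the generic vulnerabilities sysfs file and the
    debugfs toggle wired to rfi_flush_set()/rfi_flush_get(). A hedged sketch of
    poking both from userspace; the paths assume debugfs is mounted at
    /sys/kernel/debug and that powerpc_debugfs_root is the usual "powerpc"
    directory there:

        /* Sketch, not part of the patch: read the mitigation state and toggle
         * the debugfs knob added above. Requires root for the debugfs write. */
        #include <stdio.h>

        int main(void)
        {
                char buf[128] = "";
                FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/meltdown", "r");

                if (f) {
                        if (fgets(buf, sizeof(buf), f))
                                printf("meltdown: %s", buf);
                        fclose(f);
                }

                /* Writing 0/1 ends up in rfi_flush_set() via the simple attribute. */
                f = fopen("/sys/kernel/debug/powerpc/rfi_flush", "w");
                if (!f) {
                        perror("rfi_flush (needs root + debugfs)");
                        return 1;
                }
                fputs("1\n", f);
                fclose(f);
                return 0;
        }
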
index 0494e15..307843d 100644 (file)
@@ -132,6 +132,15 @@ SECTIONS
        /* Read-only data */
        RO_DATA(PAGE_SIZE)
 
+#ifdef CONFIG_PPC64
+       . = ALIGN(8);
+       __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
+               __start___rfi_flush_fixup = .;
+               *(__rfi_flush_fixup)
+               __stop___rfi_flush_fixup = .;
+       }
+#endif
+
        EXCEPTION_TABLE(0)
 
        NOTES :kernel :notes
index 29ebe2f..a93d719 100644 (file)
@@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                gpte->may_read = true;
                gpte->may_write = true;
                gpte->page_size = MMU_PAGE_4K;
+               gpte->wimg = HPTE_R_M;
 
                return 0;
        }
index 235319c..b73dbc9 100644 (file)
@@ -65,11 +65,17 @@ struct kvm_resize_hpt {
        u32 order;
 
        /* These fields protected by kvm->lock */
+
+       /* Possible values and their usage:
+        *  <0     an error occurred during allocation,
+        *  -EBUSY allocation is in progress,
+        *  0      allocation made successfully.
+        */
        int error;
-       bool prepare_done;
 
-       /* Private to the work thread, until prepare_done is true,
-        * then protected by kvm->resize_hpt_sem */
+       /* Private to the work thread, until error != -EBUSY,
+        * then protected by kvm->lock.
+        */
        struct kvm_hpt_info hpt;
 };
 
@@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
                 * Reset all the reverse-mapping chains for all memslots
                 */
                kvmppc_rmap_reset(kvm);
-               /* Ensure that each vcpu will flush its TLB on next entry. */
-               cpumask_setall(&kvm->arch.need_tlb_flush);
                err = 0;
                goto out;
        }
@@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
        kvmppc_set_hpt(kvm, &info);
 
 out:
+       if (err == 0)
+               /* Ensure that each vcpu will flush its TLB on next entry. */
+               cpumask_setall(&kvm->arch.need_tlb_flush);
+
        mutex_unlock(&kvm->lock);
        return err;
 }
@@ -1238,8 +1246,9 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
        unsigned long vpte, rpte, guest_rpte;
        int ret;
        struct revmap_entry *rev;
-       unsigned long apsize, psize, avpn, pteg, hash;
+       unsigned long apsize, avpn, pteg, hash;
        unsigned long new_idx, new_pteg, replace_vpte;
+       int pshift;
 
        hptep = (__be64 *)(old->virt + (idx << 4));
 
@@ -1298,8 +1307,8 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
                goto out;
 
        rpte = be64_to_cpu(hptep[1]);
-       psize = hpte_base_page_size(vpte, rpte);
-       avpn = HPTE_V_AVPN_VAL(vpte) & ~((psize - 1) >> 23);
+       pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
+       avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
        pteg = idx / HPTES_PER_GROUP;
        if (vpte & HPTE_V_SECONDARY)
                pteg = ~pteg;
@@ -1311,20 +1320,20 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
                offset = (avpn & 0x1f) << 23;
                vsid = avpn >> 5;
                /* We can find more bits from the pteg value */
-               if (psize < (1ULL << 23))
-                       offset |= ((vsid ^ pteg) & old_hash_mask) * psize;
+               if (pshift < 23)
+                       offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;
 
-               hash = vsid ^ (offset / psize);
+               hash = vsid ^ (offset >> pshift);
        } else {
                unsigned long offset, vsid;
 
                /* We only have 40 - 23 bits of seg_off in avpn */
                offset = (avpn & 0x1ffff) << 23;
                vsid = avpn >> 17;
-               if (psize < (1ULL << 23))
-                       offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) * psize;
+               if (pshift < 23)
+                       offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;
 
-               hash = vsid ^ (vsid << 25) ^ (offset / psize);
+               hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
        }
 
        new_pteg = hash & new_hash_mask;
@@ -1412,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
 
 static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
 {
-       BUG_ON(kvm->arch.resize_hpt != resize);
+       if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+               return;
 
        if (!resize)
                return;
 
-       if (resize->hpt.virt)
-               kvmppc_free_hpt(&resize->hpt);
+       if (resize->error != -EBUSY) {
+               if (resize->hpt.virt)
+                       kvmppc_free_hpt(&resize->hpt);
+               kfree(resize);
+       }
 
-       kvm->arch.resize_hpt = NULL;
-       kfree(resize);
+       if (kvm->arch.resize_hpt == resize)
+               kvm->arch.resize_hpt = NULL;
 }
 
 static void resize_hpt_prepare_work(struct work_struct *work)
@@ -1430,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work)
                                                     struct kvm_resize_hpt,
                                                     work);
        struct kvm *kvm = resize->kvm;
-       int err;
+       int err = 0;
 
-       resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
-                        resize->order);
-
-       err = resize_hpt_allocate(resize);
+       if (WARN_ON(resize->error != -EBUSY))
+               return;
 
        mutex_lock(&kvm->lock);
 
+       /* Request is still current? */
+       if (kvm->arch.resize_hpt == resize) {
+               /* We may request large allocations here:
+                * don't sleep for a long time with kvm->lock held.
+                */
+               mutex_unlock(&kvm->lock);
+
+               resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
+                                resize->order);
+
+               err = resize_hpt_allocate(resize);
+
+               /* We have a strict assumption about -EBUSY
+                * when preparing for HPT resize.
+                */
+               if (WARN_ON(err == -EBUSY))
+                       err = -EINPROGRESS;
+
+               mutex_lock(&kvm->lock);
+               /* It is possible that kvm->arch.resize_hpt != resize
+                * after we grab kvm->lock again.
+                */
+       }
+
        resize->error = err;
-       resize->prepare_done = true;
+
+       if (kvm->arch.resize_hpt != resize)
+               resize_hpt_release(kvm, resize);
 
        mutex_unlock(&kvm->lock);
 }
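
    The reworked prepare path drops kvm->lock around the potentially slow HPT
    allocation and re-checks, after re-taking the lock, that the request is still
    the current one. A generic userspace sketch of that unlock/re-check pattern;
    the mutex, the "current request" pointer and slow_alloc() are illustrative
    stand-ins, not KVM code (build with cc -pthread):

        #include <pthread.h>
        #include <stdlib.h>
        #include <unistd.h>

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static void *current_request;   /* the "live" request; protected by lock */
        static void *prepared_mem;      /* result of the slow work; protected by lock */

        static void *slow_alloc(void)
        {
                sleep(1);               /* stands in for a large allocation */
                return malloc(4096);
        }

        static void prepare(void *req)
        {
                void *mem = NULL;

                pthread_mutex_lock(&lock);
                if (current_request == req) {
                        /* Don't hold the lock across the slow part... */
                        pthread_mutex_unlock(&lock);
                        mem = slow_alloc();
                        pthread_mutex_lock(&lock);
                        /* ...and re-check: the request may have been superseded. */
                }
                if (current_request == req)
                        prepared_mem = mem;
                else
                        free(mem);      /* no longer wanted: discard quietly */
                pthread_mutex_unlock(&lock);
        }

        int main(void)
        {
                int token;

                current_request = &token;   /* still single-threaded here */
                prepare(&token);
                free(prepared_mem);
                return 0;
        }
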
@@ -1465,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 
        if (resize) {
                if (resize->order == shift) {
-                       /* Suitable resize in progress */
-                       if (resize->prepare_done) {
-                               ret = resize->error;
-                               if (ret != 0)
-                                       resize_hpt_release(kvm, resize);
-                       } else {
+                       /* Suitable resize in progress? */
+                       ret = resize->error;
+                       if (ret == -EBUSY)
                                ret = 100; /* estimated time in ms */
-                       }
+                       else if (ret)
+                               resize_hpt_release(kvm, resize);
 
                        goto out;
                }
@@ -1492,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
                ret = -ENOMEM;
                goto out;
        }
+
+       resize->error = -EBUSY;
        resize->order = shift;
        resize->kvm = kvm;
        INIT_WORK(&resize->work, resize_hpt_prepare_work);
@@ -1546,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
        if (!resize || (resize->order != shift))
                goto out;
 
-       ret = -EBUSY;
-       if (!resize->prepare_done)
-               goto out;
-
        ret = resize->error;
-       if (ret != 0)
+       if (ret)
                goto out;
 
        ret = resize_hpt_rehash(resize);
-       if (ret != 0)
+       if (ret)
                goto out;
 
        resize_hpt_pivot(resize);
@@ -1801,6 +1834,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
        ssize_t nb;
        long int err, ret;
        int mmu_ready;
+       int pshift;
 
        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
@@ -1855,6 +1889,9 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                        err = -EINVAL;
                        if (!(v & HPTE_V_VALID))
                                goto out;
+                       pshift = kvmppc_hpte_base_page_shift(v, r);
+                       if (pshift <= 0)
+                               goto out;
                        lbuf += 2;
                        nb += HPTE_SIZE;
 
@@ -1869,14 +1906,18 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                                goto out;
                        }
                        if (!mmu_ready && is_vrma_hpte(v)) {
-                               unsigned long psize = hpte_base_page_size(v, r);
-                               unsigned long senc = slb_pgsize_encoding(psize);
-                               unsigned long lpcr;
+                               unsigned long senc, lpcr;
 
+                               senc = slb_pgsize_encoding(1ul << pshift);
                                kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
                                        (VRMA_VSID << SLB_VSID_SHIFT_1T);
-                               lpcr = senc << (LPCR_VRMASD_SH - 4);
-                               kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+                               if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+                                       lpcr = senc << (LPCR_VRMASD_SH - 4);
+                                       kvmppc_update_lpcr(kvm, lpcr,
+                                                          LPCR_VRMASD);
+                               } else {
+                                       kvmppc_setup_partition_table(kvm);
+                               }
                                mmu_ready = 1;
                        }
                        ++i;
index 79ea3d9..2d46037 100644 (file)
@@ -120,7 +120,6 @@ MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
-static void kvmppc_setup_partition_table(struct kvm *kvm);
 
 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
                int *ip)
@@ -3574,7 +3573,7 @@ static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
        return;
 }
 
-static void kvmppc_setup_partition_table(struct kvm *kvm)
+void kvmppc_setup_partition_table(struct kvm *kvm)
 {
        unsigned long dw0, dw1;
 
index 2659844..9c61f73 100644 (file)
@@ -79,7 +79,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
        mtmsrd  r0,1            /* clear RI in MSR */
        mtsrr0  r5
        mtsrr1  r6
-       RFI
+       RFI_TO_KERNEL
 
 kvmppc_call_hv_entry:
 BEGIN_FTR_SECTION
@@ -199,7 +199,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        mtmsrd  r6, 1                   /* Clear RI in MSR */
        mtsrr0  r8
        mtsrr1  r7
-       RFI
+       RFI_TO_KERNEL
 
        /* Virtual-mode return */
 .Lvirt_return:
@@ -1167,8 +1167,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
        ld      r0, VCPU_GPR(R0)(r4)
        ld      r4, VCPU_GPR(R4)(r4)
-
-       hrfid
+       HRFI_TO_GUEST
        b       .
 
 secondary_too_late:
@@ -3320,7 +3319,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
        ld      r4, PACAKMSR(r13)
        mtspr   SPRN_SRR0, r3
        mtspr   SPRN_SRR1, r4
-       rfid
+       RFI_TO_KERNEL
 9:     addi    r3, r1, STACK_FRAME_OVERHEAD
        bl      kvmppc_bad_interrupt
        b       9b
index d0dc862..7deaeeb 100644 (file)
@@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
 #define MSR_USER32 MSR_USER
 #define MSR_USER64 MSR_USER
 #define HW_PAGE_SIZE PAGE_SIZE
+#define HPTE_R_M   _PAGE_COHERENT
 #endif
 
 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
@@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
+               pte.wimg = HPTE_R_M;
        }
 
        switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
index 42a4b23..34a5ade 100644 (file)
@@ -46,6 +46,9 @@
 
 #define FUNC(name)             name
 
+#define RFI_TO_KERNEL  RFI
+#define RFI_TO_GUEST   RFI
+
 .macro INTERRUPT_TRAMPOLINE intno
 
 .global kvmppc_trampoline_\intno
@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins:
        GET_SCRATCH0(r13)
 
        /* And get back into the code */
-       RFI
+       RFI_TO_KERNEL
 #endif
 
 /*
@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline)
        ori     r5, r5, MSR_EE
        mtsrr0  r7
        mtsrr1  r6
-       RFI
+       RFI_TO_KERNEL
 
 #include "book3s_segment.S"
index 2a2b96d..93a180c 100644 (file)
@@ -156,7 +156,7 @@ no_dcbz32_on:
        PPC_LL  r9, SVCPU_R9(r3)
        PPC_LL  r3, (SVCPU_R3)(r3)
 
-       RFI
+       RFI_TO_GUEST
 kvmppc_handler_trampoline_enter_end:
 
 
@@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
        cmpwi   r12, BOOK3S_INTERRUPT_DOORBELL
        beqa    BOOK3S_INTERRUPT_DOORBELL
 
-       RFI
+       RFI_TO_KERNEL
 kvmppc_handler_trampoline_exit_end:
index bf45784..0d750d2 100644 (file)
@@ -725,7 +725,8 @@ u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
 
        /* Return the per-cpu state for state saving/migration */
        return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
-              (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT;
+              (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
+              (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
 }
 
 int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
@@ -1558,7 +1559,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
 
        /*
         * Restore P and Q. If the interrupt was pending, we
-        * force both P and Q, which will trigger a resend.
+        * force Q and !P, which will trigger a resend.
         *
         * That means that a guest that had both an interrupt
         * pending (queued) and Q set will restore with only
@@ -1566,7 +1567,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
         * is perfectly fine as coalescing interrupts that haven't
         * been presented yet is always allowed.
         */
-       if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
+       if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
                state->old_p = true;
        if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
                state->old_q = true;
index 6b6c53c..0a7c887 100644 (file)
 #include <asm/iommu.h>
 #include <asm/switch_to.h>
 #include <asm/xive.h>
+#ifdef CONFIG_PPC_PSERIES
+#include <asm/hvcall.h>
+#include <asm/plpar_wrappers.h>
+#endif
 
 #include "timing.h"
 #include "irq.h"
@@ -548,6 +552,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS:
 #endif
+       case KVM_CAP_PPC_GET_CPU_CHAR:
                r = 1;
                break;
 
@@ -1407,7 +1412,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int r;
-       sigset_t sigsaved;
 
        if (vcpu->mmio_needed) {
                vcpu->mmio_needed = 0;
@@ -1448,16 +1452,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 #endif
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (run->immediate_exit)
                r = -EINTR;
        else
                r = kvmppc_vcpu_run(run, vcpu);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
@@ -1762,6 +1764,124 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
        return r;
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * These functions check whether the underlying hardware is safe
+ * against attacks based on observing the effects of speculatively
+ * executed instructions, and whether it supplies instructions for
+ * use in workarounds.  The information comes from firmware, either
+ * via the device tree on powernv platforms or from an hcall on
+ * pseries platforms.
+ */
+#ifdef CONFIG_PPC_PSERIES
+static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+       struct h_cpu_char_result c;
+       unsigned long rc;
+
+       if (!machine_is(pseries))
+               return -ENOTTY;
+
+       rc = plpar_get_cpu_characteristics(&c);
+       if (rc == H_SUCCESS) {
+               cp->character = c.character;
+               cp->behaviour = c.behaviour;
+               cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
+                       KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
+                       KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
+                       KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
+                       KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
+                       KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+               cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
+                       KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
+                       KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+       }
+       return 0;
+}
+#else
+static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+       return -ENOTTY;
+}
+#endif
+
+static inline bool have_fw_feat(struct device_node *fw_features,
+                               const char *state, const char *name)
+{
+       struct device_node *np;
+       bool r = false;
+
+       np = of_get_child_by_name(fw_features, name);
+       if (np) {
+               r = of_property_read_bool(np, state);
+               of_node_put(np);
+       }
+       return r;
+}
+
+static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+       struct device_node *np, *fw_features;
+       int r;
+
+       memset(cp, 0, sizeof(*cp));
+       r = pseries_get_cpu_char(cp);
+       if (r != -ENOTTY)
+               return r;
+
+       np = of_find_node_by_name(NULL, "ibm,opal");
+       if (np) {
+               fw_features = of_get_child_by_name(np, "fw-features");
+               of_node_put(np);
+               if (!fw_features)
+                       return 0;
+               if (have_fw_feat(fw_features, "enabled",
+                                "inst-spec-barrier-ori31,31,0"))
+                       cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
+               if (have_fw_feat(fw_features, "enabled",
+                                "fw-bcctrl-serialized"))
+                       cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
+               if (have_fw_feat(fw_features, "enabled",
+                                "inst-l1d-flush-ori30,30,0"))
+                       cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
+               if (have_fw_feat(fw_features, "enabled",
+                                "inst-l1d-flush-trig2"))
+                       cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
+               if (have_fw_feat(fw_features, "enabled",
+                                "fw-l1d-thread-split"))
+                       cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
+               if (have_fw_feat(fw_features, "enabled",
+                                "fw-count-cache-disabled"))
+                       cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+               cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
+                       KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
+                       KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
+                       KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+
+               if (have_fw_feat(fw_features, "enabled",
+                                "speculation-policy-favor-security"))
+                       cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
+               if (!have_fw_feat(fw_features, "disabled",
+                                 "needs-l1d-flush-msr-pr-0-to-1"))
+                       cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
+               if (!have_fw_feat(fw_features, "disabled",
+                                 "needs-spec-barrier-for-bound-checks"))
+                       cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+               cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
+                       KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
+                       KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+
+               of_node_put(fw_features);
+       }
+
+       return 0;
+}
+#endif
+
 long kvm_arch_vm_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
 {
@@ -1864,6 +1984,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
                        r = -EFAULT;
                break;
        }
+       case KVM_PPC_GET_CPU_CHAR: {
+               struct kvm_ppc_cpu_char cpuchar;
+
+               r = kvmppc_get_cpu_char(&cpuchar);
+               if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
+                       r = -EFAULT;
+               break;
+       }
        default: {
                struct kvm *kvm = filp->private_data;
                r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
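
    KVM_PPC_GET_CPU_CHAR is a VM ioctl, so userspace queries it on the VM file
    descriptor. A sketch assuming a powerpc host with a <linux/kvm.h> new enough
    to define KVM_PPC_GET_CPU_CHAR and struct kvm_ppc_cpu_char:

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include <linux/kvm.h>

        int main(void)
        {
                struct kvm_ppc_cpu_char cc;
                int kvm = open("/dev/kvm", O_RDWR);
                int vm;

                if (kvm < 0)
                        return 1;
                vm = ioctl(kvm, KVM_CREATE_VM, 0);
                if (vm < 0)
                        return 1;

                memset(&cc, 0, sizeof(cc));
                if (ioctl(vm, KVM_PPC_GET_CPU_CHAR, &cc) == 0)
                        printf("character %#llx/%#llx behaviour %#llx/%#llx\n",
                               (unsigned long long)cc.character,
                               (unsigned long long)cc.character_mask,
                               (unsigned long long)cc.behaviour,
                               (unsigned long long)cc.behaviour_mask);

                close(vm);
                close(kvm);
                return 0;
        }

    A production caller would first probe KVM_CAP_PPC_GET_CPU_CHAR via
    KVM_CHECK_EXTENSION, which the same hunk advertises.
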
index 41cf5ae..a95ea00 100644 (file)
@@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
        }
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+void do_rfi_flush_fixups(enum l1d_flush_type types)
+{
+       unsigned int instrs[3], *dest;
+       long *start, *end;
+       int i;
+
+       start = PTRRELOC(&__start___rfi_flush_fixup),
+       end = PTRRELOC(&__stop___rfi_flush_fixup);
+
+       instrs[0] = 0x60000000; /* nop */
+       instrs[1] = 0x60000000; /* nop */
+       instrs[2] = 0x60000000; /* nop */
+
+       if (types & L1D_FLUSH_FALLBACK)
+               /* b .+16 to fallback flush */
+               instrs[0] = 0x48000010;
+
+       i = 0;
+       if (types & L1D_FLUSH_ORI) {
+               instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+               instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
+       }
+
+       if (types & L1D_FLUSH_MTTRIG)
+               instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
+       for (i = 0; start < end; start++, i++) {
+               dest = (void *)start + *start;
+
+               pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+               patch_instruction(dest, instrs[0]);
+               patch_instruction(dest + 1, instrs[1]);
+               patch_instruction(dest + 2, instrs[2]);
+       }
+
+       printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
+}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 {
        long *start, *end;
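
    The fixup table stores each patch site as a self-relative offset, which is why
    the loop above recovers the destination with dest = (void *)start + *start.
    A standalone sketch of the same self-relative-entry idea; the table, targets
    and names are made up for illustration, and the build-time step below stands
    in for what the linker section magic normally does:

        #include <stdio.h>

        static int site_a, site_b;

        static long table[2];

        int main(void)
        {
                int i;

                /* What the linker/section magic does at build time: each slot
                 * holds (target - &slot). */
                table[0] = (char *)&site_a - (char *)&table[0];
                table[1] = (char *)&site_b - (char *)&table[1];

                /* What do_rfi_flush_fixups() does at boot: add the slot's own
                 * address back in to recover the patch destination. */
                for (i = 0; i < 2; i++) {
                        void *dest = (char *)&table[i] + table[i];

                        printf("entry %d -> %p (site_%c at %p)\n", i, dest,
                               'a' + i, i ? (void *)&site_b : (void *)&site_a);
                }
                return 0;
        }
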
index 4797d08..6e1e390 100644 (file)
@@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
        return __bad_area(regs, address, SEGV_MAPERR);
 }
 
+static noinline int bad_access(struct pt_regs *regs, unsigned long address)
+{
+       return __bad_area(regs, address, SEGV_ACCERR);
+}
+
 static int do_sigbus(struct pt_regs *regs, unsigned long address,
                     unsigned int fault)
 {
@@ -490,7 +495,7 @@ retry:
 
 good_area:
        if (unlikely(access_error(is_write, is_exec, vma)))
-               return bad_area(regs, address);
+               return bad_access(regs, address);
 
        /*
         * If for any reason at all we couldn't handle the fault,
index 3848af1..640cf56 100644 (file)
@@ -47,7 +47,8 @@
 
 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
+                                               int apsize, int ssize)
 {
        unsigned long va;
        unsigned int penc;
@@ -100,7 +101,15 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
                             : "memory");
                break;
        }
-       trace_tlbie(0, 0, va, 0, 0, 0, 0);
+       return va;
+}
+
+static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+{
+       unsigned long rb;
+
+       rb = ___tlbie(vpn, psize, apsize, ssize);
+       trace_tlbie(0, 0, rb, 0, 0, 0, 0);
 }
 
 static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
@@ -652,7 +661,7 @@ static void native_hpte_clear(void)
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
                        hptep->v = 0;
-                       __tlbie(vpn, psize, apsize, ssize);
+                       ___tlbie(vpn, psize, apsize, ssize);
                }
        }
 
index 46d74e8..d183b48 100644 (file)
@@ -763,7 +763,8 @@ emit_clear:
                        func = (u8 *) __bpf_call_base + imm;
 
                        /* Save skb pointer if we need to re-cache skb data */
-                       if (bpf_helper_changes_pkt_data(func))
+                       if ((ctx->seen & SEEN_SKB) &&
+                           bpf_helper_changes_pkt_data(func))
                                PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
 
                        bpf_jit_emit_func_call(image, ctx, (u64)func);
@@ -772,7 +773,8 @@ emit_clear:
                        PPC_MR(b2p[BPF_REG_0], 3);
 
                        /* refresh skb cache */
-                       if (bpf_helper_changes_pkt_data(func)) {
+                       if ((ctx->seen & SEEN_SKB) &&
+                           bpf_helper_changes_pkt_data(func)) {
                                /* reload skb pointer to r3 */
                                PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
                                bpf_jit_emit_skb_loads(image, ctx);
index 9e3da16..fce5457 100644 (file)
@@ -410,8 +410,12 @@ static __u64 power_pmu_bhrb_to(u64 addr)
        int ret;
        __u64 target;
 
-       if (is_kernel_addr(addr))
-               return branch_target((unsigned int *)addr);
+       if (is_kernel_addr(addr)) {
+               if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
+                       return 0;
+
+               return branch_target(&instr);
+       }
 
        /* Userspace: need copy instruction here then translate it */
        pagefault_disable();
@@ -1415,7 +1419,7 @@ static int collect_events(struct perf_event *group, int max_count,
        int n = 0;
        struct perf_event *event;
 
-       if (!is_software_event(group)) {
+       if (group->pmu->task_ctx_nr == perf_hw_context) {
                if (n >= max_count)
                        return -1;
                ctrs[n] = group;
@@ -1423,7 +1427,7 @@ static int collect_events(struct perf_event *group, int max_count,
                events[n++] = group->hw.config;
        }
        list_for_each_entry(event, &group->sibling_list, group_entry) {
-               if (!is_software_event(event) &&
+               if (event->pmu->task_ctx_nr == perf_hw_context &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
index 0ead3cd..be4e7f8 100644 (file)
@@ -309,6 +309,19 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
        if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
                return 0;
 
+       /*
+        * Check whether nest_imc is registered. We could end up here if the
+        * cpuhotplug callback registration fails, i.e. the callback invokes
+        * the offline path for all successfully registered nodes. At this
+        * stage, the nest_imc pmu will not be registered and we should
+        * return here.
+        *
+        * We return with a zero since this is not an offline failure. And
+        * cpuhp_setup_state() returns the actual failure reason to the caller,
+        * which in turn will call the cleanup routine.
+        */
+       if (!nest_pmus)
+               return 0;
+
        /*
         * Now that this cpu is one of the designated,
         * find a next cpu a) which is online and b) in same chip.
@@ -1171,6 +1184,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
                if (nest_pmus == 1) {
                        cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
                        kfree(nest_imc_refc);
+                       kfree(per_nest_pmu_arr);
                }
 
                if (nest_pmus > 0)
@@ -1195,7 +1209,6 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
                kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
        kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
        kfree(pmu_ptr);
-       kfree(per_nest_pmu_arr);
        return;
 }
 
@@ -1309,6 +1322,8 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                        ret = nest_pmu_cpumask_init();
                        if (ret) {
                                mutex_unlock(&nest_init_lock);
+                               kfree(nest_imc_refc);
+                               kfree(per_nest_pmu_arr);
                                goto err_free;
                        }
                }
index 1edfbc1..4fb21e1 100644 (file)
 #include <asm/kexec.h>
 #include <asm/smp.h>
 #include <asm/tm.h>
+#include <asm/setup.h>
 
 #include "powernv.h"
 
+static void pnv_setup_rfi_flush(void)
+{
+       struct device_node *np, *fw_features;
+       enum l1d_flush_type type;
+       int enable;
+
+       /* Default to fallback in case fw-features are not available */
+       type = L1D_FLUSH_FALLBACK;
+       enable = 1;
+
+       np = of_find_node_by_name(NULL, "ibm,opal");
+       fw_features = of_get_child_by_name(np, "fw-features");
+       of_node_put(np);
+
+       if (fw_features) {
+               np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
+               if (np && of_property_read_bool(np, "enabled"))
+                       type = L1D_FLUSH_MTTRIG;
+
+               of_node_put(np);
+
+               np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
+               if (np && of_property_read_bool(np, "enabled"))
+                       type = L1D_FLUSH_ORI;
+
+               of_node_put(np);
+
+               /* Enable unless firmware says NOT to */
+               enable = 2;
+               np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
+               if (np && of_property_read_bool(np, "disabled"))
+                       enable--;
+
+               of_node_put(np);
+
+               np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
+               if (np && of_property_read_bool(np, "disabled"))
+                       enable--;
+
+               of_node_put(np);
+               of_node_put(fw_features);
+       }
+
+       setup_rfi_flush(type, enable > 0);
+}
+
 static void __init pnv_setup_arch(void)
 {
        set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
 
+       pnv_setup_rfi_flush();
+
        /* Initialize SMP */
        pnv_smp_init();
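
    On powernv the same ibm,opal/fw-features nodes that pnv_setup_rfi_flush()
    probes are visible to userspace through the flattened device tree, which makes
    it easy to see what firmware advertised. A sketch assuming the usual
    /proc/device-tree layout; the node names mirror the ones probed above:

        #include <stdio.h>
        #include <unistd.h>

        static void probe(const char *node, const char *prop)
        {
                char path[256];

                snprintf(path, sizeof(path),
                         "/proc/device-tree/ibm,opal/fw-features/%s/%s", node, prop);
                printf("%-36s %-9s %s\n", node, prop,
                       access(path, F_OK) == 0 ? "present" : "absent");
        }

        int main(void)
        {
                probe("inst-l1d-flush-trig2", "enabled");
                probe("inst-l1d-flush-ori30,30,0", "enabled");
                probe("needs-l1d-flush-msr-hv-1-to-0", "disabled");
                probe("needs-l1d-flush-msr-pr-0-to-1", "disabled");
                return 0;
        }
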
 
index 9dabea6..6244bc8 100644 (file)
@@ -104,6 +104,20 @@ static void __noreturn ps3_halt(void)
        ps3_sys_manager_halt(); /* never returns */
 }
 
+static void ps3_panic(char *str)
+{
+       DBG("%s:%d %s\n", __func__, __LINE__, str);
+
+       smp_send_stop();
+       printk("\n");
+       printk("   System does not reboot automatically.\n");
+       printk("   Please press POWER button.\n");
+       printk("\n");
+
+       while(1)
+               lv1_pause(1);
+}
+
 #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
     defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
 static void __init prealloc(struct ps3_prealloc *p)
@@ -255,6 +269,7 @@ define_machine(ps3) {
        .probe                          = ps3_probe,
        .setup_arch                     = ps3_setup_arch,
        .init_IRQ                       = ps3_init_IRQ,
+       .panic                          = ps3_panic,
        .get_boot_time                  = ps3_get_boot_time,
        .set_dabr                       = ps3_set_dabr,
        .calibrate_decr                 = ps3_calibrate_decr,
index 6e35780..a0b20c0 100644 (file)
@@ -574,11 +574,26 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
 
 static CLASS_ATTR_RW(dlpar);
 
-static int __init pseries_dlpar_init(void)
+int __init dlpar_workqueue_init(void)
 {
+       if (pseries_hp_wq)
+               return 0;
+
        pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
-                                       WQ_UNBOUND, 1);
+                       WQ_UNBOUND, 1);
+
+       return pseries_hp_wq ? 0 : -ENOMEM;
+}
+
+static int __init dlpar_sysfs_init(void)
+{
+       int rc;
+
+       rc = dlpar_workqueue_init();
+       if (rc)
+               return rc;
+
        return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
 }
-machine_device_initcall(pseries, pseries_dlpar_init);
+machine_device_initcall(pseries, dlpar_sysfs_init);
 
index 4470a31..1ae1d9f 100644 (file)
@@ -98,4 +98,6 @@ static inline unsigned long cmo_get_page_size(void)
        return CMO_PageSize;
 }
 
+int dlpar_workqueue_init(void);
+
 #endif /* _PSERIES_PSERIES_H */
index 4923ffe..81d8614 100644 (file)
@@ -69,7 +69,8 @@ static int __init init_ras_IRQ(void)
        /* Hotplug Events */
        np = of_find_node_by_path("/event-sources/hot-plug-events");
        if (np != NULL) {
-               request_event_sources_irqs(np, ras_hotplug_interrupt,
+               if (dlpar_workqueue_init() == 0)
+                       request_event_sources_irqs(np, ras_hotplug_interrupt,
                                           "RAS_HOTPLUG");
                of_node_put(np);
        }
index 5f1beb8..ae4f596 100644 (file)
@@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void)
        of_pci_check_probe_only();
 }
 
+static void pseries_setup_rfi_flush(void)
+{
+       struct h_cpu_char_result result;
+       enum l1d_flush_type types;
+       bool enable;
+       long rc;
+
+       /* Enable by default */
+       enable = true;
+
+       rc = plpar_get_cpu_characteristics(&result);
+       if (rc == H_SUCCESS) {
+               types = L1D_FLUSH_NONE;
+
+               if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+                       types |= L1D_FLUSH_MTTRIG;
+               if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+                       types |= L1D_FLUSH_ORI;
+
+               /* Use fallback if nothing set in hcall */
+               if (types == L1D_FLUSH_NONE)
+                       types = L1D_FLUSH_FALLBACK;
+
+               if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+                       enable = false;
+       } else {
+               /* Default to fallback in case the hcall is not available */
+               types = L1D_FLUSH_FALLBACK;
+       }
+
+       setup_rfi_flush(types, enable);
+}
+
 static void __init pSeries_setup_arch(void)
 {
        set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void)
 
        fwnmi_init();
 
+       pseries_setup_rfi_flush();
+
        /* By default, only probe PCI (can be overridden by rtas_pci) */
        pci_add_flags(PCI_PROBE_ONLY);
 
@@ -726,6 +761,7 @@ define_machine(pseries) {
        .pcibios_fixup          = pSeries_final_fixup,
        .restart                = rtas_restart,
        .halt                   = rtas_halt,
+       .panic                  = rtas_os_term,
        .get_boot_time          = rtas_get_boot_time,
        .get_rtc_time           = rtas_get_rtc_time,
        .set_rtc_time           = rtas_set_rtc_time,
index 44cbf4c..df95102 100644 (file)
@@ -354,6 +354,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
 }
 
 static struct lock_class_key fsl_msi_irq_class;
+static struct lock_class_key fsl_msi_irq_request_class;
 
 static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
                               int offset, int irq_index)
@@ -373,7 +374,8 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
                dev_err(&dev->dev, "No memory for MSI cascade data\n");
                return -ENOMEM;
        }
-       irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
+       irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class,
+                             &fsl_msi_irq_request_class);
        cascade_data->index = offset;
        cascade_data->msi_data = msi;
        cascade_data->virq = virt_msir;
index 1b2d8cb..0ddc7ac 100644 (file)
@@ -1590,7 +1590,7 @@ static void print_bug_trap(struct pt_regs *regs)
        printf("kernel BUG at %s:%u!\n",
               bug->file, bug->line);
 #else
-       printf("kernel BUG at %p!\n", (void *)bug->bug_addr);
+       printf("kernel BUG at %px!\n", (void *)bug->bug_addr);
 #endif
 #endif /* CONFIG_BUG */
 }
@@ -2329,7 +2329,7 @@ static void dump_one_paca(int cpu)
 
        p = &paca[cpu];
 
-       printf("paca for cpu 0x%x @ %p:\n", cpu, p);
+       printf("paca for cpu 0x%x @ %px:\n", cpu, p);
 
        printf(" %-*s = %s\n", 20, "possible", cpu_possible(cpu) ? "yes" : "no");
        printf(" %-*s = %s\n", 20, "present", cpu_present(cpu) ? "yes" : "no");
@@ -2344,10 +2344,10 @@ static void dump_one_paca(int cpu)
        DUMP(p, kernel_toc, "lx");
        DUMP(p, kernelbase, "lx");
        DUMP(p, kernel_msr, "lx");
-       DUMP(p, emergency_sp, "p");
+       DUMP(p, emergency_sp, "px");
 #ifdef CONFIG_PPC_BOOK3S_64
-       DUMP(p, nmi_emergency_sp, "p");
-       DUMP(p, mc_emergency_sp, "p");
+       DUMP(p, nmi_emergency_sp, "px");
+       DUMP(p, mc_emergency_sp, "px");
        DUMP(p, in_nmi, "x");
        DUMP(p, in_mce, "x");
        DUMP(p, hmi_event_available, "x");
@@ -2375,17 +2375,21 @@ static void dump_one_paca(int cpu)
        DUMP(p, slb_cache_ptr, "x");
        for (i = 0; i < SLB_CACHE_ENTRIES; i++)
                printf(" slb_cache[%d]:        = 0x%016lx\n", i, p->slb_cache[i]);
+
+       DUMP(p, rfi_flush_fallback_area, "px");
+       DUMP(p, l1d_flush_congruence, "llx");
+       DUMP(p, l1d_flush_sets, "llx");
 #endif
        DUMP(p, dscr_default, "llx");
 #ifdef CONFIG_PPC_BOOK3E
-       DUMP(p, pgd, "p");
-       DUMP(p, kernel_pgd, "p");
-       DUMP(p, tcd_ptr, "p");
-       DUMP(p, mc_kstack, "p");
-       DUMP(p, crit_kstack, "p");
-       DUMP(p, dbg_kstack, "p");
+       DUMP(p, pgd, "px");
+       DUMP(p, kernel_pgd, "px");
+       DUMP(p, tcd_ptr, "px");
+       DUMP(p, mc_kstack, "px");
+       DUMP(p, crit_kstack, "px");
+       DUMP(p, dbg_kstack, "px");
 #endif
-       DUMP(p, __current, "p");
+       DUMP(p, __current, "px");
        DUMP(p, kstack, "lx");
        printf(" kstack_base          = 0x%016lx\n", p->kstack & ~(THREAD_SIZE - 1));
        DUMP(p, stab_rr, "lx");
@@ -2403,7 +2407,7 @@ static void dump_one_paca(int cpu)
 #endif
 
 #ifdef CONFIG_PPC_POWERNV
-       DUMP(p, core_idle_state_ptr, "p");
+       DUMP(p, core_idle_state_ptr, "px");
        DUMP(p, thread_idle_state, "x");
        DUMP(p, thread_mask, "x");
        DUMP(p, subcore_sibling_mask, "x");
@@ -2945,7 +2949,7 @@ static void show_task(struct task_struct *tsk)
                (tsk->exit_state & EXIT_DEAD) ? 'E' :
                (tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
 
-       printf("%p %016lx %6d %6d %c %2d %s\n", tsk,
+       printf("%px %016lx %6d %6d %c %2d %s\n", tsk,
                tsk->thread.ksp,
                tsk->pid, tsk->parent->pid,
                state, task_thread_info(tsk)->cpu,
@@ -2988,7 +2992,7 @@ static void show_pte(unsigned long addr)
 
        if (setjmp(bus_error_jmp) != 0) {
                catch_memory_errors = 0;
-               printf("*** Error dumping pte for task %p\n", tsk);
+               printf("*** Error dumping pte for task %px\n", tsk);
                return;
        }
 
@@ -3074,7 +3078,7 @@ static void show_tasks(void)
 
        if (setjmp(bus_error_jmp) != 0) {
                catch_memory_errors = 0;
-               printf("*** Error dumping task %p\n", tsk);
+               printf("*** Error dumping task %px\n", tsk);
                return;
        }
 
index e69de29..47dacf0 100644 (file)
@@ -0,0 +1,75 @@
+CONFIG_SMP=y
+CONFIG_PCI=y
+CONFIG_PCIE_XILINX=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_RAS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+# CONFIG_RCU_TRACE is not set
+CONFIG_CRYPTO_USER_API_HASH=y
index 18158be..970460a 100644 (file)
@@ -40,6 +40,7 @@ generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += sembuf.h
+generic-y += serial.h
 generic-y += setup.h
 generic-y += shmbuf.h
 generic-y += shmparam.h
index 6cbbb6a..5ad4cb6 100644 (file)
 #endif
 
 #if (__SIZEOF_INT__ == 4)
-#define INT            __ASM_STR(.word)
-#define SZINT          __ASM_STR(4)
-#define LGINT          __ASM_STR(2)
+#define RISCV_INT              __ASM_STR(.word)
+#define RISCV_SZINT            __ASM_STR(4)
+#define RISCV_LGINT            __ASM_STR(2)
 #else
 #error "Unexpected __SIZEOF_INT__"
 #endif
 
 #if (__SIZEOF_SHORT__ == 2)
-#define SHORT          __ASM_STR(.half)
-#define SZSHORT                __ASM_STR(2)
-#define LGSHORT                __ASM_STR(1)
+#define RISCV_SHORT            __ASM_STR(.half)
+#define RISCV_SZSHORT          __ASM_STR(2)
+#define RISCV_LGSHORT          __ASM_STR(1)
 #else
 #error "Unexpected __SIZEOF_SHORT__"
 #endif
index e2e37c5..e65d1cd 100644 (file)
@@ -50,30 +50,30 @@ static __always_inline void atomic64_set(atomic64_t *v, long i)
  * have the AQ or RL bits set.  These don't return anything, so there's only
  * one version to worry about.
  */
-#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix)                               \
-static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)             \
-{                                                                                              \
-       __asm__ __volatile__ (                                                                  \
-               "amo" #asm_op "." #asm_type " zero, %1, %0"                                     \
-               : "+A" (v->counter)                                                             \
-               : "r" (I)                                                                       \
-               : "memory");                                                                    \
+#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)                             \
+static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)     \
+{                                                                                      \
+       __asm__ __volatile__ (                                                          \
+               "amo" #asm_op "." #asm_type " zero, %1, %0"                             \
+               : "+A" (v->counter)                                                     \
+               : "r" (I)                                                               \
+               : "memory");                                                            \
 }
 
 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I)                        \
-        ATOMIC_OP (op, asm_op, c_op, I, w,  int,   )
+#define ATOMIC_OPS(op, asm_op, I)                      \
+        ATOMIC_OP (op, asm_op, I, w,  int,   )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I)                        \
-        ATOMIC_OP (op, asm_op, c_op, I, w,  int,   )   \
-        ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I)                      \
+        ATOMIC_OP (op, asm_op, I, w,  int,   ) \
+        ATOMIC_OP (op, asm_op, I, d, long, 64)
 #endif
 
-ATOMIC_OPS(add, add, +,  i)
-ATOMIC_OPS(sub, add, +, -i)
-ATOMIC_OPS(and, and, &,  i)
-ATOMIC_OPS( or,  or, |,  i)
-ATOMIC_OPS(xor, xor, ^,  i)
+ATOMIC_OPS(add, add,  i)
+ATOMIC_OPS(sub, add, -i)
+ATOMIC_OPS(and, and,  i)
+ATOMIC_OPS( or,  or,  i)
+ATOMIC_OPS(xor, xor,  i)
 
 #undef ATOMIC_OP
 #undef ATOMIC_OPS
@@ -83,7 +83,7 @@ ATOMIC_OPS(xor, xor, ^,  i)
  * There are two flavors of these: the arithmetic ops have both fetch and return
  * versions, while the logical ops only have fetch versions.
  */
-#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix)                   \
+#define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix)                         \
 static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v)       \
 {                                                                                                      \
        register c_type ret;                                                                            \
@@ -103,13 +103,13 @@ static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, ato
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w,  int,   )       \
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )       \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w,  int,   )       \
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )       \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )       \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64)       \
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, d, long, 64)       \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
 #endif
 
@@ -126,28 +126,28 @@ ATOMIC_OPS(sub, add, +, -i, .aqrl,         )
 #undef ATOMIC_OPS
 
 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)                                \
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )                \
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)                                \
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )      \
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64)
 #endif
 
-ATOMIC_OPS(and, and, &,  i,      , _relaxed)
-ATOMIC_OPS(and, and, &,  i, .aq  , _acquire)
-ATOMIC_OPS(and, and, &,  i, .rl  , _release)
-ATOMIC_OPS(and, and, &,  i, .aqrl,         )
+ATOMIC_OPS(and, and, i,      , _relaxed)
+ATOMIC_OPS(and, and, i, .aq  , _acquire)
+ATOMIC_OPS(and, and, i, .rl  , _release)
+ATOMIC_OPS(and, and, i, .aqrl,         )
 
-ATOMIC_OPS( or,  or, |,  i,      , _relaxed)
-ATOMIC_OPS( or,  or, |,  i, .aq  , _acquire)
-ATOMIC_OPS( or,  or, |,  i, .rl  , _release)
-ATOMIC_OPS( or,  or, |,  i, .aqrl,         )
+ATOMIC_OPS( or,  or, i,      , _relaxed)
+ATOMIC_OPS( or,  or, i, .aq  , _acquire)
+ATOMIC_OPS( or,  or, i, .rl  , _release)
+ATOMIC_OPS( or,  or, i, .aqrl,         )
 
-ATOMIC_OPS(xor, xor, ^,  i,      , _relaxed)
-ATOMIC_OPS(xor, xor, ^,  i, .aq  , _acquire)
-ATOMIC_OPS(xor, xor, ^,  i, .rl  , _release)
-ATOMIC_OPS(xor, xor, ^,  i, .aqrl,         )
+ATOMIC_OPS(xor, xor, i,      , _relaxed)
+ATOMIC_OPS(xor, xor, i, .aq  , _acquire)
+ATOMIC_OPS(xor, xor, i, .rl  , _release)
+ATOMIC_OPS(xor, xor, i, .aqrl,         )
 
 #undef ATOMIC_OPS
 
@@ -182,13 +182,13 @@ ATOMIC_OPS(add_negative, add,  <, 0)
 #undef ATOMIC_OP
 #undef ATOMIC_OPS
 
-#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix)                                \
+#define ATOMIC_OP(op, func_op, I, c_type, prefix)                              \
 static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v)       \
 {                                                                              \
        atomic##prefix##_##func_op(I, v);                                       \
 }
 
-#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix)                          \
+#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix)                                        \
 static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)       \
 {                                                                                      \
        return atomic##prefix##_fetch_##func_op(I, v);                                  \
@@ -202,16 +202,16 @@ static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I)                                                \
-        ATOMIC_OP       (op, asm_op, c_op, I,  int,   )                                \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )                                \
+        ATOMIC_OP       (op, asm_op,       I,  int,   )                                \
+        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )                                \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I)                                                \
-        ATOMIC_OP       (op, asm_op, c_op, I,  int,   )                                \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )                                \
+        ATOMIC_OP       (op, asm_op,       I,  int,   )                                \
+        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )                                \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )                                \
-        ATOMIC_OP       (op, asm_op, c_op, I, long, 64)                                \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64)                                \
+        ATOMIC_OP       (op, asm_op,       I, long, 64)                                \
+        ATOMIC_FETCH_OP (op, asm_op,       I, long, 64)                                \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
 #endif
 
@@ -300,8 +300,13 @@ static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
 
 /*
  * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
- * {cmp,}xchg and the operations that return, so they need a barrier.  We just
- * use the other implementations directly.
+ * {cmp,}xchg and the operations that return, so they need a barrier.
+ */
+/*
+ * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
+ * assigning the same barrier to both the LR and SC operations, but that might
+ * not make any sense.  We're waiting on a memory model specification to
+ * determine exactly what the right thing to do is here.
  */
 #define ATOMIC_OP(c_t, prefix, c_or, size, asm_or)                                             \
 static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n)         \
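A minimal sketch may help readers follow the FIXME above: this is roughly what a 32-bit cmpxchg with acquire ordering expands to on RISC-V, with the ordering annotation carried on the LR instruction (one plausible placement; this is not the in-tree macro expansion, and the helper name is illustrative).

    static inline int cmpxchg32_acquire_sketch(int *p, int old, int new)
    {
            int prev, rc;

            __asm__ __volatile__ (
                    "0:     lr.w.aq %0, %2\n"       /* load-reserved, acquire ordering */
                    "       bne     %0, %3, 1f\n"   /* value changed under us: give up */
                    "       sc.w    %1, %4, %2\n"   /* try to store the new value */
                    "       bnez    %1, 0b\n"       /* store-conditional failed: retry */
                    "1:\n"
                    : "=&r" (prev), "=&r" (rc), "+A" (*p)
                    : "r" (old), "r" (new)
                    : "memory");

            return prev;    /* cmpxchg returns the previous value */
    }

Whether the ordering annotation should sit on the LR, the SC, or both is exactly the question the FIXME above defers to the memory model specification.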
index 183534b..c0319cb 100644 (file)
 #define smp_wmb()      RISCV_FENCE(w,w)
 
 /*
- * These fences exist to enforce ordering around the relaxed AMOs.  The
- * documentation defines that
- * "
- *     atomic_fetch_add();
- *   is equivalent to:
- *     smp_mb__before_atomic();
- *     atomic_fetch_add_relaxed();
- *     smp_mb__after_atomic();
- * "
- * So we emit full fences on both sides.
- */
-#define __smb_mb__before_atomic()      smp_mb()
-#define __smb_mb__after_atomic()       smp_mb()
-
-/*
- * These barriers prevent accesses performed outside a spinlock from being moved
- * inside a spinlock.  Since RISC-V sets the aq/rl bits on our spinlock only
- * enforce release consistency, we need full fences here.
+ * This is a very specific barrier: it's currently only used in two places in
+ * the kernel, both in the scheduler.  See include/linux/spinlock.h for the two
+ * orderings it guarantees, but the "critical section is RCsc" guarantee
+ * mandates a barrier on RISC-V.  The sequence looks like:
+ *
+ *    lr.aq lock
+ *    sc    lock <= LOCKED
+ *    smp_mb__after_spinlock()
+ *    // critical section
+ *    lr    lock
+ *    sc.rl lock <= UNLOCKED
+ *
+ * The AQ/RL pair provides an RCpc critical section, but there's not really any
+ * way we can take advantage of that here because the ordering is only enforced
+ * on that one lock.  Thus, we're just doing a full fence.
  */
-#define smb_mb__before_spinlock()      smp_mb()
-#define smb_mb__after_spinlock()       smp_mb()
+#define smp_mb__after_spinlock()       RISCV_FENCE(rw,rw)
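As a rough illustration of the usage the comment describes, here is a hedged sketch of the scheduler-side pattern; the lock argument and the surrounding logic are placeholders rather than a copy of kernel/sched/core.c.

    #include <linux/spinlock.h>

    static void schedule_prologue_sketch(raw_spinlock_t *rq_lock)
    {
            raw_spin_lock(rq_lock);
            /*
             * The acquire above is only RCpc on RISC-V.  This fence makes the
             * start of the critical section RCsc, so stores performed by a
             * remote waker before it observed us on the run queue are visible
             * to the loads that follow.
             */
            smp_mb__after_spinlock();

            /* ... inspect task state and pick the next task to run ... */

            raw_spin_unlock(rq_lock);
    }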
 
 #include <asm-generic/barrier.h>
 
index 7c281ef..f30daf2 100644 (file)
@@ -67,7 +67,7 @@
                : "memory");
 
 #define __test_and_op_bit(op, mod, nr, addr)                   \
-       __test_and_op_bit_ord(op, mod, nr, addr, )
+       __test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
 #define __op_bit(op, mod, nr, addr)                            \
        __op_bit_ord(op, mod, nr, addr, )
 
index c3e1376..bfc7f09 100644 (file)
@@ -27,8 +27,8 @@
 typedef u32 bug_insn_t;
 
 #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
-#define __BUG_ENTRY_ADDR       INT " 1b - 2b"
-#define __BUG_ENTRY_FILE       INT " %0 - 2b"
+#define __BUG_ENTRY_ADDR       RISCV_INT " 1b - 2b"
+#define __BUG_ENTRY_FILE       RISCV_INT " %0 - 2b"
 #else
 #define __BUG_ENTRY_ADDR       RISCV_PTR " 1b"
 #define __BUG_ENTRY_FILE       RISCV_PTR " %0"
@@ -38,7 +38,7 @@ typedef u32 bug_insn_t;
 #define __BUG_ENTRY                    \
        __BUG_ENTRY_ADDR "\n\t"         \
        __BUG_ENTRY_FILE "\n\t"         \
-       SHORT " %1"
+       RISCV_SHORT " %1"
 #else
 #define __BUG_ENTRY                    \
        __BUG_ENTRY_ADDR
index 0595585..efd89a8 100644 (file)
 
 #undef flush_icache_range
 #undef flush_icache_user_range
+#undef flush_dcache_page
 
 static inline void local_flush_icache_all(void)
 {
        asm volatile ("fence.i" ::: "memory");
 }
 
+#define PG_dcache_clean PG_arch_1
+
+static inline void flush_dcache_page(struct page *page)
+{
+       if (test_bit(PG_dcache_clean, &page->flags))
+               clear_bit(PG_dcache_clean, &page->flags);
+}
+
+/*
+ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+ * so instead we just flush the whole thing.
+ */
+#define flush_icache_range(start, end) flush_icache_all()
+#define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()
+
 #ifndef CONFIG_SMP
 
-#define flush_icache_range(start, end) local_flush_icache_all()
-#define flush_icache_user_range(vma, pg, addr, len) local_flush_icache_all()
+#define flush_icache_all() local_flush_icache_all()
+#define flush_icache_mm(mm, local) flush_icache_all()
 
 #else /* CONFIG_SMP */
 
-#define flush_icache_range(start, end) sbi_remote_fence_i(0)
-#define flush_icache_user_range(vma, pg, addr, len) sbi_remote_fence_i(0)
+#define flush_icache_all() sbi_remote_fence_i(0)
+void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
 
+/*
+ * Bits in sys_riscv_flush_icache()'s flags argument.
+ */
+#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
+#define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
+
 #endif /* _ASM_RISCV_CACHEFLUSH_H */
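The SYS_RISCV_FLUSH_ICACHE_* bits above are consumed by the system call added later in this series; the sketch below merely restates those checks for readability and is not additional kernel code.

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <asm/cacheflush.h>     /* SYS_RISCV_FLUSH_ICACHE_* */

    static int icache_flags_to_scope_sketch(unsigned long flags, bool *local)
    {
            /* Any bit outside the defined set is rejected. */
            if (flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)
                    return -EINVAL;

            /* LOCAL restricts the flush to the calling thread only. */
            *local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
            return 0;
    }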
index 0d64bc9..3c7a2c9 100644 (file)
 #include <linux/const.h>
 
 /* Status register flags */
-#define SR_IE   _AC(0x00000002, UL) /* Interrupt Enable */
-#define SR_PIE  _AC(0x00000020, UL) /* Previous IE */
-#define SR_PS   _AC(0x00000100, UL) /* Previously Supervisor */
-#define SR_SUM  _AC(0x00040000, UL) /* Supervisor may access User Memory */
+#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_SPIE        _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
 
 #define SR_FS           _AC(0x00006000, UL) /* Floating-point Status */
 #define SR_FS_OFF       _AC(0x00000000, UL)
index c1f32cf..b269451 100644 (file)
@@ -19,7 +19,7 @@
 #ifndef _ASM_RISCV_IO_H
 #define _ASM_RISCV_IO_H
 
-#ifdef CONFIG_MMU
+#include <linux/types.h>
 
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
@@ -32,9 +32,7 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 #define ioremap_wc(addr, size) ioremap((addr), (size))
 #define ioremap_wt(addr, size) ioremap((addr), (size))
 
-extern void iounmap(void __iomem *addr);
-
-#endif /* CONFIG_MMU */
+extern void iounmap(volatile void __iomem *addr);
 
 /* Generic IO read/write.  These perform native-endian accesses. */
 #define __raw_writeb __raw_writeb
@@ -250,7 +248,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
                        const ctype *buf = buffer;                              \
                                                                                \
                        do {                                                    \
-                               __raw_writeq(*buf++, addr);                     \
+                               __raw_write ## len(*buf++, addr);               \
                        } while (--count);                                      \
                }                                                               \
                afence;                                                         \
@@ -266,9 +264,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar())
 __io_reads_ins(ins,  u8, b, __io_pbr(), __io_par())
 __io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
 __io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
-#define insb(addr, buffer, count) __insb((void __iomem *)addr, buffer, count)
-#define insw(addr, buffer, count) __insw((void __iomem *)addr, buffer, count)
-#define insl(addr, buffer, count) __insl((void __iomem *)addr, buffer, count)
+#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
+#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
+#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)
 
 __io_writes_outs(writes,  u8, b, __io_bw(), __io_aw())
 __io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
@@ -280,9 +278,9 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
 __io_writes_outs(outs,  u8, b, __io_pbw(), __io_paw())
 __io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
 __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
-#define outsb(addr, buffer, count) __outsb((void __iomem *)addr, buffer, count)
-#define outsw(addr, buffer, count) __outsw((void __iomem *)addr, buffer, count)
-#define outsl(addr, buffer, count) __outsl((void __iomem *)addr, buffer, count)
+#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
+#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
+#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
 
 #ifdef CONFIG_64BIT
 __io_reads_ins(reads, u64, q, __io_br(), __io_ar())
index 6fdc860..07a3c6d 100644 (file)
@@ -27,25 +27,25 @@ static inline unsigned long arch_local_save_flags(void)
 /* unconditionally enable interrupts */
 static inline void arch_local_irq_enable(void)
 {
-       csr_set(sstatus, SR_IE);
+       csr_set(sstatus, SR_SIE);
 }
 
 /* unconditionally disable interrupts */
 static inline void arch_local_irq_disable(void)
 {
-       csr_clear(sstatus, SR_IE);
+       csr_clear(sstatus, SR_SIE);
 }
 
 /* get status and disable interrupts */
 static inline unsigned long arch_local_irq_save(void)
 {
-       return csr_read_clear(sstatus, SR_IE);
+       return csr_read_clear(sstatus, SR_SIE);
 }
 
 /* test flags */
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-       return !(flags & SR_IE);
+       return !(flags & SR_SIE);
 }
 
 /* test hardware interrupt enable bit */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-       csr_set(sstatus, flags & SR_IE);
+       csr_set(sstatus, flags & SR_SIE);
 }
 
 #endif /* _ASM_RISCV_IRQFLAGS_H */
index 66805cb..5df2dcc 100644 (file)
 
 typedef struct {
        void *vdso;
+#ifdef CONFIG_SMP
+       /* A local icache flush is needed before user execution can resume. */
+       cpumask_t icache_stale_mask;
+#endif
 } mm_context_t;
 
 #endif /* __ASSEMBLY__ */
index de1fc16..9742483 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
 #ifndef _ASM_RISCV_MMU_CONTEXT_H
 #define _ASM_RISCV_MMU_CONTEXT_H
 
+#include <linux/mm_types.h>
 #include <asm-generic/mm_hooks.h>
 
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm,
        struct task_struct *task)
@@ -46,12 +49,54 @@ static inline void set_pgdir(pgd_t *pgd)
        csr_write(sptbr, virt_to_pfn(pgd) | SPTBR_MODE);
 }
 
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU.  RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches.  To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart.  This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+       unsigned int cpu = smp_processor_id();
+       cpumask_t *mask = &mm->context.icache_stale_mask;
+
+       if (cpumask_test_cpu(cpu, mask)) {
+               cpumask_clear_cpu(cpu, mask);
+               /*
+                * Ensure the remote hart's writes are visible to this hart.
+                * This pairs with a barrier in flush_icache_mm.
+                */
+               smp_mb();
+               local_flush_icache_all();
+       }
+#endif
+}
+
 static inline void switch_mm(struct mm_struct *prev,
        struct mm_struct *next, struct task_struct *task)
 {
        if (likely(prev != next)) {
+               /*
+                * Mark the current MM context as inactive, and the next as
+                * active.  This is at least used by the icache flushing
+                * routines in order to determine who should be flushed.
+                */
+               unsigned int cpu = smp_processor_id();
+
+               cpumask_clear_cpu(cpu, mm_cpumask(prev));
+               cpumask_set_cpu(cpu, mm_cpumask(next));
+
                set_pgdir(next->pgd);
                local_flush_tlb_all();
+
+               flush_icache_deferred(next);
        }
 }
 
index 3399257..1630196 100644 (file)
@@ -20,8 +20,6 @@
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MMU
-
 /* Page Upper Directory not used in RISC-V */
 #include <asm-generic/pgtable-nopud.h>
 #include <asm/page.h>
@@ -178,28 +176,6 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
 #define pte_offset_map(dir, addr)      pte_offset_kernel((dir), (addr))
 #define pte_unmap(pte)                 ((void)(pte))
 
-/*
- * Certain architectures need to do special things when PTEs within
- * a page table are directly modified.  Thus, the following hook is
- * made available.
- */
-static inline void set_pte(pte_t *ptep, pte_t pteval)
-{
-       *ptep = pteval;
-}
-
-static inline void set_pte_at(struct mm_struct *mm,
-       unsigned long addr, pte_t *ptep, pte_t pteval)
-{
-       set_pte(ptep, pteval);
-}
-
-static inline void pte_clear(struct mm_struct *mm,
-       unsigned long addr, pte_t *ptep)
-{
-       set_pte_at(mm, addr, ptep, __pte(0));
-}
-
 static inline int pte_present(pte_t pte)
 {
        return (pte_val(pte) & _PAGE_PRESENT);
@@ -210,21 +186,22 @@ static inline int pte_none(pte_t pte)
        return (pte_val(pte) == 0);
 }
 
-/* static inline int pte_read(pte_t pte) */
-
 static inline int pte_write(pte_t pte)
 {
        return pte_val(pte) & _PAGE_WRITE;
 }
 
+static inline int pte_exec(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_EXEC;
+}
+
 static inline int pte_huge(pte_t pte)
 {
        return pte_present(pte)
                && (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
 }
 
-/* static inline int pte_exec(pte_t pte) */
-
 static inline int pte_dirty(pte_t pte)
 {
        return pte_val(pte) & _PAGE_DIRTY;
@@ -311,6 +288,33 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
        return pte_val(pte_a) == pte_val(pte_b);
 }
 
+/*
+ * Certain architectures need to do special things when PTEs within
+ * a page table are directly modified.  Thus, the following hook is
+ * made available.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+       *ptep = pteval;
+}
+
+void flush_icache_pte(pte_t pte);
+
+static inline void set_pte_at(struct mm_struct *mm,
+       unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+       if (pte_present(pteval) && pte_exec(pteval))
+               flush_icache_pte(pteval);
+
+       set_pte(ptep, pteval);
+}
+
+static inline void pte_clear(struct mm_struct *mm,
+       unsigned long addr, pte_t *ptep)
+{
+       set_pte_at(mm, addr, ptep, __pte(0));
+}
+
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep,
@@ -407,8 +411,6 @@ static inline void pgtable_cache_init(void)
        /* No page table caches to initialize */
 }
 
-#endif /* CONFIG_MMU */
-
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
index 93b8956..2c5df94 100644 (file)
@@ -66,7 +66,7 @@ struct pt_regs {
 #define REG_FMT "%08lx"
 #endif
 
-#define user_mode(regs) (((regs)->sstatus & SR_PS) == 0)
+#define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0)
 
 
 /* Helpers for working with the instruction pointer */
index 04c71d9..2fd27e8 100644 (file)
@@ -24,7 +24,7 @@
 
 /* FIXME: Replace this with a ticket lock, like MIPS. */
 
-#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
@@ -58,15 +58,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        }
 }
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_rmb();
-       do {
-               cpu_relax();
-       } while (arch_spin_is_locked(lock));
-       smp_acquire__after_ctrl_dep();
-}
-
 /***********************************************************/
 
 static inline void arch_read_lock(arch_rwlock_t *lock)
index 22c3536..f8fa1cd 100644 (file)
@@ -64,8 +64,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_stack             (init_thread_union.stack)
-
 #endif /* !__ASSEMBLY__ */
 
 /*
index 3df4932..2f26989 100644 (file)
@@ -18,7 +18,7 @@
 
 typedef unsigned long cycles_t;
 
-static inline cycles_t get_cycles(void)
+static inline cycles_t get_cycles_inline(void)
 {
        cycles_t n;
 
@@ -27,6 +27,7 @@ static inline cycles_t get_cycles(void)
                : "=r" (n));
        return n;
 }
+#define get_cycles get_cycles_inline
 
 #ifdef CONFIG_64BIT
 static inline uint64_t get_cycles64(void)
index 5ee4ae3..7b9c24e 100644 (file)
 #ifndef _ASM_RISCV_TLBFLUSH_H
 #define _ASM_RISCV_TLBFLUSH_H
 
-#ifdef CONFIG_MMU
+#include <linux/mm_types.h>
 
-/* Flush entire local TLB */
+/*
+ * Flush entire local TLB.  'sfence.vma' implicitly fences with the instruction
+ * cache as well, so a 'fence.i' is not necessary.
+ */
 static inline void local_flush_tlb_all(void)
 {
        __asm__ __volatile__ ("sfence.vma" : : : "memory");
@@ -59,6 +62,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
        flush_tlb_all();
 }
 
-#endif /* CONFIG_MMU */
-
 #endif /* _ASM_RISCV_TLBFLUSH_H */
index 27b90d6..14b0b22 100644 (file)
@@ -127,7 +127,6 @@ extern int fixup_exception(struct pt_regs *state);
  * call.
  */
 
-#ifdef CONFIG_MMU
 #define __get_user_asm(insn, x, ptr, err)                      \
 do {                                                           \
        uintptr_t __tmp;                                        \
@@ -153,13 +152,11 @@ do {                                                              \
        __disable_user_access();                                \
        (x) = __x;                                              \
 } while (0)
-#endif /* CONFIG_MMU */
 
 #ifdef CONFIG_64BIT
 #define __get_user_8(x, ptr, err) \
        __get_user_asm("ld", x, ptr, err)
 #else /* !CONFIG_64BIT */
-#ifdef CONFIG_MMU
 #define __get_user_8(x, ptr, err)                              \
 do {                                                           \
        u32 __user *__ptr = (u32 __user *)(ptr);                \
@@ -193,7 +190,6 @@ do {                                                                \
        (x) = (__typeof__(x))((__typeof__((x)-(x)))(            \
                (((u64)__hi << 32) | __lo)));                   \
 } while (0)
-#endif /* CONFIG_MMU */
 #endif /* CONFIG_64BIT */
 
 
@@ -267,8 +263,6 @@ do {                                                                \
                ((x) = 0, -EFAULT);                             \
 })
 
-
-#ifdef CONFIG_MMU
 #define __put_user_asm(insn, x, ptr, err)                      \
 do {                                                           \
        uintptr_t __tmp;                                        \
@@ -292,14 +286,11 @@ do {                                                              \
                : "rJ" (__x), "i" (-EFAULT));                   \
        __disable_user_access();                                \
 } while (0)
-#endif /* CONFIG_MMU */
-
 
 #ifdef CONFIG_64BIT
 #define __put_user_8(x, ptr, err) \
        __put_user_asm("sd", x, ptr, err)
 #else /* !CONFIG_64BIT */
-#ifdef CONFIG_MMU
 #define __put_user_8(x, ptr, err)                              \
 do {                                                           \
        u32 __user *__ptr = (u32 __user *)(ptr);                \
@@ -329,7 +320,6 @@ do {                                                                \
                : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
        __disable_user_access();                                \
 } while (0)
-#endif /* CONFIG_MMU */
 #endif /* CONFIG_64BIT */
 
 
@@ -438,7 +428,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
  * will set "err" to -EFAULT, while successful accesses return the previous
  * value.
  */
-#ifdef CONFIG_MMU
 #define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)     \
 ({                                                             \
        __typeof__(ptr) __ptr = (ptr);                          \
@@ -508,6 +497,5 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
        (err) = __err;                                          \
        __ret;                                                  \
 })
-#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_UACCESS_H */
index 9f250ed..2f704a5 100644 (file)
@@ -14,3 +14,4 @@
 #define __ARCH_HAVE_MMU
 #define __ARCH_WANT_SYS_CLONE
 #include <uapi/asm/unistd.h>
+#include <uapi/asm/syscalls.h>
index 602f612..541544d 100644 (file)
@@ -38,4 +38,8 @@ struct vdso_data {
        (void __user *)((unsigned long)(base) + __vdso_##name);                 \
 })
 
+#ifdef CONFIG_SMP
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+#endif
+
 #endif /* _ASM_RISCV_VDSO_H */
index 5ded96b..7e91f48 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += setup.h
 generic-y += unistd.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/syscalls.h
new file mode 100644 (file)
index 0000000..818655b
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM__UAPI__SYSCALLS_H
+#define _ASM__UAPI__SYSCALLS_H
+
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
+
+#endif
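For userspace consumers (for example a JIT emitting code into a buffer), a hedged usage sketch of the raw system call follows; the helper name is illustrative, the header actually visible to userspace depends on how the kernel headers are installed, and a libc or vDSO wrapper would normally be preferred.

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/syscalls.h>       /* __NR_riscv_flush_icache (illustrative include path) */

    /* Flush the instruction cache on behalf of every thread of this process. */
    static long flush_icache_region(void *start, void *end)
    {
            /* flags == 0: not LOCAL, so the flush covers all threads. */
            return syscall(__NR_riscv_flush_icache, start, end, 0UL);
    }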
index 20ee86f..7404ec2 100644 (file)
@@ -196,7 +196,7 @@ handle_syscall:
        addi s2, s2, 0x4
        REG_S s2, PT_SEPC(sp)
        /* System calls run with interrupts enabled */
-       csrs sstatus, SR_IE
+       csrs sstatus, SR_SIE
        /* Trace syscalls, but only if requested by the user. */
        REG_L t0, TASK_TI_FLAGS(tp)
        andi t0, t0, _TIF_SYSCALL_TRACE
@@ -224,8 +224,8 @@ ret_from_syscall:
 
 ret_from_exception:
        REG_L s0, PT_SSTATUS(sp)
-       csrc sstatus, SR_IE
-       andi s0, s0, SR_PS
+       csrc sstatus, SR_SIE
+       andi s0, s0, SR_SPP
        bnez s0, restore_all
 
 resume_userspace:
@@ -255,7 +255,7 @@ work_pending:
        bnez s1, work_resched
 work_notifysig:
        /* Handle pending signals and notify-resume requests */
-       csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */
+       csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
        move a0, sp /* pt_regs */
        move a1, s0 /* current_thread_info->flags */
        tail do_notify_resume
index 76af908..78f670d 100644 (file)
@@ -152,6 +152,3 @@ END(_start)
 __PAGE_ALIGNED_BSS
        /* Empty zero page */
        .balign PAGE_SIZE
-ENTRY(empty_zero_page)
-       .fill (empty_zero_page + PAGE_SIZE) - ., 1, 0x00
-END(empty_zero_page)
index 0d90dcc..d74d4ad 100644 (file)
@@ -76,7 +76,7 @@ void show_regs(struct pt_regs *regs)
 void start_thread(struct pt_regs *regs, unsigned long pc,
        unsigned long sp)
 {
-       regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL;
+       regs->sstatus = SR_SPIE /* User mode, irqs on */ | SR_FS_INITIAL;
        regs->sepc = pc;
        regs->sp = sp;
        set_fs(USER_DS);
@@ -110,7 +110,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                const register unsigned long gp __asm__ ("gp");
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->gp = gp;
-               childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */
+               childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */
 
                p->thread.ra = (unsigned long)ret_from_kernel_thread;
                p->thread.s[0] = usp; /* fn */
index 23cc81e..5517342 100644 (file)
@@ -12,4 +12,7 @@
 /*
  * Assembly functions that may be used (directly or indirectly) by modules
  */
+EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
index de7db11..cb7b0c6 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm/thread_info.h>
 
-#ifdef CONFIG_HVC_RISCV_SBI
-#include <asm/hvc_riscv_sbi.h>
-#endif
-
 #ifdef CONFIG_DUMMY_CONSOLE
 struct screen_info screen_info = {
        .orig_video_lines       = 30,
@@ -58,7 +54,12 @@ static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
 #endif /* CONFIG_CMDLINE_BOOL */
 
 unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
 unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
 
 /* The lucky hart to first increment this variable will boot the other cores */
 atomic_t hart_lottery;
@@ -207,13 +208,6 @@ static void __init setup_bootmem(void)
 
 void __init setup_arch(char **cmdline_p)
 {
-#if defined(CONFIG_HVC_RISCV_SBI)
-       if (likely(early_console == NULL)) {
-               early_console = &riscv_sbi_early_console_dev;
-               register_console(early_console);
-       }
-#endif
-
 #ifdef CONFIG_CMDLINE_BOOL
 #ifdef CONFIG_CMDLINE_OVERRIDE
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
index b4a71ec..6d39624 100644 (file)
@@ -38,6 +38,13 @@ enum ipi_message_type {
        IPI_MAX
 };
 
+
+/* Unsupported */
+int setup_profiling_timer(unsigned int multiplier)
+{
+       return -EINVAL;
+}
+
 irqreturn_t handle_ipi(void)
 {
        unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
@@ -108,3 +115,51 @@ void smp_send_reschedule(int cpu)
 {
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
+
+/*
+ * Performs an icache flush for the given MM context.  RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
+ * IPIs for harts that are not currently executing an MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+       unsigned int cpu;
+       cpumask_t others, *mask;
+
+       preempt_disable();
+
+       /* Mark every hart's icache as needing a flush for this MM. */
+       mask = &mm->context.icache_stale_mask;
+       cpumask_setall(mask);
+       /* Flush this hart's I$ now, and mark it as flushed. */
+       cpu = smp_processor_id();
+       cpumask_clear_cpu(cpu, mask);
+       local_flush_icache_all();
+
+       /*
+        * Flush the I$ of other harts concurrently executing, and mark them as
+        * flushed.
+        */
+       cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+       local |= cpumask_empty(&others);
+       if (mm != current->active_mm || !local)
+               sbi_remote_fence_i(others.bits);
+       else {
+               /*
+                * It's assumed that at least one strongly ordered operation is
+                * performed on this hart between setting a hart's cpumask bit
+                * and scheduling this MM context on that hart.  Sending an SBI
+                * remote message will do this, but in the case where no
+                * messages are sent we still need to order this hart's writes
+                * with flush_icache_deferred().
+                */
+               smp_mb();
+       }
+
+       preempt_enable();
+}
index 4351be7..79c7866 100644 (file)
@@ -14,8 +14,8 @@
  */
 
 #include <linux/syscalls.h>
-#include <asm/cmpxchg.h>
 #include <asm/unistd.h>
+#include <asm/cacheflush.h>
 
 static long riscv_sys_mmap(unsigned long addr, unsigned long len,
                           unsigned long prot, unsigned long flags,
@@ -47,3 +47,34 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
        return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
 }
 #endif /* !CONFIG_64BIT */
+
+#ifdef CONFIG_SMP
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * sys_riscv_flush_icache() is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
+       uintptr_t, flags)
+{
+       struct mm_struct *mm = current->mm;
+       bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
+
+       /* Check the reserved flags. */
+       if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
+               return -EINVAL;
+
+       flush_icache_mm(mm, local);
+
+       return 0;
+}
+#endif
index 4e30dc5..ade52b9 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/linkage.h>
 #include <linux/syscalls.h>
 #include <asm-generic/syscalls.h>
+#include <asm/vdso.h>
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call)    [nr] = (call),
index 523d0a8..324568d 100644 (file)
@@ -1,7 +1,12 @@
 # Copied from arch/tile/kernel/vdso/Makefile
 
 # Symbols present in the vdso
-vdso-syms = rt_sigreturn
+vdso-syms  = rt_sigreturn
+vdso-syms += gettimeofday
+vdso-syms += clock_gettime
+vdso-syms += clock_getres
+vdso-syms += getcpu
+vdso-syms += flush_icache
 
 # Files to link into the vdso
 obj-vdso = $(patsubst %, %.o, $(vdso-syms))
diff --git a/arch/riscv/kernel/vdso/clock_getres.S b/arch/riscv/kernel/vdso/clock_getres.S
new file mode 100644 (file)
index 0000000..edf7e23
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
+ENTRY(__vdso_clock_getres)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_clock_getres
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_clock_getres)
diff --git a/arch/riscv/kernel/vdso/clock_gettime.S b/arch/riscv/kernel/vdso/clock_gettime.S
new file mode 100644 (file)
index 0000000..aac6567
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
+ENTRY(__vdso_clock_gettime)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_clock_gettime
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_clock_gettime)
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
new file mode 100644 (file)
index 0000000..023e4d4
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
+ENTRY(__vdso_flush_icache)
+       .cfi_startproc
+#ifdef CONFIG_SMP
+       li a7, __NR_riscv_flush_icache
+       ecall
+#else
+       fence.i
+       li a0, 0
+#endif
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_flush_icache)
diff --git a/arch/riscv/kernel/vdso/getcpu.S b/arch/riscv/kernel/vdso/getcpu.S
new file mode 100644 (file)
index 0000000..cc7e989
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
+ENTRY(__vdso_getcpu)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_getcpu
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_getcpu)
diff --git a/arch/riscv/kernel/vdso/gettimeofday.S b/arch/riscv/kernel/vdso/gettimeofday.S
new file mode 100644 (file)
index 0000000..da85d33
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
+ENTRY(__vdso_gettimeofday)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_gettimeofday
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_gettimeofday)
index 8c9dce9..cd1d47e 100644 (file)
@@ -70,8 +70,11 @@ VERSION
        LINUX_4.15 {
        global:
                __vdso_rt_sigreturn;
-               __vdso_cmpxchg32;
-               __vdso_cmpxchg64;
+               __vdso_gettimeofday;
+               __vdso_clock_gettime;
+               __vdso_clock_getres;
+               __vdso_getcpu;
+               __vdso_flush_icache;
        local: *;
        };
 }
index 1cc4ac3..dce8ae2 100644 (file)
@@ -84,6 +84,7 @@ void __delay(unsigned long cycles)
        while ((unsigned long)(get_cycles() - t0) < cycles)
                cpu_relax();
 }
+EXPORT_SYMBOL(__delay);
 
 void udelay(unsigned long usecs)
 {
index 81f7d9c..eb22ab4 100644 (file)
@@ -2,3 +2,4 @@ obj-y += init.o
 obj-y += fault.o
 obj-y += extable.o
 obj-y += ioremap.o
+obj-y += cacheflush.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
new file mode 100644 (file)
index 0000000..498c0a0
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+void flush_icache_pte(pte_t pte)
+{
+       struct page *page = pte_page(pte);
+
+       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+               flush_icache_all();
+}
index df2ca3c..0713f3c 100644 (file)
@@ -63,7 +63,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
                goto vmalloc_fault;
 
        /* Enable interrupts if they were enabled in the parent context. */
-       if (likely(regs->sstatus & SR_PIE))
+       if (likely(regs->sstatus & SR_SPIE))
                local_irq_enable();
 
        /*
index e99194a..70ef272 100644 (file)
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(ioremap);
  *
  * Caller must ensure there is only one unmapping for the same pointer.
  */
-void iounmap(void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
        vunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
index eae2c64..9fdff3f 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 obj-y                          += kernel/
 obj-y                          += mm/
 obj-$(CONFIG_KVM)              += kvm/
index 6b3f419..de54cfc 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # s390/Makefile
 #
@@ -6,10 +7,6 @@
 # for "archclean" and "archdep" for cleaning up and making dependencies for
 # this architecture
 #
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
 # Copyright (C) 1994 by Linus Torvalds
 #
 
index 99f1cf0..b06def4 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the Linux - z/VM Monitor Stream.
 #
index ef3fb1b..cb6e806 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
  * Exports appldata_register_ops() and appldata_unregister_ops() for the
index 598df57..e68136c 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects data related to memory management.
index 66037d2..8bc14b0 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects accumulated network statistics (Packets received/transmitted,
index 45b3178..433a994 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects misc. OS related data (CPU utilization, running processes).
index f02382a..42a2425 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 SECTIONS
 {
   .rodata.compressed : {
index aed3069..bed227f 100644 (file)
@@ -1,11 +1,8 @@
 #!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
 #
 # arch/s390x/boot/install.sh
 #
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
 # Copyright (C) 1995 by Linus Torvalds
 #
 # Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
index b48e20d..d607987 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
  *             Harald Freudenberger <freude@de.ibm.com>
  *
  * Derived from "crypto/aes_generic.c"
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #define KMSG_COMPONENT "aes_s390"
index 36aefc0..8720e92 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * s390 arch random implementation.
  *
  * Copyright IBM Corp. 2017
  * Author(s): Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #include <linux/kernel.h>
index 992e630..4368659 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Crypto-API module for CRC-32 algorithms implemented with the
  * z/Architecture Vector Extension Facility.
index 0d29666..5346b5a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -6,12 +7,6 @@
  * Copyright IBM Corp. 2003, 2011
  * Author(s): Thomas Spatzier
  *           Jan Glauber (jan.glauber@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  */
 
 #include <linux/init.h>
index 564616d..3b7f96c 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Cryptographic API.
  *
index a4e903e..003932d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Cryptographic API.
  *
@@ -7,11 +8,6 @@
  *   Copyright IBM Corp. 2017
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *             Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #define KMSG_COMPONENT "paes_s390"
index 3e47c4a..a97a180 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2006, 2015
  * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
index 10f2007..d6f8258 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #ifndef _CRYPTO_ARCH_S390_SHA_H
 #define _CRYPTO_ARCH_S390_SHA_H
index c7de53d..a00c17f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
  *   Copyright (c) Alan Smithee.
  *   Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
  *   Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #include <crypto/internal/hash.h>
 #include <linux/init.h>
index 53c2779..944aa6b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -6,12 +7,6 @@
  * s390 Version:
  *   Copyright IBM Corp. 2005, 2011
  *   Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #include <crypto/internal/hash.h>
 #include <linux/init.h>
index 2f4caa1..b17eded 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
index c740f77..cf0718d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #include <crypto/internal/hash.h>
index 2ee25ba..06f6015 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the linux hypfs filesystem routines.
 #
index cf8a2d9..43bbe63 100644 (file)
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  *    Hypervisor filesystem for Linux on s390.
  *
  *    Copyright IBM Corp. 2006, 2008
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
- *    License: GPL
  */
 
 #define KMSG_COMPONENT "hypfs"
index 41c211a..0484508 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 generic-y += asm-offsets.h
 generic-y += cacheflush.h
 generic-y += clkdev.h
index a720020..c2cf7bc 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_S390_ALTERNATIVE_H
 #define _ASM_S390_ALTERNATIVE_H
 
index c02f4ab..cfce683 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Adjunct processor (AP) interfaces
  *
  * Copyright IBM Corp. 2017
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Tony Krowiak <akrowia@linux.vnet.ibm.com>
  *           Martin Schwidefsky <schwidefsky@de.ibm.com>
  *           Harald Freudenberger <freude@de.ibm.com>
index 0f5bd89..aa42a17 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  *  S390 version
  *    Copyright IBM Corp. 1999
index 792cda3..dd08db4 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * CPU-measurement facilities
  *
  *  Copyright IBM Corp. 2012
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  *            Jan Glauber <jang@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #ifndef _ASM_S390_CPU_MF_H
 #define _ASM_S390_CPU_MF_H
index 9a3cb39..1a61b1b 100644 (file)
@@ -194,13 +194,14 @@ struct arch_elf_state {
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      PAGE_SIZE
 
-/*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
-#define ELF_ET_DYN_BASE                (is_compat_task() ? 0x000400000UL : \
-                                                   0x100000000UL)
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk. 64-bit
+   tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_compat_task() ? \
+                               (STACK_TOP / 3 * 2) : \
+                               (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
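To make the arithmetic concrete with a purely illustrative STACK_TOP of 0x20000000000 (2 TiB): STACK_TOP / 3 * 2 evaluates to 0x15555555554, and masking with ~((1UL << 32) - 1) rounds that down to the 4 GiB-aligned 0x15500000000, so a 64-bit ET_DYN interpreter is mapped roughly two thirds of the way up the user address space, clear of both the main program and the brk area.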
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
index 921391f..13de80c 100644 (file)
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 #ifndef _ASM_S390_KPROBES_H
 #define _ASM_S390_KPROBES_H
 /*
  *  Kernel Probes (KProbes)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright IBM Corp. 2002, 2006
  *
  * 2002-Oct    Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
index f3a9b5a..c1b0a9a 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for kernel virtual machines on s390
  *
  * Copyright IBM Corp. 2008, 2009
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
@@ -210,7 +207,8 @@ struct kvm_s390_sie_block {
        __u16   ipa;                    /* 0x0056 */
        __u32   ipb;                    /* 0x0058 */
        __u32   scaoh;                  /* 0x005c */
-       __u8    reserved60;             /* 0x0060 */
+#define FPF_BPBC       0x20
+       __u8    fpf;                    /* 0x0060 */
 #define ECB_GS         0x40
 #define ECB_TE         0x10
 #define ECB_SRSI       0x04
index 4139305..74eeec9 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for paravirtual devices on s390
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
 /*
@@ -20,8 +17,6 @@
  *
  * Copyright IBM Corp. 2007,2008
  * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
  */
 #ifndef __S390_KVM_PARA_H
 #define __S390_KVM_PARA_H
index 6de5c6c..672f95b 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * livepatch.h - s390-specific Kernel Live Patching Core
  *
@@ -7,13 +8,6 @@
  *           Jiri Slaby
  */
 
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
 #ifndef ASM_LIVEPATCH_H
 #define ASM_LIVEPATCH_H
 
index f4a07f7..65154ea 100644 (file)
@@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_PGSTE
        mm->context.alloc_pgste = page_table_allocate_pgste ||
                test_thread_flag(TIF_PGSTE) ||
-               current->mm->context.alloc_pgste;
+               (current->mm && current->mm->context.alloc_pgste);
        mm->context.has_pgste = 0;
        mm->context.use_skey = 0;
        mm->context.use_cmma = 0;
index d6c9d1e..b9c0e36 100644 (file)
@@ -40,6 +40,7 @@ struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs) perf_misc_flags(regs)
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
 
 /* Perf pt_regs extension for sample-data-entry indicators */
 struct perf_sf_sde_regs {
index d7fe983..0a6b028 100644 (file)
@@ -709,7 +709,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
 }
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
index a3788da..6f70d81 100644 (file)
@@ -74,9 +74,14 @@ enum {
  */
 struct pt_regs 
 {
-       unsigned long args[1];
-       psw_t psw;
-       unsigned long gprs[NUM_GPRS];
+       union {
+               user_pt_regs user_regs;
+               struct {
+                       unsigned long args[1];
+                       psw_t psw;
+                       unsigned long gprs[NUM_GPRS];
+               };
+       };
        unsigned long orig_gpr2;
        unsigned int int_code;
        unsigned int int_parm;
index 8bfce34..97a0582 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_SEGMENT_H
 #define _ASM_SEGMENT_H
 
index ec7b476..c61b2cc 100644 (file)
@@ -30,21 +30,20 @@ static inline void restore_access_regs(unsigned int *acrs)
        asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
 }
 
-#define switch_to(prev,next,last) do {                                 \
-       if (prev->mm) {                                                 \
-               save_fpu_regs();                                        \
-               save_access_regs(&prev->thread.acrs[0]);                \
-               save_ri_cb(prev->thread.ri_cb);                         \
-               save_gs_cb(prev->thread.gs_cb);                         \
-       }                                                               \
+#define switch_to(prev, next, last) do {                               \
+       /* save_fpu_regs() sets the CIF_FPU flag, which enforces        \
+        * a restore of the floating point / vector registers as        \
+        * soon as the next task returns to user space                  \
+        */                                                             \
+       save_fpu_regs();                                                \
+       save_access_regs(&prev->thread.acrs[0]);                        \
+       save_ri_cb(prev->thread.ri_cb);                                 \
+       save_gs_cb(prev->thread.gs_cb);                                 \
        update_cr_regs(next);                                           \
-       if (next->mm) {                                                 \
-               set_cpu_flag(CIF_FPU);                                  \
-               restore_access_regs(&next->thread.acrs[0]);             \
-               restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);  \
-               restore_gs_cb(next->thread.gs_cb);                      \
-       }                                                               \
-       prev = __switch_to(prev,next);                                  \
+       restore_access_regs(&next->thread.acrs[0]);                     \
+       restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);          \
+       restore_gs_cb(next->thread.gs_cb);                              \
+       prev = __switch_to(prev, next);                                 \
 } while (0)
 
 #endif /* __ASM_SWITCH_TO_H */
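
The reworked switch_to() above now saves the FPU state unconditionally and leans on the CIF_FPU flag, so the actual reload is deferred until the task returns to user space. The following is an illustrative sketch of that lazy-restore pattern only (not kernel code; all names here are invented for the example):

/*
 * Lazy-restore sketch: the context switch saves the FPU state and marks
 * the hardware state stale; the exit-to-user path reloads it on demand.
 */
#include <stdbool.h>
#include <stdio.h>

struct fpu_state { double fprs[16]; };

static struct fpu_state hw_fpu;	/* stands in for the real registers */
static bool cif_fpu_pending;	/* stands in for the CIF_FPU flag   */

static void save_fpu_regs(struct fpu_state *dst)
{
	*dst = hw_fpu;			/* save prev task's FPU state ...     */
	cif_fpu_pending = true;		/* ... and mark the hw state stale    */
}

static void return_to_user(const struct fpu_state *next)
{
	if (cif_fpu_pending) {		/* reload only if a restore is due    */
		hw_fpu = *next;
		cif_fpu_pending = false;
	}
}

int main(void)
{
	struct fpu_state prev = { { 0 } }, next = { { 1.0 } };

	save_fpu_regs(&prev);		/* what switch_to() does              */
	return_to_user(&next);		/* what the exit-to-user path does    */
	printf("fpr0 after restore: %.1f\n", hw_fpu.fprs[0]);
	return 0;
}
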
index 6bc941b..96f9a91 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Access to user system call parameters and results
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef _ASM_SYSCALL_H
index a702cb9..25057c1 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for store system information stsi
  *
  * Copyright IBM Corp. 2001, 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Ulrich Weigand <weigand@de.ibm.com>
  *              Christian Borntraeger <borntraeger@de.ibm.com>
  */
index 0880a37..25d6ec3 100644 (file)
@@ -42,8 +42,6 @@ struct thread_info {
        .flags          = 0,                    \
 }
 
-#define init_stack             (init_thread_union.stack)
-
 void arch_release_task_struct(struct task_struct *tsk);
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 
index 1807229..cca406f 100644 (file)
@@ -53,6 +53,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 static inline void topology_init_early(void) { }
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline int topology_cpu_dedicated(int cpu_nr) { return 0; }
 static inline void topology_expect_change(void) { }
 
 #endif /* CONFIG_SCHED_TOPOLOGY */
index d375526..605dc46 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_S390_VGA_H
 #define _ASM_S390_VGA_H
 
index 098f287..92b7c9b 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
diff --git a/arch/s390/include/uapi/asm/bpf_perf_event.h b/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..cefe7c7
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index 9ad172d..4cdaa55 100644 (file)
@@ -6,10 +6,6 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
@@ -228,6 +224,7 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_RICCB  (1UL << 7)
 #define KVM_SYNC_FPRS   (1UL << 8)
 #define KVM_SYNC_GSCB   (1UL << 9)
+#define KVM_SYNC_BPBC   (1UL << 10)
 /* length and alignment of the sdnx as a power of two */
 #define SDNXC 8
 #define SDNXL (1UL << SDNXC)
@@ -251,7 +248,9 @@ struct kvm_sync_regs {
        };
        __u8  reserved[512];    /* for future vector expansion */
        __u32 fpc;              /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
-       __u8 padding1[52];      /* riccb needs to be 64byte aligned */
+       __u8 bpbc : 1;          /* bp mode */
+       __u8 reserved2 : 7;
+       __u8 padding1[51];      /* riccb needs to be 64byte aligned */
        __u8 riccb[64];         /* runtime instrumentation controls block */
        __u8 padding2[192];     /* sdnx needs to be 256byte aligned */
        union {
index 0dc86b3..b9ab584 100644 (file)
@@ -4,9 +4,5 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
index c36c97f..84606b8 100644 (file)
@@ -4,10 +4,6 @@
  *
  * Copyright 2014 IBM Corp.
  * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef __LINUX_KVM_PERF_S390_H
index 7c8564f..d17dd9e 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _ASM_S390_PERF_REGS_H
 #define _ASM_S390_PERF_REGS_H
 
index 0d23c8f..543dd70 100644 (file)
 #define GPR_SIZE       8
 #define CR_SIZE                8
 
-#define STACK_FRAME_OVERHEAD    160      /* size of minimum stack frame */
+#define STACK_FRAME_OVERHEAD   160      /* size of minimum stack frame */
 
 #endif /* __s390x__ */
 
 #define ACR_SIZE       4
 
 
-#define PTRACE_OLDSETOPTIONS         21
+#define PTRACE_OLDSETOPTIONS        21
 
 #ifndef __ASSEMBLY__
 #include <linux/stddef.h>
 #include <linux/types.h>
 
-typedef union
-{
-       float   f;
-       double  d;
-        __u64   ui;
+typedef union {
+       float   f;
+       double  d;
+       __u64   ui;
        struct
        {
                __u32 hi;
@@ -197,23 +196,21 @@ typedef union
        } fp;
 } freg_t;
 
-typedef struct
-{
-       __u32   fpc;
+typedef struct {
+       __u32   fpc;
        __u32   pad;
-       freg_t  fprs[NUM_FPRS];              
+       freg_t  fprs[NUM_FPRS];
 } s390_fp_regs;
 
-#define FPC_EXCEPTION_MASK      0xF8000000
-#define FPC_FLAGS_MASK          0x00F80000
-#define FPC_DXC_MASK            0x0000FF00
-#define FPC_RM_MASK             0x00000003
+#define FPC_EXCEPTION_MASK     0xF8000000
+#define FPC_FLAGS_MASK         0x00F80000
+#define FPC_DXC_MASK           0x0000FF00
+#define FPC_RM_MASK            0x00000003
 
 /* this typedef defines how a Program Status Word looks like */
-typedef struct 
-{
-        unsigned long mask;
-        unsigned long addr;
+typedef struct {
+       unsigned long mask;
+       unsigned long addr;
 } __attribute__ ((aligned(8))) psw_t;
 
 #ifndef __s390x__
@@ -282,33 +279,40 @@ typedef struct
 /*
  * The s390_regs structure is used to define the elf_gregset_t.
  */
-typedef struct
-{
+typedef struct {
        psw_t psw;
        unsigned long gprs[NUM_GPRS];
        unsigned int  acrs[NUM_ACRS];
        unsigned long orig_gpr2;
 } s390_regs;
 
+/*
+ * The user_pt_regs structure exports the beginning of
+ * the in-kernel pt_regs structure to user space.
+ */
+typedef struct {
+       unsigned long args[1];
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+} user_pt_regs;
+
 /*
  * Now for the user space program event recording (trace) definitions.
  * The following structures are used only for the ptrace interface, don't
  * touch or even look at it if you don't want to modify the user-space
  * ptrace interface. In particular stay away from it for in-kernel PER.
  */
-typedef struct
-{
+typedef struct {
        unsigned long cr[NUM_CR_WORDS];
 } per_cr_words;
 
 #define PER_EM_MASK 0xE8000000UL
 
-typedef        struct
-{
+typedef struct {
 #ifdef __s390x__
-       unsigned                       : 32;
+       unsigned                       : 32;
 #endif /* __s390x__ */
-       unsigned em_branching          : 1;
+       unsigned em_branching          : 1;
        unsigned em_instruction_fetch  : 1;
        /*
         * Switching on storage alteration automatically fixes
@@ -317,44 +321,41 @@ typedef   struct
        unsigned em_storage_alteration : 1;
        unsigned em_gpr_alt_unused     : 1;
        unsigned em_store_real_address : 1;
-       unsigned                       : 3;
+       unsigned                       : 3;
        unsigned branch_addr_ctl       : 1;
-       unsigned                       : 1;
+       unsigned                       : 1;
        unsigned storage_alt_space_ctl : 1;
-       unsigned                       : 21;
+       unsigned                       : 21;
        unsigned long starting_addr;
        unsigned long ending_addr;
 } per_cr_bits;
 
-typedef struct
-{
+typedef struct {
        unsigned short perc_atmid;
        unsigned long address;
        unsigned char access_id;
 } per_lowcore_words;
 
-typedef struct
-{
-       unsigned perc_branching          : 1;
+typedef struct {
+       unsigned perc_branching          : 1;
        unsigned perc_instruction_fetch  : 1;
        unsigned perc_storage_alteration : 1;
-       unsigned perc_gpr_alt_unused     : 1;
+       unsigned perc_gpr_alt_unused     : 1;
        unsigned perc_store_real_address : 1;
-       unsigned                         : 3;
-       unsigned atmid_psw_bit_31        : 1;
-       unsigned atmid_validity_bit      : 1;
-       unsigned atmid_psw_bit_32        : 1;
-       unsigned atmid_psw_bit_5         : 1;
-       unsigned atmid_psw_bit_16        : 1;
-       unsigned atmid_psw_bit_17        : 1;
-       unsigned si                      : 2;
+       unsigned                         : 3;
+       unsigned atmid_psw_bit_31        : 1;
+       unsigned atmid_validity_bit      : 1;
+       unsigned atmid_psw_bit_32        : 1;
+       unsigned atmid_psw_bit_5         : 1;
+       unsigned atmid_psw_bit_16        : 1;
+       unsigned atmid_psw_bit_17        : 1;
+       unsigned si                      : 2;
        unsigned long address;
-       unsigned                         : 4;
-       unsigned access_id               : 4;
+       unsigned                         : 4;
+       unsigned access_id               : 4;
 } per_lowcore_bits;
 
-typedef struct
-{
+typedef struct {
        union {
                per_cr_words   words;
                per_cr_bits    bits;
@@ -364,9 +365,9 @@ typedef struct
         * the kernel always sets them to zero. To enable single
         * stepping use ptrace(PTRACE_SINGLESTEP) instead.
         */
-       unsigned  single_step       : 1;
+       unsigned  single_step       : 1;
        unsigned  instruction_fetch : 1;
-       unsigned                    : 30;
+       unsigned                    : 30;
        /*
         * These addresses are copied into cr10 & cr11 if single
         * stepping is switched off
@@ -376,11 +377,10 @@ typedef struct
        union {
                per_lowcore_words words;
                per_lowcore_bits  bits;
-       } lowcore; 
+       } lowcore;
 } per_struct;
 
-typedef struct
-{
+typedef struct {
        unsigned int  len;
        unsigned long kernel_addr;
        unsigned long process_addr;
@@ -390,12 +390,12 @@ typedef struct
  * S/390 specific non posix ptrace requests. I chose unusual values so
  * they are unlikely to clash with future ptrace definitions.
  */
-#define PTRACE_PEEKUSR_AREA           0x5000
-#define PTRACE_POKEUSR_AREA           0x5001
+#define PTRACE_PEEKUSR_AREA          0x5000
+#define PTRACE_POKEUSR_AREA          0x5001
 #define PTRACE_PEEKTEXT_AREA         0x5002
 #define PTRACE_PEEKDATA_AREA         0x5003
 #define PTRACE_POKETEXT_AREA         0x5004
-#define PTRACE_POKEDATA_AREA         0x5005
+#define PTRACE_POKEDATA_AREA         0x5005
 #define PTRACE_GET_LAST_BREAK        0x5006
 #define PTRACE_PEEK_SYSTEM_CALL       0x5007
 #define PTRACE_POKE_SYSTEM_CALL              0x5008
@@ -413,21 +413,19 @@ typedef struct
  * PT_PROT definition is loosely based on hppa bsd definition in
  * gdb/hppab-nat.c
  */
-#define PTRACE_PROT                       21
+#define PTRACE_PROT                      21
 
-typedef enum
-{
+typedef enum {
        ptprot_set_access_watchpoint,
        ptprot_set_write_watchpoint,
        ptprot_disable_watchpoint
 } ptprot_flags;
 
-typedef struct
-{
+typedef struct {
        unsigned long lowaddr;
        unsigned long hiaddr;
        ptprot_flags prot;
-} ptprot_area;                     
+} ptprot_area;
 
 /* Sequence of bytes for breakpoint illegal instruction.  */
 #define S390_BREAKPOINT     {0x0,0x1}
@@ -439,8 +437,7 @@ typedef struct
  * The user_regs_struct defines the way the user registers are
  * store on the stack for signal handling.
  */
-struct user_regs_struct
-{
+struct user_regs_struct {
        psw_t psw;
        unsigned long gprs[NUM_GPRS];
        unsigned int  acrs[NUM_ACRS];
index ec113db..b1b0223 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _UAPI_ASM_STHYI_H
 #define _UAPI_ASM_STHYI_H
 
index 967aad3..2b605f7 100644 (file)
@@ -1,13 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
 /*
  * Definitions for virtio-ccw devices.
  *
  * Copyright IBM Corp. 2013
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *  Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
 #ifndef __KVM_VIRTIO_CCW_H
index 4caf717..aeaaa03 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  * Copyright IBM Corp. 2004, 2005
  * Interface implementation for communication with the z/VM control program
index 137ef47..d568307 100644 (file)
@@ -9,20 +9,6 @@
  *            Eric Rossman (edrossma@us.ibm.com)
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef __ASM_S390_ZCRYPT_H
index 315986a..574e776 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 #include <linux/module.h>
 #include <asm/alternative.h>
 #include <asm/facility.h>
index f04db37..59eea9c 100644 (file)
@@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
                return retval;
        }
 
+       groups_sort(group_info);
        retval = set_current_groups(group_info);
        put_group_info(group_info);
 
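
The groups_sort() call added above sorts the supplementary group list before it is installed; membership checks binary-search that list, which only works on sorted input. A small userspace sketch of the idea (qsort/bsearch stand in for the kernel helpers):

#include <stdio.h>
#include <stdlib.h>

static int cmp_gid(const void *a, const void *b)
{
	unsigned int x = *(const unsigned int *)a;
	unsigned int y = *(const unsigned int *)b;

	return (x > y) - (x < y);
}

int main(void)
{
	unsigned int groups[] = { 1000, 20, 4, 27 };	/* as copied from user */
	unsigned int key = 27;
	size_t n = sizeof(groups) / sizeof(groups[0]);

	qsort(groups, n, sizeof(groups[0]), cmp_gid);	/* "groups_sort()"    */

	if (bsearch(&key, groups, n, sizeof(groups[0]), cmp_gid))
		printf("gid %u found\n", key);
	return 0;
}
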
index 58b9e12..80e974a 100644 (file)
@@ -1392,7 +1392,7 @@ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
        else
                except_str = "-";
        caller = (unsigned long) entry->caller;
-       rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %p  ",
+       rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %pK  ",
                      area, sec, usec, level, except_str,
                      entry->id.fields.cpuid, (void *)caller);
        return rc;
index 3be8297..b2c68fb 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Disassemble s390 instructions.
  *
@@ -396,9 +397,14 @@ struct s390_insn *find_insn(unsigned char *code)
        unsigned char opfrag;
        int i;
 
+       /* Search the opcode offset table to find an entry which
+        * matches the beginning of the opcode. If there is no match
+        * the last entry will be used, which is the default entry for
+        * unknown instructions as well as 1-byte opcode instructions.
+        */
        for (i = 0; i < ARRAY_SIZE(opcode_offset); i++) {
                entry = &opcode_offset[i];
-               if (entry->opcode == code[0] || entry->opcode == 0)
+               if (entry->opcode == code[0])
                        break;
        }
 
@@ -543,7 +549,7 @@ void show_code(struct pt_regs *regs)
                start += opsize;
                pr_cont("%s", buffer);
                ptr = buffer;
-               ptr += sprintf(ptr, "\n\t  ");
+               ptr += sprintf(ptr, "\n          ");
                hops++;
        }
        pr_cont("\n");
index 2aa545d..5b23c4f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Stack dumping functions
  *
index a316cd6..9e5f6cd 100644 (file)
@@ -180,18 +180,17 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
  */
 ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
-       lgr     %r1,%r2
-       aghi    %r1,__TASK_thread               # thread_struct of prev task
-       lg      %r5,__TASK_stack(%r3)           # start of kernel stack of next
-       stg     %r15,__THREAD_ksp(%r1)          # store kernel stack of prev
-       lgr     %r1,%r3
-       aghi    %r1,__TASK_thread               # thread_struct of next task
+       lghi    %r4,__TASK_stack
+       lghi    %r1,__TASK_thread
+       lg      %r5,0(%r4,%r3)                  # start of kernel stack of next
+       stg     %r15,__THREAD_ksp(%r1,%r2)      # store kernel stack of prev
        lgr     %r15,%r5
        aghi    %r15,STACK_INIT                 # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
-       lg      %r15,__THREAD_ksp(%r1)          # load kernel stack of next
-       mvc     __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
+       lg      %r15,__THREAD_ksp(%r1,%r3)      # load kernel stack of next
+       aghi    %r3,__TASK_pid
+       mvc     __LC_CURRENT_PID(4,%r0),0(%r3)  # store pid of next
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
        bzr     %r14
index 310e59e..8ecb872 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    ipl/reipl/dump support for Linux on s390.
  *
index 1a6521a..af3722c 100644 (file)
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Kernel Probes (KProbes)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright IBM Corp. 2002, 2006
  *
  * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
index bf9622f..452502f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux Guest Relocation (LGR) detection
  *
index 7b87991..b7abfad 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Kernel module help for s390.
  *
@@ -8,20 +9,6 @@
  *
  *  based on i386 version
  *    Copyright (C) 2001 Rusty Russell.
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/module.h>
 #include <linux/elf.h>
index 6ff1692..c7a6276 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   Machine check handler
  *
index 746d034..cc085e2 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for s390x - CPU-measurement Counter Facility
  *
  *  Copyright IBM Corp. 2012, 2017
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "cpum_cf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index 227b38b..1c9ddd7 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for the System z CPU-measurement Sampling Facility
  *
  * Copyright IBM Corp. 2013
  * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "cpum_sf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index 93a386f..0d770e5 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for s390x
  *
  *  Copyright IBM Corp. 2012, 2013
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "perf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index f8603eb..54e2d63 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 #include <linux/perf_event.h>
 #include <linux/perf_regs.h>
 #include <linux/kernel.h>
index 26c0523..cd3df55 100644 (file)
@@ -1650,6 +1650,14 @@ static const struct user_regset s390_compat_regsets[] = {
                .get = s390_gs_cb_get,
                .set = s390_gs_cb_set,
        },
+       {
+               .core_note_type = NT_S390_GS_BC,
+               .n = sizeof(struct gs_cb) / sizeof(__u64),
+               .size = sizeof(__u64),
+               .align = sizeof(__u64),
+               .get = s390_gs_bc_get,
+               .set = s390_gs_bc_set,
+       },
        {
                .core_note_type = NT_S390_RI_CB,
                .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
index 090053c..793da97 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  S390 version
  *    Copyright IBM Corp. 1999, 2012
index cd4334e..b8c1a85 100644 (file)
@@ -55,6 +55,7 @@
 #include <asm/sigp.h>
 #include <asm/idle.h>
 #include <asm/nmi.h>
+#include <asm/topology.h>
 #include "entry.h"
 
 enum {
index e66687d..460dcfb 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Stack trace management functions
  *
index 12981e1..80b862e 100644 (file)
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * store hypervisor information instruction emulation functions.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Copyright IBM Corp. 2016
  * Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
  */
index 308a7b6..f7fc633 100644 (file)
@@ -370,10 +370,10 @@ SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
 SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
 SYSCALL(sys_socket,sys_socket)
 SYSCALL(sys_socketpair,compat_sys_socketpair)          /* 360 */
-SYSCALL(sys_bind,sys_bind)
-SYSCALL(sys_connect,sys_connect)
+SYSCALL(sys_bind,compat_sys_bind)
+SYSCALL(sys_connect,compat_sys_connect)
 SYSCALL(sys_listen,sys_listen)
-SYSCALL(sys_accept4,sys_accept4)
+SYSCALL(sys_accept4,compat_sys_accept4)
 SYSCALL(sys_getsockopt,compat_sys_getsockopt)          /* 365 */
 SYSCALL(sys_setsockopt,compat_sys_setsockopt)
 SYSCALL(sys_getsockname,compat_sys_getsockname)
index be61981..cf56116 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Time of day based timer functions.
  *
index f9b393d..4d5b65e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2011
  *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
index 39a2187..f3a1c7c 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * vdso setup for s390
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #include <linux/init.h>
index eca3f00..f61df52 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_getres() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index a5769b8..2d6ec3a 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_gettime() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index 63b86dc..aa8bf13 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of gettimeofday() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index c8513de..faf5213 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_getres() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index 5d7b56b..6046b3b 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_gettime() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index b02e62f..cc9dbc2 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of gettimeofday() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index 79a071e..db19d06 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
  * Here we can supply some information useful to userland.
index dd7178f..f24395a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Virtual cpu timer based timer functions.
  *
index 6048b1c..05ee90a 100644 (file)
@@ -1,10 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
 # Makefile for kernel virtual machines on s390
 #
 # Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
 
 KVM := ../../../virt/kvm
 common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o  $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
index d93a2c0..89aa114 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling diagnose instructions
  *
  * Copyright IBM Corp. 2008, 2011
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
index bec42b8..f4c5175 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * access guest memory
  *
  * Copyright IBM Corp. 2008, 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
index bcbd866..b5f3e82 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * kvm guest debug support
  *
  * Copyright IBM Corp. 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
  */
 #include <linux/kvm_host.h>
index 8fe034b..9c7d707 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * in-kernel handling for sie intercepts
  *
  * Copyright IBM Corp. 2008, 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
index fa55737..024ad8b 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling kvm guest interrupts
  *
  * Copyright IBM Corp. 2008, 2015
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
index d98e415..484608c 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * s390 irqchip routines
  *
  * Copyright IBM Corp. 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
 #ifndef __KVM_IRQ_H
index 98ad8b9..1371dff 100644 (file)
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * hosting zSeries kernel virtual machines
+ * hosting IBM Z kernel virtual machines (s390x)
  *
- * Copyright IBM Corp. 2008, 2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
+ * Copyright IBM Corp. 2008, 2017
  *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
@@ -424,6 +421,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_S390_GS:
                r = test_facility(133);
                break;
+       case KVM_CAP_S390_BPB:
+               r = test_facility(82);
+               break;
        default:
                r = 0;
        }
@@ -769,7 +769,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
 
 /*
  * Must be called with kvm->srcu held to avoid races on memslots, and with
- * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
+ * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
  */
 static int kvm_s390_vm_start_migration(struct kvm *kvm)
 {
@@ -795,11 +795,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
 
        if (kvm->arch.use_cmma) {
                /*
-                * Get the last slot. They should be sorted by base_gfn, so the
-                * last slot is also the one at the end of the address space.
-                * We have verified above that at least one slot is present.
+                * Get the first slot. They are reverse sorted by base_gfn, so
+                * the first slot is also the one at the end of the address
+                * space. We have verified above that at least one slot is
+                * present.
                 */
-               ms = slots->memslots + slots->used_slots - 1;
+               ms = slots->memslots;
                /* round up so we only use full longs */
                ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
                /* allocate enough bytes to store all the bits */
@@ -824,7 +825,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
 }
 
 /*
- * Must be called with kvm->lock to avoid races with ourselves and
+ * Must be called with kvm->slots_lock to avoid races with ourselves and
  * kvm_s390_vm_start_migration.
  */
 static int kvm_s390_vm_stop_migration(struct kvm *kvm)
@@ -839,6 +840,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
 
        if (kvm->arch.use_cmma) {
                kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
+               /* We have to wait for the essa emulation to finish */
+               synchronize_srcu(&kvm->srcu);
                vfree(mgs->pgste_bitmap);
        }
        kfree(mgs);
@@ -848,14 +851,12 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
 static int kvm_s390_vm_set_migration(struct kvm *kvm,
                                     struct kvm_device_attr *attr)
 {
-       int idx, res = -ENXIO;
+       int res = -ENXIO;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->slots_lock);
        switch (attr->attr) {
        case KVM_S390_VM_MIGRATION_START:
-               idx = srcu_read_lock(&kvm->srcu);
                res = kvm_s390_vm_start_migration(kvm);
-               srcu_read_unlock(&kvm->srcu, idx);
                break;
        case KVM_S390_VM_MIGRATION_STOP:
                res = kvm_s390_vm_stop_migration(kvm);
@@ -863,7 +864,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm,
        default:
                break;
        }
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->slots_lock);
 
        return res;
 }
@@ -1753,7 +1754,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&args, argp, sizeof(args)))
                        break;
+               mutex_lock(&kvm->slots_lock);
                r = kvm_s390_get_cmma_bits(kvm, &args);
+               mutex_unlock(&kvm->slots_lock);
                if (!r) {
                        r = copy_to_user(argp, &args, sizeof(args));
                        if (r)
@@ -1767,7 +1770,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&args, argp, sizeof(args)))
                        break;
+               mutex_lock(&kvm->slots_lock);
                r = kvm_s390_set_cmma_bits(kvm, &args);
+               mutex_unlock(&kvm->slots_lock);
                break;
        }
        default:
@@ -2200,6 +2205,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        kvm_s390_set_prefix(vcpu, 0);
        if (test_kvm_facility(vcpu->kvm, 64))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+       if (test_kvm_facility(vcpu->kvm, 82))
+               vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
        if (test_kvm_facility(vcpu->kvm, 133))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
        /* fprs can be synchronized via vrs, even if the guest has no vx. With
@@ -2341,6 +2348,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
        current->thread.fpu.fpc = 0;
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
+       vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -3300,6 +3308,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
                vcpu->arch.gs_enabled = 1;
        }
+       if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+           test_kvm_facility(vcpu->kvm, 82)) {
+               vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+               vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+       }
        save_access_regs(vcpu->arch.host_acrs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        /* save host (userspace) fprs/vrs */
@@ -3346,6 +3359,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        kvm_run->s.regs.pft = vcpu->arch.pfault_token;
        kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
        kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+       kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_access_regs(vcpu->arch.host_acrs);
        /* Save guest register state */
@@ -3372,7 +3386,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int rc;
-       sigset_t sigsaved;
 
        if (kvm_run->immediate_exit)
                return -EINTR;
@@ -3382,8 +3395,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return 0;
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
                kvm_s390_vcpu_start(vcpu);
@@ -3417,8 +3429,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        disable_cpu_timer_accounting(vcpu);
        store_regs(vcpu, kvm_run);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        vcpu->stat.exit_userspace++;
        return rc;
@@ -3811,6 +3822,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                        r = -EINVAL;
                        break;
                }
+               /* do not use irq_state.flags, it will break old QEMUs */
                r = kvm_s390_set_irq_state(vcpu,
                                           (void __user *) irq_state.buf,
                                           irq_state.len);
@@ -3826,6 +3838,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                        r = -EINVAL;
                        break;
                }
+               /* do not use irq_state.flags, it will break old QEMUs */
                r = kvm_s390_get_irq_state(vcpu,
                                           (__u8 __user *)  irq_state.buf,
                                           irq_state.len);
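
Among the kvm-s390.c changes above, the new KVM_SYNC_BPBC handling follows the usual run-structure sync pattern: userspace marks the field dirty, sync_regs() copies it into the FPF_BPBC control-block flag on entry, and store_regs() mirrors it back on exit. A reduced sketch of that round trip (not the KVM code itself; the structures are simplified stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SYNC_BPBC	(1ULL << 10)	/* "this shared field is valid"  */
#define FPF_BPBC	0x20u		/* flag bit in the control block */

struct shared_regs { uint64_t dirty; bool bpbc; };
struct control_block { uint8_t fpf; };

static void sync_in(struct control_block *cb, const struct shared_regs *run)
{
	if (run->dirty & SYNC_BPBC) {
		cb->fpf &= ~FPF_BPBC;			/* clear, then ...   */
		cb->fpf |= run->bpbc ? FPF_BPBC : 0;	/* ... apply request */
	}
}

static void sync_out(const struct control_block *cb, struct shared_regs *run)
{
	run->bpbc = (cb->fpf & FPF_BPBC) == FPF_BPBC;	/* mirror back */
}

int main(void)
{
	struct control_block cb = { 0 };
	struct shared_regs run = { .dirty = SYNC_BPBC, .bpbc = true };

	sync_in(&cb, &run);
	sync_out(&cb, &run);
	printf("bpbc after round trip: %d\n", run.bpbc);
	return 0;
}
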
index 10d65df..5e46ba4 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for kvm on s390
  *
  * Copyright IBM Corp. 2008, 2009
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
index c954ac4..0714bfa 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling privileged instructions
  *
  * Copyright IBM Corp. 2008, 2013
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
@@ -235,8 +232,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
                VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
                return -EAGAIN;
        }
-       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
-               return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        return 0;
 }
 
@@ -247,6 +242,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
        int reg1, reg2;
        int rc;
 
+       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+               return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;
@@ -276,6 +274,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
        int reg1, reg2;
        int rc;
 
+       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+               return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;
@@ -311,6 +312,9 @@ static int handle_sske(struct kvm_vcpu *vcpu)
        int reg1, reg2;
        int rc;
 
+       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+               return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;
@@ -1002,7 +1006,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
                cbrlo[entries] = gfn << PAGE_SHIFT;
        }
 
-       if (orc) {
+       if (orc && gfn < ms->bitmap_size) {
                /* increment only if we are really flipping the bit to 1 */
                if (!test_and_set_bit(gfn, ms->pgste_bitmap))
                        atomic64_inc(&ms->dirty_pages);
index 9d592ef..c1f5cde 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling interprocessor communication
  *
  * Copyright IBM Corp. 2008, 2013
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
index a311938..7513483 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * kvm nested virtualization support for s390x
  *
  * Copyright IBM Corp. 2016
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
  */
 #include <linux/vmalloc.h>
@@ -226,6 +223,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        memcpy(scb_o->gcr, scb_s->gcr, 128);
        scb_o->pp = scb_s->pp;
 
+       /* branch prediction */
+       if (test_kvm_facility(vcpu->kvm, 82)) {
+               scb_o->fpf &= ~FPF_BPBC;
+               scb_o->fpf |= scb_s->fpf & FPF_BPBC;
+       }
+
        /* interrupt intercept */
        switch (scb_s->icptcode) {
        case ICPT_PROGI:
@@ -268,6 +271,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        scb_s->ecb3 = 0;
        scb_s->ecd = 0;
        scb_s->fac = 0;
+       scb_s->fpf = 0;
 
        rc = prepare_cpuflags(vcpu, vsie_page);
        if (rc)
@@ -327,6 +331,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                        prefix_unmapped(vsie_page);
                scb_s->ecb |= scb_o->ecb & ECB_TE;
        }
+       /* branch prediction */
+       if (test_kvm_facility(vcpu->kvm, 82))
+               scb_s->fpf |= scb_o->fpf & FPF_BPBC;
        /* SIMD */
        if (test_kvm_facility(vcpu->kvm, 129)) {
                scb_s->eca |= scb_o->eca & ECA_VX;
index cae5a1e..c4f8039 100644 (file)
@@ -89,11 +89,11 @@ EXPORT_SYMBOL(enable_sacf_uaccess);
 
 void disable_sacf_uaccess(mm_segment_t old_fs)
 {
+       current->thread.mm_segment = old_fs;
        if (old_fs == USER_DS && test_facility(27)) {
                __ctl_load(S390_lowcore.user_asce, 1, 1);
                clear_cpu_flag(CIF_ASCE_PRIMARY);
        }
-       current->thread.mm_segment = old_fs;
 }
 EXPORT_SYMBOL(disable_sacf_uaccess);
 
index 3d01717..6cf024e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Collaborative memory management interface.
  *
index b2c1401..05d459b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  KVM guest address space mapping code
  *
index 5bea139..831bdcf 100644 (file)
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  flexible mmap layout support
  *
  * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
  * All Rights Reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- *
  * Started by Ingo Molnar <mingo@elte.hu>
  */
 
index 434a956..cb36415 100644 (file)
@@ -83,8 +83,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
 
        /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
        VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
-       if (end >= TASK_SIZE_MAX)
-               return -ENOMEM;
        rc = 0;
        notify = 0;
        while (mm->context.asce_limit < end) {
index ae677f8..4f2b65d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2011
  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
index 90568c3..e0d5f24 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Arch-specific network modules
 #
index e81c168..9557d8b 100644 (file)
@@ -55,8 +55,7 @@ struct bpf_jit {
 #define SEEN_LITERAL   8       /* code uses literals */
 #define SEEN_FUNC      16      /* calls C functions */
 #define SEEN_TAIL_CALL 32      /* code uses tail calls */
-#define SEEN_SKB_CHANGE        64      /* code changes skb data */
-#define SEEN_REG_AX    128     /* code uses constant blinding */
+#define SEEN_REG_AX    64      /* code uses constant blinding */
 #define SEEN_STACK     (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
@@ -448,12 +447,12 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
                        EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
                                      REG_15, 152);
        }
-       if (jit->seen & SEEN_SKB)
+       if (jit->seen & SEEN_SKB) {
                emit_load_skb_data_hlen(jit);
-       if (jit->seen & SEEN_SKB_CHANGE)
                /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
                EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
                              STK_OFF_SKBP);
+       }
 }
 
 /*
@@ -983,8 +982,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                EMIT2(0x0d00, REG_14, REG_W1);
                /* lgr %b0,%r2: load return value into %b0 */
                EMIT4(0xb9040000, BPF_REG_0, REG_2);
-               if (bpf_helper_changes_pkt_data((void *)func)) {
-                       jit->seen |= SEEN_SKB_CHANGE;
+               if ((jit->seen & SEEN_SKB) &&
+                   bpf_helper_changes_pkt_data((void *)func)) {
                        /* lg %b1,ST_OFF_SKBP(%r15) */
                        EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
                                      REG_15, STK_OFF_SKBP);
index f94ecaf..66c2dff 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 obj-y                  += numa.o
 obj-y                  += toptree.o
 obj-$(CONFIG_NUMA_EMU) += mode_emu.o
index 805d8b2..22d0871 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the s390 PCI subsystem.
 #
index 0fe649c..4902fed 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2012
  *
index c2f786f..b482e95 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Copyright IBM Corp. 2012,2015
  *
index 0d300ee..2d15d84 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2012
  *
@@ -180,6 +181,9 @@ out_unlock:
 static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
                           size_t size, int flags)
 {
+       unsigned long irqflags;
+       int ret;
+
        /*
         * With zdev->tlb_refresh == 0, rpcit is not required to establish new
         * translations when previously invalid translation-table entries are
@@ -195,8 +199,22 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
                        return 0;
        }
 
-       return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
-                                 PAGE_ALIGN(size));
+       ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
+                                PAGE_ALIGN(size));
+       if (ret == -ENOMEM && !s390_iommu_strict) {
+               /* enable the hypervisor to free some resources */
+               if (zpci_refresh_global(zdev))
+                       goto out;
+
+               spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
+               bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
+                             zdev->lazy_bitmap, zdev->iommu_pages);
+               bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
+               spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
+               ret = 0;
+       }
+out:
+       return ret;
 }
 
 static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
index 81b840b..f069929 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * s390 specific pci instructions
  *
@@ -88,6 +89,9 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
        if (cc)
                zpci_err_insn(cc, status, addr, range);
 
+       if (cc == 1 && (status == 4 || status == 16))
+               return -ENOMEM;
+
        return (cc) ? -EIO : 0;
 }
 
index 01d4c5a..357d426 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Generate opcode table initializers for the in-kernel disassembler.
  *
index ad51b56..bc4c7c9 100644 (file)
@@ -58,9 +58,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* How to get the thread information struct from C. */
 register struct thread_info *__current_thread_info __asm__("r28");
 #define current_thread_info()  __current_thread_info
index c94ee54..81271d3 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y      += bpf_perf_event.h
 generic-y      += siginfo.h
index 77c3535..412326d 100644 (file)
@@ -9,6 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/sh_eth.h>
 #include <mach-se/mach/se.h>
 #include <mach-se/mach/mrshpc.h>
 #include <asm/machvec.h>
@@ -115,13 +116,23 @@ static struct platform_device heartbeat_device = {
 #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
        defined(CONFIG_CPU_SUBTYPE_SH7712)
 /* SH771X Ethernet driver */
+static struct sh_eth_plat_data sh_eth_plat = {
+       .phy = PHY_ID,
+       .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
 static struct resource sh_eth0_resources[] = {
        [0] = {
                .start = SH_ETH0_BASE,
-               .end = SH_ETH0_BASE + 0x1B8,
+               .end = SH_ETH0_BASE + 0x1B8 - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
+               .start = SH_TSU_BASE,
+               .end = SH_TSU_BASE + 0x200 - 1,
+               .flags = IORESOURCE_MEM,
+       },
+       [2] = {
                .start = SH_ETH0_IRQ,
                .end = SH_ETH0_IRQ,
                .flags = IORESOURCE_IRQ,
@@ -132,7 +143,7 @@ static struct platform_device sh_eth0_device = {
        .name = "sh771x-ether",
        .id = 0,
        .dev = {
-               .platform_data = PHY_ID,
+               .platform_data = &sh_eth_plat,
        },
        .num_resources = ARRAY_SIZE(sh_eth0_resources),
        .resource = sh_eth0_resources,
@@ -141,10 +152,15 @@ static struct platform_device sh_eth0_device = {
 static struct resource sh_eth1_resources[] = {
        [0] = {
                .start = SH_ETH1_BASE,
-               .end = SH_ETH1_BASE + 0x1B8,
+               .end = SH_ETH1_BASE + 0x1B8 - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
+               .start = SH_TSU_BASE,
+               .end = SH_TSU_BASE + 0x200 - 1,
+               .flags = IORESOURCE_MEM,
+       },
+       [2] = {
                .start = SH_ETH1_IRQ,
                .end = SH_ETH1_IRQ,
                .flags = IORESOURCE_IRQ,
@@ -155,7 +171,7 @@ static struct platform_device sh_eth1_device = {
        .name = "sh771x-ether",
        .id = 1,
        .dev = {
-               .platform_data = PHY_ID,
+               .platform_data = &sh_eth_plat,
        },
        .num_resources = ARRAY_SIZE(sh_eth1_resources),
        .resource = sh_eth1_resources,
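
The ".end = ... + 0x1B8 - 1" and "+ 0x200 - 1" changes above reflect that resource ranges are inclusive: a region of SIZE bytes starting at START ends at START + SIZE - 1. A tiny generic-C sketch (DEFINE_RES here is an illustrative macro, not the board file's code):

#include <stdio.h>

struct resource { unsigned long start, end; };

#define DEFINE_RES(_start, _size) \
	{ .start = (_start), .end = (_start) + (_size) - 1 }

int main(void)
{
	struct resource eth0 = DEFINE_RES(0xA7000000UL, 0x1B8);
	struct resource tsu  = DEFINE_RES(0xA7000800UL, 0x200);

	printf("eth0: %#lx-%#lx (%lu bytes)\n",
	       eth0.start, eth0.end, eth0.end - eth0.start + 1);
	printf("tsu:  %#lx-%#lx (%lu bytes)\n",
	       tsu.start, tsu.end, tsu.end - tsu.start + 1);
	return 0;
}
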
index becb798..cf5c792 100644 (file)
@@ -63,9 +63,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* how to get the current stack pointer from C */
 register unsigned long current_stack_pointer asm("r15") __used;
 
index 4246ef9..aa83fe1 100644 (file)
 /* Base address */
 #define SH_ETH0_BASE 0xA7000000
 #define SH_ETH1_BASE 0xA7000400
+#define SH_TSU_BASE  0xA7000800
 /* PHY ID */
 #if defined(CONFIG_CPU_SUBTYPE_SH7710)
 # define PHY_ID 0x00
index e285313..ba4d39c 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 818d3aa..d257186 100644 (file)
@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o
 
 obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
 obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
-obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o
+obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
 
 obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
 
index 5a9e96b..9937c5f 100644 (file)
@@ -715,7 +715,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return pte_pfn(pte);
 }
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline unsigned long pmd_write(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
index febaaeb..548b366 100644 (file)
@@ -63,9 +63,6 @@ struct thread_info {
        .preempt_count  =       INIT_PREEMPT_COUNT,     \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* how to get the thread information struct from C */
 register struct thread_info *current_thread_info_reg asm("g6");
 #define current_thread_info()   (current_thread_info_reg)
index caf9153..f7e7b0b 100644 (file)
@@ -120,9 +120,6 @@ struct thread_info {
        .preempt_count  =       INIT_PREEMPT_COUNT,     \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* how to get the thread information struct from C */
 register struct thread_info *current_thread_info_reg asm("g6");
 #define current_thread_info()  (current_thread_info_reg)
index 2178c78..4680ba2 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += types.h
index 0f0f76b..063556f 100644 (file)
@@ -19,7 +19,7 @@ lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 lib-$(CONFIG_SPARC64) += multi3.o
 lib-$(CONFIG_SPARC64) += fls.o
 lib-$(CONFIG_SPARC64) += fls64.o
-obj-$(CONFIG_SPARC64) += NG4fls.o
+lib-$(CONFIG_SPARC64) += NG4fls.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
index e5547b2..0ddbbb0 100644 (file)
@@ -44,8 +44,8 @@ EXPORT_SYMBOL(__arch_hweight32)
        .previous
 
 ENTRY(__arch_hweight64)
-       sethi   %hi(__sw_hweight16), %g1
-       jmpl    %g1 + %lo(__sw_hweight16), %g0
+       sethi   %hi(__sw_hweight64), %g1
+       jmpl    %g1 + %lo(__sw_hweight64), %g0
         nop
 ENDPROC(__arch_hweight64)
 EXPORT_SYMBOL(__arch_hweight64)
index be3136f..a8103a8 100644 (file)
@@ -113,7 +113,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
index 815c03d..41363f4 100644 (file)
@@ -154,7 +154,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
index 5765e7e..ff5f9cb 100644 (file)
@@ -1245,14 +1245,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                u8 *func = ((u8 *)__bpf_call_base) + imm;
 
                ctx->saw_call = true;
+               if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
+                       emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx);
 
                emit_call((u32 *)func, ctx);
                emit_nop(ctx);
 
                emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
 
-               if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind)
-                       load_skb_regs(ctx, bpf2sparc[BPF_REG_6]);
+               if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
+                       load_skb_regs(ctx, L7);
                break;
        }
 
index 2a26cc4..adfa21b 100644 (file)
@@ -475,7 +475,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 #define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_huge_page(pmd)     pte_huge(pmd_pte(pmd))
 #define pmd_mkhuge(pmd)                pte_pmd(pte_mkhuge(pmd_pte(pmd)))
-#define __HAVE_ARCH_PMD_WRITE
 
 #define pfn_pmd(pfn, pgprot)   pte_pmd(pfn_pte((pfn), (pgprot)))
 #define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
index b7659b8..2adcacd 100644 (file)
@@ -59,9 +59,6 @@ struct thread_info {
        .align_ctl      = 0,                    \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 #endif /* !__ASSEMBLY__ */
 
 #if PAGE_SIZE < 8192
index 5711de0..cc43961 100644 (file)
@@ -1,6 +1,7 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 50a32c3..73c57f6 100644 (file)
@@ -1,4 +1,5 @@
 generic-y += barrier.h
+generic-y += bpf_perf_event.h
 generic-y += bug.h
 generic-y += clkdev.h
 generic-y += current.h
index b668e35..fca34b2 100644 (file)
@@ -15,9 +15,10 @@ extern void uml_setup_stubs(struct mm_struct *mm);
 /*
  * Needed since we do not use the asm-generic/mm_hooks.h:
  */
-static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
        uml_setup_stubs(mm);
+       return 0;
 }
 extern void arch_exit_mmap(struct mm_struct *mm);
 static inline void arch_unmap(struct mm_struct *mm,
index 86942a4..b58b746 100644 (file)
@@ -58,7 +58,10 @@ static inline void release_thread(struct task_struct *task)
 {
 }
 
-#define init_stack     (init_thread_union.stack)
+static inline void mm_copy_segments(struct mm_struct *from_mm,
+                                   struct mm_struct *new_mm)
+{
+}
 
 /*
  * User space process size: 3GB (default).
index 9300f76..4eecd96 100644 (file)
@@ -6,6 +6,9 @@
 #ifndef __UM_THREAD_INFO_H
 #define __UM_THREAD_INFO_H
 
+#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
+#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
+
 #ifndef __ASSEMBLY__
 
 #include <asm/types.h>
@@ -37,10 +40,6 @@ struct thread_info {
        .real_thread = NULL,                    \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
-#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
@@ -53,8 +52,6 @@ static inline struct thread_info *current_thread_info(void)
        return ti;
 }
 
-#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
-
 #endif
 
 #define TIF_SYSCALL_TRACE      0       /* syscall trace active */
diff --git a/arch/um/include/asm/vmlinux.lds.h b/arch/um/include/asm/vmlinux.lds.h
new file mode 100644 (file)
index 0000000..149494a
--- /dev/null
@@ -0,0 +1,2 @@
+#include <asm/thread_info.h>
+#include <asm-generic/vmlinux.lds.h>
index d417e38..5568cf8 100644 (file)
@@ -1,5 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
 #include <asm/page.h>
 
 OUTPUT_FORMAT(ELF_FORMAT)
index 4e6fcb3..4286441 100644 (file)
@@ -150,7 +150,7 @@ static void show_segv_info(struct uml_pt_regs *regs)
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px sp %px error %x",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
                (void *)UPT_IP(regs), (void *)UPT_SP(regs),
index f433690..a818cce 100644 (file)
@@ -54,7 +54,7 @@ struct cpuinfo_um boot_cpu_data = {
 
 union thread_union cpu0_irqstack
        __attribute__((__section__(".data..init_irqstack"))) =
-               { INIT_THREAD_INFO(init_task) };
+               { .thread_info = INIT_THREAD_INFO(init_task) };
 
 /* Changed in setup_arch, which is called in early boot */
 static char host_info[(__NEW_UTS_LEN + 1) * 5];
index 3d6ed6b..36b07ec 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
 #include <asm/page.h>
 
 OUTPUT_FORMAT(ELF_FORMAT)
index 59b06b4..5c205a9 100644 (file)
@@ -81,9 +81,10 @@ do { \
        } \
 } while (0)
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-                                struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+                               struct mm_struct *mm)
 {
+       return 0;
 }
 
 static inline void arch_unmap(struct mm_struct *mm,
index e79ad6d..5fb728f 100644 (file)
@@ -87,9 +87,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,                                    \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /*
  * how to get the thread information struct from C
  */
index 759a714..8611ef9 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 5f25b39..c4ac604 100644 (file)
@@ -298,7 +298,6 @@ void abort(void)
        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
 }
-EXPORT_SYMBOL(abort);
 
 void __init trap_init(void)
 {
index 8eed3f9..20da391 100644 (file)
@@ -55,7 +55,6 @@ config X86
        select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_KCOV                    if X86_64
        select ARCH_HAS_PMEM_API                if X86_64
-       # Causing hangs/crashes, see the commit that added this change for details.
        select ARCH_HAS_REFCOUNT
        select ARCH_HAS_UACCESS_FLUSHCACHE      if X86_64
        select ARCH_HAS_SET_MEMORY
@@ -89,6 +88,7 @@ config X86
        select GENERIC_CLOCKEVENTS_MIN_ADJUST
        select GENERIC_CMOS_UPDATE
        select GENERIC_CPU_AUTOPROBE
+       select GENERIC_CPU_VULNERABILITIES
        select GENERIC_EARLY_IOREMAP
        select GENERIC_FIND_FIRST_BIT
        select GENERIC_IOMAP
@@ -429,6 +429,19 @@ config GOLDFISH
        def_bool y
        depends on X86_GOLDFISH
 
+config RETPOLINE
+       bool "Avoid speculative indirect branches in kernel"
+       default y
+       help
+         Compile kernel with the retpoline compiler options to guard against
+         kernel-to-user data leaks by avoiding speculative indirect
+         branches. Requires a compiler with -mindirect-branch=thunk-extern
+         support for full protection. The kernel may run slower.
+
+         Without compiler support, at least indirect branches in assembler
+         code are eliminated. Since this includes the syscall entry path,
+         it is not entirely pointless.
+
 config INTEL_RDT
        bool "Intel Resource Director Technology support"
        default n
@@ -926,7 +939,8 @@ config MAXSMP
 config NR_CPUS
        int "Maximum number of CPUs" if SMP && !MAXSMP
        range 2 8 if SMP && X86_32 && !X86_BIGSMP
-       range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK
+       range 2 64 if SMP && X86_32 && X86_BIGSMP
+       range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64
        range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64
        default "1" if !SMP
        default "8192" if MAXSMP
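
To make the RETPOLINE help text above concrete: the option only changes how the compiler emits indirect branches. A hedged illustration of the kind of call site that is affected; the thunk symbol __x86_indirect_thunk_rax matches the naming used later in this series, while the surrounding example code is purely hypothetical. Inspect the output with gcc -S (the extern thunks themselves are only provided inside the kernel, so a full link with the retpoline flags would fail here):

/*
 * Without retpolines the dispatch below compiles to an indirect
 * "call *%rax"-style branch; with -mindirect-branch=thunk-extern
 * -mindirect-branch-register the compiler emits a call to
 * __x86_indirect_thunk_rax instead, which traps speculation.
 */
#include <stdio.h>

typedef long (*syscall_fn_t)(long arg);

static long demo_syscall(long arg)
{
	return arg + 1;
}

static long dispatch(syscall_fn_t *table, unsigned int nr, long arg)
{
	return table[nr](arg);		/* the indirect branch in question */
}

int main(void)
{
	syscall_fn_t table[1] = { demo_syscall };

	printf("%ld\n", dispatch(table, 0, 41));	/* prints 42 */
	return 0;
}
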
index 6293a87..672441c 100644 (file)
@@ -400,6 +400,7 @@ config UNWINDER_FRAME_POINTER
 config UNWINDER_GUESS
        bool "Guess unwinder"
        depends on EXPERT
+       depends on !STACKDEPOT
        ---help---
          This option enables the "guess" unwinder for unwinding kernel stack
          traces.  It scans the stack and reports every kernel text address it
index 3e73bc2..fad5516 100644 (file)
@@ -230,6 +230,14 @@ KBUILD_CFLAGS += -Wno-sign-compare
 #
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
+# Avoid indirect branches in kernel to deal with Spectre
+ifdef CONFIG_RETPOLINE
+    RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+    ifneq ($(RETPOLINE_CFLAGS),)
+        KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+    endif
+endif
+
 archscripts: scripts_basic
        $(Q)$(MAKE) $(build)=arch/x86/tools relocs
 
index 1e9c322..f25e153 100644 (file)
@@ -80,6 +80,7 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
 ifdef CONFIG_X86_64
        vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o
        vmlinux-objs-y += $(obj)/mem_encrypt.o
+       vmlinux-objs-y += $(obj)/pgtable_64.o
 endif
 
 $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
index 20919b4..fc313e2 100644 (file)
@@ -305,10 +305,18 @@ ENTRY(startup_64)
        leaq    boot_stack_end(%rbx), %rsp
 
 #ifdef CONFIG_X86_5LEVEL
-       /* Check if 5-level paging has already enabled */
-       movq    %cr4, %rax
-       testl   $X86_CR4_LA57, %eax
-       jnz     lvl5
+       /*
+        * Check if we need to enable 5-level paging.
+        * RSI holds real mode data and needs to be preserved across
+        * a function call.
+        */
+       pushq   %rsi
+       call    l5_paging_required
+       popq    %rsi
+
+       /* If l5_paging_required() returned zero, we're done here. */
+       cmpq    $0, %rax
+       je      lvl5
 
        /*
         * At this point we are in long mode with 4-level paging enabled,
index b50c424..98761a1 100644 (file)
@@ -169,6 +169,16 @@ void __puthex(unsigned long value)
        }
 }
 
+static bool l5_supported(void)
+{
+       /* Check if leaf 7 is supported. */
+       if (native_cpuid_eax(0) < 7)
+               return 0;
+
+       /* Check if la57 is supported. */
+       return native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31));
+}
+
 #if CONFIG_X86_NEED_RELOCS
 static void handle_relocations(void *output, unsigned long output_len,
                               unsigned long virt_addr)
@@ -362,6 +372,12 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
        console_init();
        debug_putstr("early console in extract_kernel\n");
 
+       if (IS_ENABLED(CONFIG_X86_5LEVEL) && !l5_supported()) {
+               error("This linux kernel as configured requires 5-level paging\n"
+                       "This CPU does not support the required 'cr4.la57' feature\n"
+                       "Unable to boot - please use a kernel appropriate for your CPU\n");
+       }
+
        free_mem_ptr     = heap;        /* Heap */
        free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
 
index d5364ca..b5e5e02 100644 (file)
@@ -23,6 +23,9 @@
  */
 #undef CONFIG_AMD_MEM_ENCRYPT
 
+/* No PAGE_TABLE_ISOLATION support needed either: */
+#undef CONFIG_PAGE_TABLE_ISOLATION
+
 #include "misc.h"
 
 /* These actually do the work of building the kernel identity maps. */
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
new file mode 100644 (file)
index 0000000..b4469a3
--- /dev/null
@@ -0,0 +1,28 @@
+#include <asm/processor.h>
+
+/*
+ * __force_order is used by special_insns.h asm code to force instruction
+ * serialization.
+ *
+ * It is not referenced from the code, but GCC < 5 with -fPIE would fail
+ * due to an undefined symbol. Define it to make these ancient GCCs work.
+ */
+unsigned long __force_order;
+
+int l5_paging_required(void)
+{
+       /* Check if leaf 7 is supported. */
+
+       if (native_cpuid_eax(0) < 7)
+               return 0;
+
+       /* Check if la57 is supported. */
+       if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
+               return 0;
+
+       /* Check if 5-level paging has already been enabled. */
+       if (native_read_cr4() & X86_CR4_LA57)
+               return 0;
+
+       return 1;
+}
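
The same CPUID leaf-7 check can be reproduced from user space. A small sketch, assuming a toolchain that ships GCC/clang's <cpuid.h> (the kernel uses its own native_cpuid_*() helpers, as above); LA57 is CPUID.(EAX=07H,ECX=0):ECX bit 16, which is what X86_FEATURE_LA57 & 31 evaluates to:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 7, sub-leaf 0; returns 0 if the CPU does not report leaf 7. */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 7 not supported");
		return 1;
	}

	printf("la57 (5-level paging) %ssupported\n",
	       (ecx & (1u << 16)) ? "" : "not ");
	return 0;
}
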
index 49f4970..6a10d52 100644 (file)
@@ -44,9 +44,9 @@ FDINITRD=$6
 
 # Make sure the files actually exist
 verify "$FBZIMAGE"
-verify "$MTOOLSRC"
 
 genbzdisk() {
+       verify "$MTOOLSRC"
        mformat a:
        syslinux $FIMAGE
        echo "$KCMDLINE" | mcopy - a:syslinux.cfg
@@ -57,6 +57,7 @@ genbzdisk() {
 }
 
 genfdimage144() {
+       verify "$MTOOLSRC"
        dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null
        mformat v:
        syslinux $FIMAGE
@@ -68,6 +69,7 @@ genfdimage144() {
 }
 
 genfdimage288() {
+       verify "$MTOOLSRC"
        dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null
        mformat w:
        syslinux $FIMAGE
@@ -78,39 +80,43 @@ genfdimage288() {
        mcopy $FBZIMAGE w:linux
 }
 
-genisoimage() {
+geniso() {
        tmp_dir=`dirname $FIMAGE`/isoimage
        rm -rf $tmp_dir
        mkdir $tmp_dir
-       for i in lib lib64 share end ; do
+       for i in lib lib64 share ; do
                for j in syslinux ISOLINUX ; do
                        if [ -f /usr/$i/$j/isolinux.bin ] ; then
                                isolinux=/usr/$i/$j/isolinux.bin
-                               cp $isolinux $tmp_dir
                        fi
                done
                for j in syslinux syslinux/modules/bios ; do
                        if [ -f /usr/$i/$j/ldlinux.c32 ]; then
                                ldlinux=/usr/$i/$j/ldlinux.c32
-                               cp $ldlinux $tmp_dir
                        fi
                done
                if [ -n "$isolinux" -a -n "$ldlinux" ] ; then
                        break
                fi
-               if [ $i = end -a -z "$isolinux" ] ; then
-                       echo 'Need an isolinux.bin file, please install syslinux/isolinux.'
-                       exit 1
-               fi
        done
+       if [ -z "$isolinux" ] ; then
+               echo 'Need an isolinux.bin file, please install syslinux/isolinux.'
+               exit 1
+       fi
+       if [ -z "$ldlinux" ] ; then
+               echo 'Need an ldlinux.c32 file, please install syslinux/isolinux.'
+               exit 1
+       fi
+       cp $isolinux $tmp_dir
+       cp $ldlinux $tmp_dir
        cp $FBZIMAGE $tmp_dir/linux
        echo "$KCMDLINE" > $tmp_dir/isolinux.cfg
        if [ -f "$FDINITRD" ] ; then
                cp "$FDINITRD" $tmp_dir/initrd.img
        fi
-       mkisofs -J -r -input-charset=utf-8 -quiet -o $FIMAGE -b isolinux.bin \
-               -c boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table \
-               $tmp_dir
+       genisoimage -J -r -input-charset=utf-8 -quiet -o $FIMAGE \
+               -b isolinux.bin -c boot.cat -no-emul-boot -boot-load-size 4 \
+               -boot-info-table $tmp_dir
        isohybrid $FIMAGE 2>/dev/null || true
        rm -rf $tmp_dir
 }
@@ -119,6 +125,6 @@ case $1 in
        bzdisk)     genbzdisk;;
        fdimage144) genfdimage144;;
        fdimage288) genfdimage288;;
-       isoimage)   genisoimage;;
+       isoimage)   geniso;;
        *)          echo 'Unknown image format'; exit 1;
 esac
index 16627fe..3d09e3a 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/linkage.h>
 #include <asm/inst.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 /*
  * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8)
        pxor INC, STATE4
        movdqu IV, 0x30(OUTP)
 
-       call *%r11
+       CALL_NOSPEC %r11
 
        movdqu 0x00(OUTP), INC
        pxor INC, STATE1
@@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8)
        _aesni_gf128mul_x_ble()
        movups IV, (IVP)
 
-       call *%r11
+       CALL_NOSPEC %r11
 
        movdqu 0x40(OUTP), INC
        pxor INC, STATE1
index f7c495e..a14af6e 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way:
        vpxor 14 * 16(%rax), %xmm15, %xmm14;
        vpxor 15 * 16(%rax), %xmm15, %xmm15;
 
-       call *%r9;
+       CALL_NOSPEC %r9;
 
        addq $(16 * 16), %rsp;
 
index eee5b39..b66bbfa 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way:
        vpxor 14 * 32(%rax), %ymm15, %ymm14;
        vpxor 15 * 32(%rax), %ymm15, %ymm15;
 
-       call *%r9;
+       CALL_NOSPEC %r9;
 
        addq $(16 * 32), %rsp;
 
index 7a7de27..d9b734d 100644 (file)
@@ -45,6 +45,7 @@
 
 #include <asm/inst.h>
 #include <linux/linkage.h>
+#include <asm/nospec-branch.h>
 
 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
 
@@ -172,7 +173,7 @@ continue_block:
        movzxw  (bufp, %rax, 2), len
        lea     crc_array(%rip), bufp
        lea     (bufp, len, 1), bufp
-       jmp     *bufp
+       JMP_NOSPEC bufp
 
        ################################################################
        ## 2a) PROCESS FULL BLOCKS:
index 399a29d..cb91a64 100644 (file)
@@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc,
 
        salsa20_ivsetup(ctx, walk.iv);
 
-       if (likely(walk.nbytes == nbytes))
-       {
-               salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
-                                     walk.dst.virt.addr, nbytes);
-               return blkcipher_walk_done(desc, &walk, 0);
-       }
-
        while (walk.nbytes >= 64) {
                salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
                                      walk.dst.virt.addr,
index 3fd8bc5..3f48f69 100644 (file)
@@ -1,6 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/jump_label.h>
 #include <asm/unwind_hints.h>
+#include <asm/cpufeatures.h>
+#include <asm/page_types.h>
+#include <asm/percpu.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor-flags.h>
 
 /*
 
@@ -187,6 +192,148 @@ For 32-bit we have the following conventions - kernel is built with
 #endif
 .endm
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+
+/*
+ * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
+ * halves:
+ */
+#define PTI_USER_PGTABLE_BIT           PAGE_SHIFT
+#define PTI_USER_PGTABLE_MASK          (1 << PTI_USER_PGTABLE_BIT)
+#define PTI_USER_PCID_BIT              X86_CR3_PTI_PCID_USER_BIT
+#define PTI_USER_PCID_MASK             (1 << PTI_USER_PCID_BIT)
+#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
+
+.macro SET_NOFLUSH_BIT reg:req
+       bts     $X86_CR3_PCID_NOFLUSH_BIT, \reg
+.endm
+
+.macro ADJUST_KERNEL_CR3 reg:req
+       ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
+       /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
+       andq    $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
+.endm
+
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+       mov     %cr3, \scratch_reg
+       ADJUST_KERNEL_CR3 \scratch_reg
+       mov     \scratch_reg, %cr3
+.Lend_\@:
+.endm
+
+#define THIS_CPU_user_pcid_flush_mask   \
+       PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
+
+.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+       mov     %cr3, \scratch_reg
+
+       ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
+
+       /*
+        * Test if the ASID needs a flush.
+        */
+       movq    \scratch_reg, \scratch_reg2
+       andq    $(0x7FF), \scratch_reg          /* mask ASID */
+       bt      \scratch_reg, THIS_CPU_user_pcid_flush_mask
+       jnc     .Lnoflush_\@
+
+       /* Flush needed, clear the bit */
+       btr     \scratch_reg, THIS_CPU_user_pcid_flush_mask
+       movq    \scratch_reg2, \scratch_reg
+       jmp     .Lwrcr3_pcid_\@
+
+.Lnoflush_\@:
+       movq    \scratch_reg2, \scratch_reg
+       SET_NOFLUSH_BIT \scratch_reg
+
+.Lwrcr3_pcid_\@:
+       /* Flip the ASID to the user version */
+       orq     $(PTI_USER_PCID_MASK), \scratch_reg
+
+.Lwrcr3_\@:
+       /* Flip the PGD to the user version */
+       orq     $(PTI_USER_PGTABLE_MASK), \scratch_reg
+       mov     \scratch_reg, %cr3
+.Lend_\@:
+.endm
+
+.macro SWITCH_TO_USER_CR3_STACK        scratch_reg:req
+       pushq   %rax
+       SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
+       popq    %rax
+.endm
+
+.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
+       ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
+       movq    %cr3, \scratch_reg
+       movq    \scratch_reg, \save_reg
+       /*
+        * Test the user pagetable bit. If set, then the user page tables
+        * are active. If clear, CR3 already has the kernel page table
+        * active.
+        */
+       bt      $PTI_USER_PGTABLE_BIT, \scratch_reg
+       jnc     .Ldone_\@
+
+       ADJUST_KERNEL_CR3 \scratch_reg
+       movq    \scratch_reg, %cr3
+
+.Ldone_\@:
+.endm
+
+.macro RESTORE_CR3 scratch_reg:req save_reg:req
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+
+       ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
+
+       /*
+        * KERNEL pages can always resume with NOFLUSH as we do
+        * explicit flushes.
+        */
+       bt      $PTI_USER_PGTABLE_BIT, \save_reg
+       jnc     .Lnoflush_\@
+
+       /*
+        * Check if there's a pending flush for the user ASID we're
+        * about to set.
+        */
+       movq    \save_reg, \scratch_reg
+       andq    $(0x7FF), \scratch_reg
+       bt      \scratch_reg, THIS_CPU_user_pcid_flush_mask
+       jnc     .Lnoflush_\@
+
+       btr     \scratch_reg, THIS_CPU_user_pcid_flush_mask
+       jmp     .Lwrcr3_\@
+
+.Lnoflush_\@:
+       SET_NOFLUSH_BIT \save_reg
+
+.Lwrcr3_\@:
+       /*
+        * The CR3 write could be avoided when not changing its value,
+        * but would require a CR3 read *and* a scratch register.
+        */
+       movq    \save_reg, %cr3
+.Lend_\@:
+.endm
+
+#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */
+
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+.endm
+.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
+.endm
+.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
+.endm
+.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
+.endm
+.macro RESTORE_CR3 scratch_reg:req save_reg:req
+.endm
+
+#endif
+
 #endif /* CONFIG_X86_64 */
 
 /*
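
The CR3 switching done by the PTI macros above is plain bit arithmetic on the CR3 value. A purely illustrative user-space sketch, assuming the bit positions used in this hunk (PAGE_SHIFT = 12 for the PGD half, X86_CR3_PTI_PCID_USER_BIT = 11 for the user ASID, NOFLUSH in bit 63); the example CR3 value is made up:

#include <stdio.h>
#include <stdint.h>

#define PTI_USER_PGTABLE_BIT	12	/* selects the user half of the 8k PGD */
#define PTI_USER_PCID_BIT	11	/* user ASID bit */
#define PTI_USER_BITS		((1ULL << PTI_USER_PGTABLE_BIT) | \
				 (1ULL << PTI_USER_PCID_BIT))
#define CR3_NOFLUSH		(1ULL << 63)	/* suppress the implicit TLB flush */

int main(void)
{
	uint64_t kernel_cr3 = 0x7a3f6001ULL;	/* made-up value: PGD phys | PCID 1 */

	/* SWITCH_TO_USER_CR3: flip to the user PGD half and user ASID,
	 * skipping the TLB flush when none is pending for that ASID. */
	uint64_t user_cr3 = kernel_cr3 | PTI_USER_BITS | CR3_NOFLUSH;

	/* ADJUST_KERNEL_CR3 clears the two user bits again; clearing NOFLUSH
	 * here as well just recovers the original value for the demo. */
	uint64_t kernel_again = user_cr3 & ~(PTI_USER_BITS | CR3_NOFLUSH);

	printf("kernel %#llx -> user %#llx -> kernel %#llx\n",
	       (unsigned long long)kernel_cr3,
	       (unsigned long long)user_cr3,
	       (unsigned long long)kernel_again);
	return 0;
}
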
index 4838037..60c4c34 100644 (file)
@@ -44,6 +44,7 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
        .section .entry.text, "ax"
 
@@ -243,6 +244,17 @@ ENTRY(__switch_to_asm)
        movl    %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
 #endif
 
+#ifdef CONFIG_RETPOLINE
+       /*
+        * When switching from a shallower to a deeper call stack
+        * the RSB may either underflow or use entries populated
+        * with userspace addresses. On CPUs where those concerns
+        * exist, overwrite the RSB with entries which capture
+        * speculative execution to prevent attack.
+        */
+       FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
+
        /* restore callee-saved registers */
        popl    %esi
        popl    %edi
@@ -290,7 +302,7 @@ ENTRY(ret_from_fork)
 
        /* kernel thread */
 1:     movl    %edi, %eax
-       call    *%ebx
+       CALL_NOSPEC %ebx
        /*
         * A kernel thread is allowed to return here after successfully
         * calling do_execve().  Exit to userspace to complete the execve()
@@ -919,7 +931,7 @@ common_exception:
        movl    %ecx, %es
        TRACE_IRQS_OFF
        movl    %esp, %eax                      # pt_regs pointer
-       call    *%edi
+       CALL_NOSPEC %edi
        jmp     ret_from_exception
 END(common_exception)
 
@@ -941,9 +953,10 @@ ENTRY(debug)
        movl    %esp, %eax                      # pt_regs pointer
 
        /* Are we currently on the SYSENTER stack? */
-       PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
-       subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
-       cmpl    $SIZEOF_SYSENTER_stack, %ecx
+       movl    PER_CPU_VAR(cpu_entry_area), %ecx
+       addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+       subl    %eax, %ecx      /* ecx = (end of entry_stack) - esp */
+       cmpl    $SIZEOF_entry_stack, %ecx
        jb      .Ldebug_from_sysenter_stack
 
        TRACE_IRQS_OFF
@@ -984,9 +997,10 @@ ENTRY(nmi)
        movl    %esp, %eax                      # pt_regs pointer
 
        /* Are we currently on the SYSENTER stack? */
-       PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
-       subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
-       cmpl    $SIZEOF_SYSENTER_stack, %ecx
+       movl    PER_CPU_VAR(cpu_entry_area), %ecx
+       addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+       subl    %eax, %ecx      /* ecx = (end of entry_stack) - esp */
+       cmpl    $SIZEOF_entry_stack, %ecx
        jb      .Lnmi_from_sysenter_stack
 
        /* Not on SYSENTER stack. */
index f81d50d..ff6f802 100644 (file)
@@ -23,7 +23,6 @@
 #include <asm/segment.h>
 #include <asm/cache.h>
 #include <asm/errno.h>
-#include "calling.h"
 #include <asm/asm-offsets.h>
 #include <asm/msr.h>
 #include <asm/unistd.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 #include <linux/err.h>
 
+#include "calling.h"
+
 .code64
 .section .entry.text, "ax"
 
@@ -140,6 +142,67 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
+       .pushsection .entry_trampoline, "ax"
+
+/*
+ * The code in here gets remapped into cpu_entry_area's trampoline.  This means
+ * that the assembler and linker have the wrong idea as to where this code
+ * lives (and, in fact, it's mapped more than once, so it's not even at a
+ * fixed address).  So we can't reference any symbols outside the entry
+ * trampoline and expect it to work.
+ *
+ * Instead, we carefully abuse %rip-relative addressing.
+ * _entry_trampoline(%rip) refers to the start of the remapped entry
+ * trampoline.  We can thus find cpu_entry_area with this macro:
+ */
+
+#define CPU_ENTRY_AREA \
+       _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
+
+/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
+#define RSP_SCRATCH    CPU_ENTRY_AREA_entry_stack + \
+                       SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
+
+ENTRY(entry_SYSCALL_64_trampoline)
+       UNWIND_HINT_EMPTY
+       swapgs
+
+       /* Stash the user RSP. */
+       movq    %rsp, RSP_SCRATCH
+
+       /* Note: using %rsp as a scratch reg. */
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+
+       /* Load the top of the task stack into RSP */
+       movq    CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
+
+       /* Start building the simulated IRET frame. */
+       pushq   $__USER_DS                      /* pt_regs->ss */
+       pushq   RSP_SCRATCH                     /* pt_regs->sp */
+       pushq   %r11                            /* pt_regs->flags */
+       pushq   $__USER_CS                      /* pt_regs->cs */
+       pushq   %rcx                            /* pt_regs->ip */
+
+       /*
+        * x86 lacks a near absolute jump, and we can't jump to the real
+        * entry text with a relative jump.  We could push the target
+        * address and then use retq, but this destroys the pipeline on
+        * many CPUs (wasting over 20 cycles on Sandy Bridge).  Instead,
+        * spill RDI and restore it in a second-stage trampoline.
+        */
+       pushq   %rdi
+       movq    $entry_SYSCALL_64_stage2, %rdi
+       JMP_NOSPEC %rdi
+END(entry_SYSCALL_64_trampoline)
+
+       .popsection
+
+ENTRY(entry_SYSCALL_64_stage2)
+       UNWIND_HINT_EMPTY
+       popq    %rdi
+       jmp     entry_SYSCALL_64_after_hwframe
+END(entry_SYSCALL_64_stage2)
+
 ENTRY(entry_SYSCALL_64)
        UNWIND_HINT_EMPTY
        /*
@@ -149,6 +212,10 @@ ENTRY(entry_SYSCALL_64)
         */
 
        swapgs
+       /*
+        * This path is not taken when PAGE_TABLE_ISOLATION is disabled,
+        * so no CR3 switch is required here.
+        */
        movq    %rsp, PER_CPU_VAR(rsp_scratch)
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
@@ -204,7 +271,12 @@ entry_SYSCALL_64_fastpath:
         * It might end up jumping to the slow path.  If it jumps, RAX
         * and all argument registers are clobbered.
         */
+#ifdef CONFIG_RETPOLINE
+       movq    sys_call_table(, %rax, 8), %rax
+       call    __x86_indirect_thunk_rax
+#else
        call    *sys_call_table(, %rax, 8)
+#endif
 .Lentry_SYSCALL_64_after_fastpath_call:
 
        movq    %rax, RAX(%rsp)
@@ -330,8 +402,25 @@ syscall_return_via_sysret:
        popq    %rsi    /* skip rcx */
        popq    %rdx
        popq    %rsi
+
+       /*
+        * Now all regs are restored except RSP and RDI.
+        * Save old stack pointer and switch to trampoline stack.
+        */
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+
+       pushq   RSP-RDI(%rdi)   /* RSP */
+       pushq   (%rdi)          /* RDI */
+
+       /*
+        * We are on the trampoline stack.  All regs except RDI are live.
+        * We can do future final exit work right here.
+        */
+       SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
+
        popq    %rdi
-       movq    RSP-ORIG_RAX(%rsp), %rsp
+       popq    %rsp
        USERGS_SYSRET64
 END(entry_SYSCALL_64)
 
@@ -359,7 +448,7 @@ ENTRY(stub_ptregs_64)
        jmp     entry_SYSCALL64_slow_path
 
 1:
-       jmp     *%rax                           /* Called from C */
+       JMP_NOSPEC %rax                         /* Called from C */
 END(stub_ptregs_64)
 
 .macro ptregs_stub func
@@ -402,6 +491,17 @@ ENTRY(__switch_to_asm)
        movq    %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
 #endif
 
+#ifdef CONFIG_RETPOLINE
+       /*
+        * When switching from a shallower to a deeper call stack
+        * the RSB may either underflow or use entries populated
+        * with userspace addresses. On CPUs where those concerns
+        * exist, overwrite the RSB with entries which capture
+        * speculative execution to prevent attack.
+        */
+       FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
+
        /* restore callee-saved registers */
        popq    %r15
        popq    %r14
@@ -438,7 +538,7 @@ ENTRY(ret_from_fork)
 1:
        /* kernel thread */
        movq    %r12, %rdi
-       call    *%rbx
+       CALL_NOSPEC %rbx
        /*
         * A kernel thread is allowed to return here after successfully
         * calling do_execve().  Exit to userspace to complete the execve()
@@ -466,12 +566,13 @@ END(irq_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
-       pushfq
-       testl $X86_EFLAGS_IF, (%rsp)
+       pushq %rax
+       SAVE_FLAGS(CLBR_RAX)
+       testl $X86_EFLAGS_IF, %eax
        jz .Lokay_\@
        ud2
 .Lokay_\@:
-       addq $8, %rsp
+       popq %rax
 #endif
 .endm
 
@@ -563,6 +664,13 @@ END(irq_entries_start)
 /* 0(%rsp): ~(interrupt number) */
        .macro interrupt func
        cld
+
+       testb   $3, CS-ORIG_RAX(%rsp)
+       jz      1f
+       SWAPGS
+       call    switch_to_thread_stack
+1:
+
        ALLOC_PT_GPREGS_ON_STACK
        SAVE_C_REGS
        SAVE_EXTRA_REGS
@@ -572,12 +680,8 @@ END(irq_entries_start)
        jz      1f
 
        /*
-        * IRQ from user mode.  Switch to kernel gsbase and inform context
-        * tracking that we're in kernel mode.
-        */
-       SWAPGS
-
-       /*
+        * IRQ from user mode.
+        *
         * We need to tell lockdep that IRQs are off.  We can't do this until
         * we fix gsbase, and we should do it before enter_from_user_mode
         * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
@@ -630,10 +734,43 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
        ud2
 1:
 #endif
-       SWAPGS
        POP_EXTRA_REGS
-       POP_C_REGS
-       addq    $8, %rsp        /* skip regs->orig_ax */
+       popq    %r11
+       popq    %r10
+       popq    %r9
+       popq    %r8
+       popq    %rax
+       popq    %rcx
+       popq    %rdx
+       popq    %rsi
+
+       /*
+        * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
+        * Save old stack pointer and switch to trampoline stack.
+        */
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+
+       /* Copy the IRET frame to the trampoline stack. */
+       pushq   6*8(%rdi)       /* SS */
+       pushq   5*8(%rdi)       /* RSP */
+       pushq   4*8(%rdi)       /* EFLAGS */
+       pushq   3*8(%rdi)       /* CS */
+       pushq   2*8(%rdi)       /* RIP */
+
+       /* Push user RDI on the trampoline stack. */
+       pushq   (%rdi)
+
+       /*
+        * We are on the trampoline stack.  All regs except RDI are live.
+        * We can do future final exit work right here.
+        */
+
+       SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
+
+       /* Restore RDI. */
+       popq    %rdi
+       SWAPGS
        INTERRUPT_RETURN
 
 
@@ -713,7 +850,9 @@ native_irq_return_ldt:
         */
 
        pushq   %rdi                            /* Stash user RDI */
-       SWAPGS
+       SWAPGS                                  /* to kernel GS */
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi   /* to kernel CR3 */
+
        movq    PER_CPU_VAR(espfix_waddr), %rdi
        movq    %rax, (0*8)(%rdi)               /* user RAX */
        movq    (1*8)(%rsp), %rax               /* user RIP */
@@ -729,7 +868,6 @@ native_irq_return_ldt:
        /* Now RAX == RSP. */
 
        andl    $0xffff0000, %eax               /* RAX = (RSP & 0xffff0000) */
-       popq    %rdi                            /* Restore user RDI */
 
        /*
         * espfix_stack[31:16] == 0.  The page tables are set up such that
@@ -740,7 +878,11 @@ native_irq_return_ldt:
         * still points to an RO alias of the ESPFIX stack.
         */
        orq     PER_CPU_VAR(espfix_stack), %rax
-       SWAPGS
+
+       SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
+       SWAPGS                                  /* to user GS */
+       popq    %rdi                            /* Restore user RDI */
+
        movq    %rax, %rsp
        UNWIND_HINT_IRET_REGS offset=8
 
@@ -829,7 +971,35 @@ apicinterrupt IRQ_WORK_VECTOR                      irq_work_interrupt              smp_irq_work_interrupt
 /*
  * Exception entry points.
  */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
+
+/*
+ * Switch to the thread stack.  This is called with the IRET frame and
+ * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
+ * space has not been allocated for them.)
+ */
+ENTRY(switch_to_thread_stack)
+       UNWIND_HINT_FUNC
+
+       pushq   %rdi
+       /* Need to switch before accessing the thread stack. */
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+       UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
+
+       pushq   7*8(%rdi)               /* regs->ss */
+       pushq   6*8(%rdi)               /* regs->rsp */
+       pushq   5*8(%rdi)               /* regs->eflags */
+       pushq   4*8(%rdi)               /* regs->cs */
+       pushq   3*8(%rdi)               /* regs->ip */
+       pushq   2*8(%rdi)               /* regs->orig_ax */
+       pushq   8(%rdi)                 /* return address */
+       UNWIND_HINT_FUNC
+
+       movq    (%rdi), %rdi
+       ret
+END(switch_to_thread_stack)
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
@@ -848,11 +1018,12 @@ ENTRY(\sym)
 
        ALLOC_PT_GPREGS_ON_STACK
 
-       .if \paranoid
-       .if \paranoid == 1
+       .if \paranoid < 2
        testb   $3, CS(%rsp)                    /* If coming from userspace, switch stacks */
-       jnz     1f
+       jnz     .Lfrom_usermode_switch_stack_\@
        .endif
+
+       .if \paranoid
        call    paranoid_entry
        .else
        call    error_entry
@@ -894,20 +1065,15 @@ ENTRY(\sym)
        jmp     error_exit
        .endif
 
-       .if \paranoid == 1
+       .if \paranoid < 2
        /*
-        * Paranoid entry from userspace.  Switch stacks and treat it
+        * Entry from userspace.  Switch stacks and treat it
         * as a normal entry.  This means that paranoid handlers
         * run in real process context if user_mode(regs).
         */
-1:
+.Lfrom_usermode_switch_stack_\@:
        call    error_entry
 
-
-       movq    %rsp, %rdi                      /* pt_regs pointer */
-       call    sync_regs
-       movq    %rax, %rsp                      /* switch stack */
-
        movq    %rsp, %rdi                      /* pt_regs pointer */
 
        .if \has_error_code
@@ -1098,7 +1264,7 @@ idtentry async_page_fault do_async_page_fault     has_error_code=1
 #endif
 
 #ifdef CONFIG_X86_MCE
-idtentry machine_check                                 has_error_code=0        paranoid=1 do_sym=*machine_check_vector(%rip)
+idtentry machine_check         do_mce                  has_error_code=0        paranoid=1
 #endif
 
 /*
@@ -1119,7 +1285,11 @@ ENTRY(paranoid_entry)
        js      1f                              /* negative -> in kernel */
        SWAPGS
        xorl    %ebx, %ebx
-1:     ret
+
+1:
+       SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
+
+       ret
 END(paranoid_entry)
 
 /*
@@ -1141,6 +1311,7 @@ ENTRY(paranoid_exit)
        testl   %ebx, %ebx                      /* swapgs needed? */
        jnz     .Lparanoid_exit_no_swapgs
        TRACE_IRQS_IRETQ
+       RESTORE_CR3     scratch_reg=%rbx save_reg=%r14
        SWAPGS_UNSAFE_STACK
        jmp     .Lparanoid_exit_restore
 .Lparanoid_exit_no_swapgs:
@@ -1168,8 +1339,18 @@ ENTRY(error_entry)
         * from user mode due to an IRET fault.
         */
        SWAPGS
+       /* We have user CR3.  Change to kernel CR3. */
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 
 .Lerror_entry_from_usermode_after_swapgs:
+       /* Put us onto the real thread stack. */
+       popq    %r12                            /* save return addr in %r12 */
+       movq    %rsp, %rdi                      /* arg0 = pt_regs pointer */
+       call    sync_regs
+       movq    %rax, %rsp                      /* switch stack */
+       ENCODE_FRAME_POINTER
+       pushq   %r12
+
        /*
         * We need to tell lockdep that IRQs are off.  We can't do this until
         * we fix gsbase, and we should do it before enter_from_user_mode
@@ -1206,6 +1387,7 @@ ENTRY(error_entry)
         * .Lgs_change's error handler with kernel gsbase.
         */
        SWAPGS
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
        jmp .Lerror_entry_done
 
 .Lbstep_iret:
@@ -1215,10 +1397,11 @@ ENTRY(error_entry)
 
 .Lerror_bad_iret:
        /*
-        * We came from an IRET to user mode, so we have user gsbase.
-        * Switch to kernel gsbase:
+        * We came from an IRET to user mode, so we have user
+        * gsbase and CR3.  Switch to kernel gsbase and CR3:
         */
        SWAPGS
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 
        /*
         * Pretend that the exception came from user mode: set up pt_regs
@@ -1250,6 +1433,10 @@ END(error_exit)
 /*
  * Runs on exception stack.  Xen PV does not go through this path at all,
  * so we can use real assembly here.
+ *
+ * Registers:
+ *     %r14: Used to save/restore the CR3 of the interrupted context
+ *           when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
  */
 ENTRY(nmi)
        UNWIND_HINT_IRET_REGS
@@ -1313,6 +1500,7 @@ ENTRY(nmi)
 
        swapgs
        cld
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
        movq    %rsp, %rdx
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
        UNWIND_HINT_IRET_REGS base=%rdx offset=8
@@ -1565,6 +1753,8 @@ end_repeat_nmi:
        movq    $-1, %rsi
        call    do_nmi
 
+       RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
+
        testl   %ebx, %ebx                      /* swapgs needed? */
        jnz     nmi_restore
 nmi_swapgs:
index 568e130..98d5358 100644 (file)
  */
 ENTRY(entry_SYSENTER_compat)
        /* Interrupts are off on entry. */
-       SWAPGS_UNSAFE_STACK
+       SWAPGS
+
+       /* We are about to clobber %rsp anyway, clobbering here is OK */
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        /*
@@ -186,8 +190,13 @@ ENTRY(entry_SYSCALL_compat)
        /* Interrupts are off on entry. */
        swapgs
 
-       /* Stash user ESP and switch to the kernel stack. */
+       /* Stash user ESP */
        movl    %esp, %r8d
+
+       /* Use %rsp as scratch reg. User ESP is stashed in r8 */
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+
+       /* Switch to the kernel stack */
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        /* Construct struct pt_regs on stack */
@@ -256,10 +265,22 @@ sysret32_from_system_call:
         * when the system call started, which is already known to user
         * code.  We zero R8-R10 to avoid info leaks.
          */
+       movq    RSP-ORIG_RAX(%rsp), %rsp
+
+       /*
+        * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored
+        * on the process stack which is not mapped to userspace and
+        * not readable after we SWITCH_TO_USER_CR3.  Delay the CR3
+        * switch until after the last reference to the process
+        * stack.
+        *
+        * %r8/%r9 are zeroed before the sysret, thus safe to clobber.
+        */
+       SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9
+
        xorq    %r8, %r8
        xorq    %r9, %r9
        xorq    %r10, %r10
-       movq    RSP-ORIG_RAX(%rsp), %rsp
        swapgs
        sysretl
 END(entry_SYSCALL_compat)
@@ -306,8 +327,11 @@ ENTRY(entry_INT80_compat)
         */
        movl    %eax, %eax
 
-       /* Construct struct pt_regs on stack (iret frame is already on stack) */
        pushq   %rax                    /* pt_regs->orig_ax */
+
+       /* switch to thread stack expects orig_ax to be pushed */
+       call    switch_to_thread_stack
+
        pushq   %rdi                    /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
        pushq   %rdx                    /* pt_regs->dx */
index 11b13c4..f19856d 100644 (file)
@@ -324,5 +324,5 @@ notrace time_t __vdso_time(time_t *t)
                *t = result;
        return result;
 }
-int time(time_t *t)
+time_t time(time_t *t)
        __attribute__((weak, alias("__vdso_time")));
index f279ba2..577fa8a 100644 (file)
@@ -37,6 +37,7 @@
 #include <asm/unistd.h>
 #include <asm/fixmap.h>
 #include <asm/traps.h>
+#include <asm/paravirt.h>
 
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
@@ -138,6 +139,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        WARN_ON_ONCE(address != regs->ip);
 
+       /* This should be unreachable in NATIVE mode. */
+       if (WARN_ON(vsyscall_mode == NATIVE))
+               return false;
+
        if (vsyscall_mode == NONE) {
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall attempted with vsyscall=none");
@@ -329,16 +334,47 @@ int in_gate_area_no_mm(unsigned long addr)
        return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
 }
 
+/*
+ * The VSYSCALL page is the only user-accessible page in the kernel address
+ * range.  Normally, the kernel page tables can have _PAGE_USER clear, but
+ * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
+ * are enabled.
+ *
+ * Some day we may create a "minimal" vsyscall mode in which we emulate
+ * vsyscalls but leave the page not present.  If so, we skip calling
+ * this.
+ */
+void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
+{
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
+       set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
+       p4d = p4d_offset(pgd, VSYSCALL_ADDR);
+#if CONFIG_PGTABLE_LEVELS >= 5
+       p4d->p4d |= _PAGE_USER;
+#endif
+       pud = pud_offset(p4d, VSYSCALL_ADDR);
+       set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
+       pmd = pmd_offset(pud, VSYSCALL_ADDR);
+       set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
+}
+
 void __init map_vsyscall(void)
 {
        extern char __vsyscall_page;
        unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
 
-       if (vsyscall_mode != NONE)
+       if (vsyscall_mode != NONE) {
                __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
                             vsyscall_mode == NATIVE
                             ? PAGE_KERNEL_VSYSCALL
                             : PAGE_KERNEL_VVAR);
+               set_vsyscall_pgtable_user_bits(swapper_pg_dir);
+       }
 
        BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
                     (unsigned long)VSYSCALL_ADDR);
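
For context on why that page must stay user-accessible at a fixed address: the legacy vsyscall entry points sit at hard-wired addresses inside it (time() at 0xffffffffff600400, gettimeofday() at 0xffffffffff600000, getcpu() at 0xffffffffff600800). A hedged user-space demonstration, which only works when the kernel runs with vsyscall=emulate or vsyscall=native; with vsyscall=none it simply faults, so treat it as an illustration rather than an API:

#include <stdio.h>
#include <time.h>

typedef time_t (*vsyscall_time_t)(time_t *);

int main(void)
{
	/* Fixed legacy vsyscall address of time(). */
	vsyscall_time_t vtime = (vsyscall_time_t)0xffffffffff600400ULL;

	printf("vsyscall time() = %ld\n", (long)vtime(NULL));
	return 0;
}
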
index a6eee5a..2aefacf 100644 (file)
@@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void)
        int ret;
 
        if (!x86_match_cpu(cpu_match))
-               return 0;
+               return -ENODEV;
 
        if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
                return -ENODEV;
index 141e07b..24ffa1e 100644 (file)
@@ -582,6 +582,24 @@ static __init int bts_init(void)
        if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
                return -ENODEV;
 
+       if (boot_cpu_has(X86_FEATURE_PTI)) {
+               /*
+                * BTS hardware writes through a virtual memory map we must
+                * either use the kernel physical map, or the user mapping of
+                * the AUX buffer.
+                *
+                * However, since this driver supports per-CPU and per-task inherit,
+                * we cannot use the user mapping since it will not be available
+                * if we're not running the owning process.
+                *
+                * With PTI we can't use the kernel map either, because it's not
+                * there when we run userspace.
+                *
+                * For now, disable this driver when using PTI.
+                */
+               return -ENODEV;
+       }
+
        bts_pmu.capabilities    = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
                                  PERF_PMU_CAP_EXCLUSIVE;
        bts_pmu.task_ctx_nr     = perf_sw_context;
index 09c26a4..731153a 100644 (file)
@@ -3847,6 +3847,8 @@ static struct attribute *intel_pmu_attrs[] = {
 
 __init int intel_pmu_init(void)
 {
+       struct attribute **extra_attr = NULL;
+       struct attribute **to_free = NULL;
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
@@ -3854,7 +3856,6 @@ __init int intel_pmu_init(void)
        unsigned int unused;
        struct extra_reg *er;
        int version, i;
-       struct attribute **extra_attr = NULL;
        char *name;
 
        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
@@ -4294,6 +4295,7 @@ __init int intel_pmu_init(void)
                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
                        hsw_format_attr : nhm_format_attr;
                extra_attr = merge_attr(extra_attr, skl_format_attr);
+               to_free = extra_attr;
                x86_pmu.cpu_events = get_hsw_events_attrs();
                intel_pmu_pebs_data_source_skl(
                        boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
@@ -4401,6 +4403,7 @@ __init int intel_pmu_init(void)
                pr_cont("full-width counters, ");
        }
 
+       kfree(to_free);
        return 0;
 }
 
index 3674a4b..18c25ab 100644 (file)
@@ -3,16 +3,19 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 
+#include <asm/cpu_entry_area.h>
 #include <asm/perf_event.h>
+#include <asm/tlbflush.h>
 #include <asm/insn.h>
 
 #include "../perf_event.h"
 
+/* Waste a full page so it can be mapped into the cpu_entry_area */
+DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
+
 /* The size of a BTS record in bytes: */
 #define BTS_RECORD_SIZE                24
 
-#define BTS_BUFFER_SIZE                (PAGE_SIZE << 4)
-#define PEBS_BUFFER_SIZE       (PAGE_SIZE << 4)
 #define PEBS_FIXUP_SIZE                PAGE_SIZE
 
 /*
@@ -279,17 +282,67 @@ void fini_debug_store_on_cpu(int cpu)
 
 static DEFINE_PER_CPU(void *, insn_buffer);
 
-static int alloc_pebs_buffer(int cpu)
+static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
 {
-       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+       unsigned long start = (unsigned long)cea;
+       phys_addr_t pa;
+       size_t msz = 0;
+
+       pa = virt_to_phys(addr);
+
+       preempt_disable();
+       for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
+               cea_set_pte(cea, pa, prot);
+
+       /*
+        * This is a cross-CPU update of the cpu_entry_area, we must shoot down
+        * all TLB entries for it.
+        */
+       flush_tlb_kernel_range(start, start + size);
+       preempt_enable();
+}
+
+static void ds_clear_cea(void *cea, size_t size)
+{
+       unsigned long start = (unsigned long)cea;
+       size_t msz = 0;
+
+       preempt_disable();
+       for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
+               cea_set_pte(cea, 0, PAGE_NONE);
+
+       flush_tlb_kernel_range(start, start + size);
+       preempt_enable();
+}
+
+static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
+{
+       unsigned int order = get_order(size);
        int node = cpu_to_node(cpu);
-       int max;
-       void *buffer, *ibuffer;
+       struct page *page;
+
+       page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
+       return page ? page_address(page) : NULL;
+}
+
+static void dsfree_pages(const void *buffer, size_t size)
+{
+       if (buffer)
+               free_pages((unsigned long)buffer, get_order(size));
+}
+
+static int alloc_pebs_buffer(int cpu)
+{
+       struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
+       struct debug_store *ds = hwev->ds;
+       size_t bsiz = x86_pmu.pebs_buffer_size;
+       int max, node = cpu_to_node(cpu);
+       void *buffer, *ibuffer, *cea;
 
        if (!x86_pmu.pebs)
                return 0;
 
-       buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
+       buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
        if (unlikely(!buffer))
                return -ENOMEM;
 
@@ -300,99 +353,94 @@ static int alloc_pebs_buffer(int cpu)
        if (x86_pmu.intel_cap.pebs_format < 2) {
                ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
                if (!ibuffer) {
-                       kfree(buffer);
+                       dsfree_pages(buffer, bsiz);
                        return -ENOMEM;
                }
                per_cpu(insn_buffer, cpu) = ibuffer;
        }
-
-       max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;
-
-       ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+       hwev->ds_pebs_vaddr = buffer;
+       /* Update the cpu entry area mapping */
+       cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
+       ds->pebs_buffer_base = (unsigned long) cea;
+       ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
        ds->pebs_index = ds->pebs_buffer_base;
-       ds->pebs_absolute_maximum = ds->pebs_buffer_base +
-               max * x86_pmu.pebs_record_size;
-
+       max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
+       ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
        return 0;
 }
 
 static void release_pebs_buffer(int cpu)
 {
-       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+       struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
+       void *cea;
 
-       if (!ds || !x86_pmu.pebs)
+       if (!x86_pmu.pebs)
                return;
 
        kfree(per_cpu(insn_buffer, cpu));
        per_cpu(insn_buffer, cpu) = NULL;
 
-       kfree((void *)(unsigned long)ds->pebs_buffer_base);
-       ds->pebs_buffer_base = 0;
+       /* Clear the fixmap */
+       cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
+       ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
+       dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
+       hwev->ds_pebs_vaddr = NULL;
 }
 
 static int alloc_bts_buffer(int cpu)
 {
-       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-       int node = cpu_to_node(cpu);
-       int max, thresh;
-       void *buffer;
+       struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
+       struct debug_store *ds = hwev->ds;
+       void *buffer, *cea;
+       int max;
 
        if (!x86_pmu.bts)
                return 0;
 
-       buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+       buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
        if (unlikely(!buffer)) {
                WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
                return -ENOMEM;
        }
-
-       max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
-       thresh = max / 16;
-
-       ds->bts_buffer_base = (u64)(unsigned long)buffer;
+       hwev->ds_bts_vaddr = buffer;
+       /* Update the cpu entry area mapping */
+       cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
+       ds->bts_buffer_base = (unsigned long) cea;
+       ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
        ds->bts_index = ds->bts_buffer_base;
-       ds->bts_absolute_maximum = ds->bts_buffer_base +
-               max * BTS_RECORD_SIZE;
-       ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
-               thresh * BTS_RECORD_SIZE;
-
+       max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
+       ds->bts_absolute_maximum = ds->bts_buffer_base + max;
+       ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
        return 0;
 }
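
The rewritten threshold math first trims the buffer down to a whole number of BTS records and then places the interrupt threshold roughly one sixteenth of that usable area below the end. A small sketch of the arithmetic, assuming the usual 24-byte BTS record (three u64 fields) and the 64 KiB BTS_BUFFER_SIZE from the new intel_ds.h further down:

    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define BTS_BUFFER_SIZE (PAGE_SIZE << 4)        /* 64 KiB, as in intel_ds.h */
    #define BTS_RECORD_SIZE 24UL                    /* assumption: 3 * sizeof(u64) */

    int main(void)
    {
            unsigned long max     = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
            unsigned long abs_max = max;                    /* offset from bts_buffer_base */
            unsigned long thresh  = abs_max - (max / 16);

            printf("usable bytes:        %lu of %lu\n", max, BTS_BUFFER_SIZE);
            printf("absolute maximum at: base + %lu\n", abs_max);
            printf("interrupt threshold: base + %lu\n", thresh);
            return 0;
    }
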
 
 static void release_bts_buffer(int cpu)
 {
-       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+       struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
+       void *cea;
 
-       if (!ds || !x86_pmu.bts)
+       if (!x86_pmu.bts)
                return;
 
-       kfree((void *)(unsigned long)ds->bts_buffer_base);
-       ds->bts_buffer_base = 0;
+       /* Clear the cpu entry area mapping */
+       cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
+       ds_clear_cea(cea, BTS_BUFFER_SIZE);
+       dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
+       hwev->ds_bts_vaddr = NULL;
 }
 
 static int alloc_ds_buffer(int cpu)
 {
-       int node = cpu_to_node(cpu);
-       struct debug_store *ds;
-
-       ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
-       if (unlikely(!ds))
-               return -ENOMEM;
+       struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;
 
+       memset(ds, 0, sizeof(*ds));
        per_cpu(cpu_hw_events, cpu).ds = ds;
-
        return 0;
 }
 
 static void release_ds_buffer(int cpu)
 {
-       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
-       if (!ds)
-               return;
-
        per_cpu(cpu_hw_events, cpu).ds = NULL;
-       kfree(ds);
 }
 
 void release_ds_buffers(void)
@@ -402,16 +450,22 @@ void release_ds_buffers(void)
        if (!x86_pmu.bts && !x86_pmu.pebs)
                return;
 
-       get_online_cpus();
-       for_each_online_cpu(cpu)
+       for_each_possible_cpu(cpu)
+               release_ds_buffer(cpu);
+
+       for_each_possible_cpu(cpu) {
+               /*
+                * Again, ignore errors from offline CPUs; they no longer
+                * observe cpu_hw_events.ds and will not program the DS_AREA
+                * when they come up.
+                */
                fini_debug_store_on_cpu(cpu);
+       }
 
        for_each_possible_cpu(cpu) {
                release_pebs_buffer(cpu);
                release_bts_buffer(cpu);
-               release_ds_buffer(cpu);
        }
-       put_online_cpus();
 }
 
 void reserve_ds_buffers(void)
@@ -431,8 +485,6 @@ void reserve_ds_buffers(void)
        if (!x86_pmu.pebs)
                pebs_err = 1;
 
-       get_online_cpus();
-
        for_each_possible_cpu(cpu) {
                if (alloc_ds_buffer(cpu)) {
                        bts_err = 1;
@@ -469,11 +521,14 @@ void reserve_ds_buffers(void)
                if (x86_pmu.pebs && !pebs_err)
                        x86_pmu.pebs_active = 1;
 
-               for_each_online_cpu(cpu)
+               for_each_possible_cpu(cpu) {
+                       /*
+                        * Ignore wrmsr_on_cpu() errors for offline CPUs; they
+                        * will get this call through intel_pmu_cpu_starting().
+                        */
                        init_debug_store_on_cpu(cpu);
+               }
        }
-
-       put_online_cpus();
 }
 
 /*
index 005908e..a2efb49 100644 (file)
@@ -755,14 +755,14 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),
 
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
-       X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,    hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,    hsx_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,  hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),
 
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsx_rapl_init),
-       X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),
 
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),
index f7aaadf..8e4ea14 100644 (file)
@@ -14,6 +14,8 @@
 
 #include <linux/perf_event.h>
 
+#include <asm/intel_ds.h>
+
 /* To enable MSR tracing please use the generic trace points. */
 
 /*
@@ -77,8 +79,6 @@ struct amd_nb {
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS                8
 #define PEBS_COUNTER_MASK      ((1ULL << MAX_PEBS_EVENTS) - 1)
 
 /*
@@ -95,23 +95,6 @@ struct amd_nb {
        PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
        PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)
 
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64bit fields.
- */
-struct debug_store {
-       u64     bts_buffer_base;
-       u64     bts_index;
-       u64     bts_absolute_maximum;
-       u64     bts_interrupt_threshold;
-       u64     pebs_buffer_base;
-       u64     pebs_index;
-       u64     pebs_absolute_maximum;
-       u64     pebs_interrupt_threshold;
-       u64     pebs_event_reset[MAX_PEBS_EVENTS];
-};
-
 #define PEBS_REGS \
        (PERF_REG_X86_AX | \
         PERF_REG_X86_BX | \
@@ -216,6 +199,8 @@ struct cpu_hw_events {
         * Intel DebugStore bits
         */
        struct debug_store      *ds;
+       void                    *ds_pebs_vaddr;
+       void                    *ds_bts_vaddr;
        u64                     pebs_enabled;
        int                     n_pebs;
        int                     n_large_pebs;
index 8d0ec9d..44f5d79 100644 (file)
@@ -49,7 +49,7 @@ extern int acpi_fix_pin2_polarity;
 extern int acpi_disable_cmcff;
 
 extern u8 acpi_sci_flags;
-extern int acpi_sci_override_gsi;
+extern u32 acpi_sci_override_gsi;
 void acpi_pic_sci_set_trigger(unsigned int, u16);
 
 struct device;
index dbfd085..cf5961c 100644 (file)
@@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
        ".popsection\n"                                                 \
        ".pushsection .altinstr_replacement, \"ax\"\n"                  \
        ALTINSTR_REPLACEMENT(newinstr, feature, 1)                      \
-       ".popsection"
+       ".popsection\n"
 
 #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
        OLDINSTR_2(oldinstr, 1, 2)                                      \
@@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
        ".pushsection .altinstr_replacement, \"ax\"\n"                  \
        ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)                    \
        ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)                    \
-       ".popsection"
+       ".popsection\n"
 
 /*
  * Alternative instructions for different CPU types or capabilities.
index a9e57f0..9872277 100644 (file)
@@ -136,6 +136,7 @@ extern void disconnect_bsp_APIC(int virt_wire_setup);
 extern void disable_local_APIC(void);
 extern void lapic_shutdown(void);
 extern void sync_Arb_IDs(void);
+extern void init_bsp_APIC(void);
 extern void apic_intr_mode_init(void);
 extern void setup_local_APIC(void);
 extern void init_apic_mappings(void);
index ff700d8..1908214 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/special_insns.h>
 #include <asm/preempt.h>
+#include <asm/asm.h>
 
 #ifndef CONFIG_X86_CMPXCHG64
 extern void cmpxchg8b_emu(void);
 #endif
+
+#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_X86_32
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
+#else
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
+INDIRECT_THUNK(8)
+INDIRECT_THUNK(9)
+INDIRECT_THUNK(10)
+INDIRECT_THUNK(11)
+INDIRECT_THUNK(12)
+INDIRECT_THUNK(13)
+INDIRECT_THUNK(14)
+INDIRECT_THUNK(15)
+#endif
+INDIRECT_THUNK(ax)
+INDIRECT_THUNK(bx)
+INDIRECT_THUNK(cx)
+INDIRECT_THUNK(dx)
+INDIRECT_THUNK(si)
+INDIRECT_THUNK(di)
+INDIRECT_THUNK(bp)
+#endif /* CONFIG_RETPOLINE */
index 219faae..386a690 100644 (file)
 #endif
 
 #ifndef __ASSEMBLY__
+#ifndef __BPF__
 /*
  * This output constraint should be used for any inline asm which has a "call"
  * instruction.  Otherwise the asm may be inserted before the frame pointer
 register unsigned long current_stack_pointer asm(_ASM_SP);
 #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
 #endif
+#endif
 
 #endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
new file mode 100644 (file)
index 0000000..4a7884b
--- /dev/null
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _ASM_X86_CPU_ENTRY_AREA_H
+#define _ASM_X86_CPU_ENTRY_AREA_H
+
+#include <linux/percpu-defs.h>
+#include <asm/processor.h>
+#include <asm/intel_ds.h>
+
+/*
+ * cpu_entry_area is a percpu region that contains things needed by the CPU
+ * and early entry/exit code.  Real types aren't used for all fields here
+ * to avoid circular header dependencies.
+ *
+ * Every field is a virtual alias of some other allocated backing store.
+ * There is no direct allocation of a struct cpu_entry_area.
+ */
+struct cpu_entry_area {
+       char gdt[PAGE_SIZE];
+
+       /*
+        * The GDT is just below entry_stack and thus serves (on x86_64) as
+        * a read-only guard page.
+        */
+       struct entry_stack_page entry_stack_page;
+
+       /*
+        * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
+        * we need task switches to work, and task switches write to the TSS.
+        */
+       struct tss_struct tss;
+
+       char entry_trampoline[PAGE_SIZE];
+
+#ifdef CONFIG_X86_64
+       /*
+        * Exception stacks used for IST entries.
+        *
+        * In the future, this should have a separate slot for each stack
+        * with guard pages between them.
+        */
+       char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+#endif
+#ifdef CONFIG_CPU_SUP_INTEL
+       /*
+        * Per CPU debug store for Intel performance monitoring. Wastes a
+        * full page at the moment.
+        */
+       struct debug_store cpu_debug_store;
+       /*
+        * The actual PEBS/BTS buffers must be mapped to user space.
+        * Reserve enough fixmap PTEs.
+        */
+       struct debug_store_buffers cpu_debug_buffers;
+#endif
+};
+
+#define CPU_ENTRY_AREA_SIZE    (sizeof(struct cpu_entry_area))
+#define CPU_ENTRY_AREA_TOT_SIZE        (CPU_ENTRY_AREA_SIZE * NR_CPUS)
+
+DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+
+extern void setup_cpu_entry_areas(void);
+extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
+
+#define        CPU_ENTRY_AREA_RO_IDT           CPU_ENTRY_AREA_BASE
+#define CPU_ENTRY_AREA_PER_CPU         (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
+
+#define CPU_ENTRY_AREA_RO_IDT_VADDR    ((void *)CPU_ENTRY_AREA_RO_IDT)
+
+#define CPU_ENTRY_AREA_MAP_SIZE                        \
+       (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
+
+extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
+
+static inline struct entry_stack *cpu_entry_stack(int cpu)
+{
+       return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
+}
+
+#endif
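
get_cpu_entry_area(), declared a few lines up, boils down to plain arithmetic on these constants: one read-only IDT page at the base, then one struct cpu_entry_area per CPU. A hedged user-space sketch of that layout calculation; the base address and the per-CPU size below are assumed stand-ins, since the real values depend on the memory map and kernel config:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE               4096UL
    #define CPU_ENTRY_AREA_BASE     0xfffffe0000000000UL    /* assumed 4-level value */
    #define CPU_ENTRY_AREA_RO_IDT   CPU_ENTRY_AREA_BASE
    #define CPU_ENTRY_AREA_PER_CPU  (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
    #define CPU_ENTRY_AREA_SIZE     (40 * PAGE_SIZE)        /* stand-in for sizeof(struct cpu_entry_area) */

    static uintptr_t cpu_entry_area(int cpu)
    {
            return CPU_ENTRY_AREA_PER_CPU + (uintptr_t)cpu * CPU_ENTRY_AREA_SIZE;
    }

    int main(void)
    {
            for (int cpu = 0; cpu < 4; cpu++)
                    printf("cpu %d -> entry area at %#lx\n", cpu,
                           (unsigned long)cpu_entry_area(cpu));
            return 0;
    }
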
index bf6a762..ea9a7dd 100644 (file)
@@ -135,6 +135,8 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
        set_bit(bit, (unsigned long *)cpu_caps_set);    \
 } while (0)
 
+#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
index c0b0e9e..25b9375 100644 (file)
 #define X86_FEATURE_CAT_L3             ( 7*32+ 4) /* Cache Allocation Technology L3 */
 #define X86_FEATURE_CAT_L2             ( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3             ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_INVPCID_SINGLE     ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
 
 #define X86_FEATURE_HW_PSTATE          ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK      ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME                        ( 7*32+10) /* AMD Secure Memory Encryption */
-
+#define X86_FEATURE_PTI                        ( 7*32+11) /* Kernel Page Table Isolation enabled */
+#define X86_FEATURE_RETPOLINE          ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD      ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN         ( 7*32+14) /* Intel Processor Inventory Number */
-#define X86_FEATURE_INTEL_PT           ( 7*32+15) /* Intel Processor Trace */
 #define X86_FEATURE_AVX512_4VNNIW      ( 7*32+16) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS      ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
 #define X86_FEATURE_MBA                        ( 7*32+18) /* Memory Bandwidth Allocation */
+#define X86_FEATURE_RSB_CTXSW          ( 7*32+19) /* Fill RSB on context switches */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_AVX512IFMA         ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
 #define X86_FEATURE_CLFLUSHOPT         ( 9*32+23) /* CLFLUSHOPT instruction */
 #define X86_FEATURE_CLWB               ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_INTEL_PT           ( 9*32+25) /* Intel Processor Trace */
 #define X86_FEATURE_AVX512PF           ( 9*32+26) /* AVX-512 Prefetch */
 #define X86_FEATURE_AVX512ER           ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
 #define X86_FEATURE_AVX512CD           ( 9*32+28) /* AVX-512 Conflict Detection */
 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF             (13*32+ 1) /* Instructions Retired Count */
+#define X86_FEATURE_XSAVEERPTR         (13*32+ 2) /* Always save/restore FP error pointers */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
 #define X86_BUG_SWAPGS_FENCE           X86_BUG(11) /* SWAPGS without input dep on GS */
 #define X86_BUG_MONITOR                        X86_BUG(12) /* IPI required to wake up remote CPU */
 #define X86_BUG_AMD_E400               X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_CPU_MELTDOWN           X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_SPECTRE_V1             X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V2             X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index 4011cb0..13c5ee8 100644 (file)
@@ -7,6 +7,7 @@
 #include <asm/mmu.h>
 #include <asm/fixmap.h>
 #include <asm/irq_vectors.h>
+#include <asm/cpu_entry_area.h>
 
 #include <linux/smp.h>
 #include <linux/percpu.h>
@@ -20,6 +21,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
 
        desc->type              = (info->read_exec_only ^ 1) << 1;
        desc->type             |= info->contents << 2;
+       /* Set the ACCESS bit so it can be mapped RO */
+       desc->type             |= 1;
 
        desc->s                 = 1;
        desc->dpl               = 0x3;
@@ -60,17 +63,10 @@ static inline struct desc_struct *get_current_gdt_rw(void)
        return this_cpu_ptr(&gdt_page)->gdt;
 }
 
-/* Get the fixmap index for a specific processor */
-static inline unsigned int get_cpu_gdt_ro_index(int cpu)
-{
-       return FIX_GDT_REMAP_BEGIN + cpu;
-}
-
 /* Provide the fixmap address of the remapped GDT */
 static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
 {
-       unsigned int idx = get_cpu_gdt_ro_index(cpu);
-       return (struct desc_struct *)__fix_to_virt(idx);
+       return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt;
 }
 
 /* Provide the current read-only GDT */
@@ -185,7 +181,7 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr,
 #endif
 }
 
-static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
+static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr)
 {
        struct desc_struct *d = get_cpu_gdt_rw(cpu);
        tss_desc tss;
index 14d6d50..b027633 100644 (file)
 # define DISABLE_LA57  (1<<(X86_FEATURE_LA57 & 31))
 #endif
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define DISABLE_PTI           0
+#else
+# define DISABLE_PTI           (1 << (X86_FEATURE_PTI & 31))
+#endif
+
 /*
  * Make sure to add features to the correct mask
  */
@@ -60,7 +66,7 @@
 #define DISABLED_MASK4 (DISABLE_PCID)
 #define DISABLED_MASK5 0
 #define DISABLED_MASK6 0
-#define DISABLED_MASK7 0
+#define DISABLED_MASK7 (DISABLE_PTI)
 #define DISABLED_MASK8 0
 #define DISABLED_MASK9 (DISABLE_MPX)
 #define DISABLED_MASK10        0
index 0211029..6777480 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef _ASM_X86_ESPFIX_H
 #define _ASM_X86_ESPFIX_H
 
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_ESPFIX64
 
 #include <asm/percpu.h>
 
@@ -11,7 +11,8 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
 
 extern void init_espfix_bsp(void);
 extern void init_espfix_ap(int cpu);
-
-#endif /* CONFIG_X86_64 */
+#else
+static inline void init_espfix_ap(int cpu) { }
+#endif
 
 #endif /* _ASM_X86_ESPFIX_H */
index b0c505f..64c4a30 100644 (file)
@@ -44,7 +44,6 @@ extern unsigned long __FIXADDR_TOP;
                         PAGE_SIZE)
 #endif
 
-
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
@@ -84,7 +83,6 @@ enum fixed_addresses {
        FIX_IO_APIC_BASE_0,
        FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
 #endif
-       FIX_RO_IDT,     /* Virtual mapping for read-only IDT */
 #ifdef CONFIG_X86_32
        FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
        FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
@@ -100,9 +98,6 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_INTEL_MID
        FIX_LNW_VRTC,
 #endif
-       /* Fixmap entries to remap the GDTs, one per processor. */
-       FIX_GDT_REMAP_BEGIN,
-       FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1,
 
 #ifdef CONFIG_ACPI_APEI_GHES
        /* Used for GHES mapping from assorted contexts */
@@ -143,7 +138,7 @@ enum fixed_addresses {
 extern void reserve_top_address(unsigned long reserve);
 
 #define FIXADDR_SIZE   (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START          (FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_START  (FIXADDR_TOP - FIXADDR_SIZE)
 
 extern int fixmaps_set;
 
index 1b0a5ab..96aa6b9 100644 (file)
 #ifndef _ASM_X86_HYPERVISOR_H
 #define _ASM_X86_HYPERVISOR_H
 
-#ifdef CONFIG_HYPERVISOR_GUEST
-
-#include <asm/kvm_para.h>
-#include <asm/x86_init.h>
-#include <asm/xen/hypervisor.h>
-
-/*
- * x86 hypervisor information
- */
-
+/* x86 hypervisor types  */
 enum x86_hypervisor_type {
        X86_HYPER_NATIVE = 0,
        X86_HYPER_VMWARE,
@@ -39,6 +30,12 @@ enum x86_hypervisor_type {
        X86_HYPER_KVM,
 };
 
+#ifdef CONFIG_HYPERVISOR_GUEST
+
+#include <asm/kvm_para.h>
+#include <asm/x86_init.h>
+#include <asm/xen/hypervisor.h>
+
 struct hypervisor_x86 {
        /* Hypervisor name */
        const char      *name;
@@ -58,7 +55,15 @@ struct hypervisor_x86 {
 
 extern enum x86_hypervisor_type x86_hyper_type;
 extern void init_hypervisor_platform(void);
+static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
+{
+       return x86_hyper_type == type;
+}
 #else
 static inline void init_hypervisor_platform(void) { }
+static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
+{
+       return type == X86_HYPER_NATIVE;
+}
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
new file mode 100644 (file)
index 0000000..62a9f49
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef _ASM_INTEL_DS_H
+#define _ASM_INTEL_DS_H
+
+#include <linux/percpu-defs.h>
+
+#define BTS_BUFFER_SIZE                (PAGE_SIZE << 4)
+#define PEBS_BUFFER_SIZE       (PAGE_SIZE << 4)
+
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS                8
+
+/*
+ * A debug store configuration.
+ *
+ * We only support architectures that use 64bit fields.
+ */
+struct debug_store {
+       u64     bts_buffer_base;
+       u64     bts_index;
+       u64     bts_absolute_maximum;
+       u64     bts_interrupt_threshold;
+       u64     pebs_buffer_base;
+       u64     pebs_index;
+       u64     pebs_absolute_maximum;
+       u64     pebs_interrupt_threshold;
+       u64     pebs_event_reset[MAX_PEBS_EVENTS];
+} __aligned(PAGE_SIZE);
+
+DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
+
+struct debug_store_buffers {
+       char    bts_buffer[BTS_BUFFER_SIZE];
+       char    pebs_buffer[PEBS_BUFFER_SIZE];
+};
+
+#endif
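
Page-aligning struct debug_store matters because the whole structure is exposed through the cpu_entry_area: padding it to a full page keeps unrelated per-CPU data off that mapping, and the two buffer slots are exact page multiples. A quick user-space mirror to sanity-check the sizes, assuming 4 KiB pages:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE        4096UL
    #define BTS_BUFFER_SIZE  (PAGE_SIZE << 4)
    #define PEBS_BUFFER_SIZE (PAGE_SIZE << 4)
    #define MAX_PEBS_EVENTS  8

    struct debug_store {
            uint64_t bts_buffer_base, bts_index;
            uint64_t bts_absolute_maximum, bts_interrupt_threshold;
            uint64_t pebs_buffer_base, pebs_index;
            uint64_t pebs_absolute_maximum, pebs_interrupt_threshold;
            uint64_t pebs_event_reset[MAX_PEBS_EVENTS];
    } __attribute__((aligned(4096)));

    struct debug_store_buffers {
            char bts_buffer[BTS_BUFFER_SIZE];
            char pebs_buffer[PEBS_BUFFER_SIZE];
    };

    int main(void)
    {
            printf("debug_store:         %zu bytes (one page)\n",
                   sizeof(struct debug_store));
            printf("debug_store_buffers: %zu bytes (%zu pages)\n",
                   sizeof(struct debug_store_buffers),
                   (size_t)(sizeof(struct debug_store_buffers) / PAGE_SIZE));
            return 0;
    }
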
diff --git a/arch/x86/include/asm/invpcid.h b/arch/x86/include/asm/invpcid.h
new file mode 100644 (file)
index 0000000..989cfa8
--- /dev/null
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_INVPCID
+#define _ASM_X86_INVPCID
+
+static inline void __invpcid(unsigned long pcid, unsigned long addr,
+                            unsigned long type)
+{
+       struct { u64 d[2]; } desc = { { pcid, addr } };
+
+       /*
+        * The memory clobber is because the whole point is to invalidate
+        * stale TLB entries and, especially if we're flushing global
+        * mappings, we don't want the compiler to reorder any subsequent
+        * memory accesses before the TLB flush.
+        *
+        * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
+        * invpcid (%rcx), %rax in long mode.
+        */
+       asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
+                     : : "m" (desc), "a" (type), "c" (&desc) : "memory");
+}
+
+#define INVPCID_TYPE_INDIV_ADDR                0
+#define INVPCID_TYPE_SINGLE_CTXT       1
+#define INVPCID_TYPE_ALL_INCL_GLOBAL   2
+#define INVPCID_TYPE_ALL_NON_GLOBAL    3
+
+/* Flush all mappings for a given pcid and addr, not including globals. */
+static inline void invpcid_flush_one(unsigned long pcid,
+                                    unsigned long addr)
+{
+       __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
+}
+
+/* Flush all mappings for a given PCID, not including globals. */
+static inline void invpcid_flush_single_context(unsigned long pcid)
+{
+       __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
+}
+
+/* Flush all mappings, including globals, for all PCIDs. */
+static inline void invpcid_flush_all(void)
+{
+       __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
+}
+
+/* Flush all mappings for all PCIDs except globals. */
+static inline void invpcid_flush_all_nonglobals(void)
+{
+       __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
+}
+
+#endif /* _ASM_X86_INVPCID */
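
INVPCID takes the invalidation type in a register and a 16-byte memory descriptor: per the SDM, the PCID sits in the low 12 bits of the first quadword and the linear address in the second. The instruction is privileged, so it cannot be issued from user space; the sketch below only builds the descriptor the way __invpcid() does, to show the layout:

    #include <stdio.h>
    #include <stdint.h>

    #define INVPCID_TYPE_INDIV_ADDR      0
    #define INVPCID_TYPE_SINGLE_CTXT     1
    #define INVPCID_TYPE_ALL_INCL_GLOBAL 2
    #define INVPCID_TYPE_ALL_NON_GLOBAL  3

    /* Same shape as the anonymous struct in __invpcid(). */
    struct invpcid_desc {
            uint64_t d[2];  /* d[0]: PCID (low 12 bits), d[1]: linear address */
    };

    int main(void)
    {
            struct invpcid_desc desc = {
                    .d = { 42, 0x7f0000001000ULL },         /* example PCID and address */
            };
            unsigned long type = INVPCID_TYPE_INDIV_ADDR;

            /*
             * A real flush would now execute INVPCID with 'type' in a register
             * and 'desc' as the memory operand; that is privileged, so it is
             * deliberately not issued here.
             */
            printf("type=%lu pcid=%llu addr=%#llx\n", type,
                   (unsigned long long)desc.d[0], (unsigned long long)desc.d[1]);
            return 0;
    }
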
index 139feef..c066ffa 100644 (file)
@@ -44,7 +44,7 @@ extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
 extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
                              unsigned int nr_irqs);
 extern int mp_irqdomain_activate(struct irq_domain *domain,
-                                struct irq_data *irq_data, bool early);
+                                struct irq_data *irq_data, bool reserve);
 extern void mp_irqdomain_deactivate(struct irq_domain *domain,
                                    struct irq_data *irq_data);
 extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
index c8ef23f..89f0895 100644 (file)
@@ -142,6 +142,9 @@ static inline notrace unsigned long arch_local_irq_save(void)
        swapgs;                                 \
        sysretl
 
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(x)          pushfq; popq %rax
+#endif
 #else
 #define INTERRUPT_RETURN               iret
 #define ENABLE_INTERRUPTS_SYSEXIT      sti; sysexit
index f86a8ca..395c963 100644 (file)
@@ -26,6 +26,7 @@ extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_stack_regs(struct pt_regs *regs);
 extern void __show_regs(struct pt_regs *regs, int all);
+extern void show_iret_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
 
diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
index 034caa1..b24b1c8 100644 (file)
@@ -214,8 +214,6 @@ struct x86_emulate_ops {
        void (*halt)(struct x86_emulate_ctxt *ctxt);
        void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
        int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
-       void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
-       void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
        int (*intercept)(struct x86_emulate_ctxt *ctxt,
                         struct x86_instruction_info *info,
                         enum x86_intercept_stage stage);
index 1bfb997..5167984 100644 (file)
@@ -536,7 +536,20 @@ struct kvm_vcpu_arch {
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;
 
+       /*
+        * QEMU userspace and the guest each have their own FPU state.
+        * In vcpu_run, we switch between the user and guest FPU contexts.
+        * While running a VCPU, the VCPU thread will have the guest FPU
+        * context.
+        *
+        * Note that while the PKRU state lives inside the fpu registers,
+        * it is switched out separately at VMENTER and VMEXIT time. The
+        * "guest_fpu" state here contains the guest FPU context, with the
+        * host PKRU bits.
+        */
+       struct fpu user_fpu;
        struct fpu guest_fpu;
+
        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;
@@ -1161,7 +1174,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
 static inline int emulate_instruction(struct kvm_vcpu *vcpu,
                        int emulation_type)
 {
-       return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+       return x86_emulate_instruction(vcpu, 0,
+                       emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
 }
 
 void kvm_enable_efer_bits(u64);
@@ -1434,4 +1448,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val)                      \
        *(type *)((buf) + (offset) - 0x7e00) = val
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+               unsigned long start, unsigned long end);
+
 #endif /* _ASM_X86_KVM_HOST_H */
index c9459a4..22c5f3e 100644 (file)
@@ -39,7 +39,7 @@ void __init sme_unmap_bootdata(char *real_mode_data);
 
 void __init sme_early_init(void);
 
-void __init sme_encrypt_kernel(void);
+void __init sme_encrypt_kernel(struct boot_params *bp);
 void __init sme_enable(struct boot_params *bp);
 
 int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
@@ -67,7 +67,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
 
 static inline void __init sme_early_init(void) { }
 
-static inline void __init sme_encrypt_kernel(void) { }
+static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
 
 static inline bool sme_active(void) { return false; }
index 9ea26f1..5ff3e8a 100644 (file)
@@ -3,6 +3,7 @@
 #define _ASM_X86_MMU_H
 
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/atomic.h>
 
@@ -27,7 +28,8 @@ typedef struct {
        atomic64_t tlb_gen;
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
-       struct ldt_struct *ldt;
+       struct rw_semaphore     ldt_usr_sem;
+       struct ldt_struct       *ldt;
 #endif
 
 #ifdef CONFIG_X86_64
index 6d16d15..c931b88 100644 (file)
@@ -50,22 +50,53 @@ struct ldt_struct {
         * call gates.  On native, we could merge the ldt_struct and LDT
         * allocations, but it's not worth trying to optimize.
         */
-       struct desc_struct *entries;
-       unsigned int nr_entries;
+       struct desc_struct      *entries;
+       unsigned int            nr_entries;
+
+       /*
+        * If PTI is in use, then the entries array is not mapped while we're
+        * in user mode.  The whole array will be aliased at the address
+        * given by ldt_slot_va(slot).  We use two slots so that we can
+        * allocate, map and enable a new LDT without invalidating the mapping
+        * of an older, still-in-use LDT.
+        *
+        * slot will be -1 if this LDT doesn't have an alias mapping.
+        */
+       int                     slot;
 };
 
+/* This is a multiple of PAGE_SIZE. */
+#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
+
+static inline void *ldt_slot_va(int slot)
+{
+#ifdef CONFIG_X86_64
+       return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
+#else
+       BUG();
+#endif
+}
+
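
The two alias slots sit back to back above LDT_BASE_ADDR, each LDT_SLOT_STRIDE bytes wide. A sketch of the arithmetic; the entry count, entry size and base address below are assumptions (the usual 8192 entries of 8 bytes and the 4-level base), not values taken from this hunk:

    #include <stdio.h>
    #include <stdint.h>

    #define LDT_ENTRIES     8192UL                  /* assumption */
    #define LDT_ENTRY_SIZE  8UL                     /* assumption */
    #define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
    #define LDT_BASE_ADDR   0xfffffe8000000000UL    /* assumed 4-level value */

    static uintptr_t ldt_slot_va(int slot)
    {
            return LDT_BASE_ADDR + LDT_SLOT_STRIDE * (uintptr_t)slot;
    }

    int main(void)
    {
            printf("slot 0 alias at %#lx\n", (unsigned long)ldt_slot_va(0));
            printf("slot 1 alias at %#lx\n", (unsigned long)ldt_slot_va(1));
            return 0;
    }
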
 /*
  * Used for LDT copy/destruction.
  */
-int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
+static inline void init_new_context_ldt(struct mm_struct *mm)
+{
+       mm->context.ldt = NULL;
+       init_rwsem(&mm->context.ldt_usr_sem);
+}
+int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
 void destroy_context_ldt(struct mm_struct *mm);
+void ldt_arch_exit_mmap(struct mm_struct *mm);
 #else  /* CONFIG_MODIFY_LDT_SYSCALL */
-static inline int init_new_context_ldt(struct task_struct *tsk,
-                                      struct mm_struct *mm)
+static inline void init_new_context_ldt(struct mm_struct *mm) { }
+static inline int ldt_dup_context(struct mm_struct *oldmm,
+                                 struct mm_struct *mm)
 {
        return 0;
 }
-static inline void destroy_context_ldt(struct mm_struct *mm) {}
+static inline void destroy_context_ldt(struct mm_struct *mm) { }
+static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
 #endif
 
 static inline void load_mm_ldt(struct mm_struct *mm)
@@ -90,10 +121,31 @@ static inline void load_mm_ldt(struct mm_struct *mm)
         * that we can see.
         */
 
-       if (unlikely(ldt))
-               set_ldt(ldt->entries, ldt->nr_entries);
-       else
+       if (unlikely(ldt)) {
+               if (static_cpu_has(X86_FEATURE_PTI)) {
+                       if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
+                               /*
+                                * Whoops -- either the new LDT isn't mapped
+                                * (if slot == -1) or is mapped into a bogus
+                                * slot (if slot > 1).
+                                */
+                               clear_LDT();
+                               return;
+                       }
+
+                       /*
+                        * If page table isolation is enabled, ldt->entries
+                        * will not be mapped in the userspace pagetables.
+                        * Tell the CPU to access the LDT through the alias
+                        * at ldt_slot_va(ldt->slot).
+                        */
+                       set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
+               } else {
+                       set_ldt(ldt->entries, ldt->nr_entries);
+               }
+       } else {
                clear_LDT();
+       }
 #else
        clear_LDT();
 #endif
@@ -132,18 +184,21 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
 {
+       mutex_init(&mm->context.lock);
+
        mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
        atomic64_set(&mm->context.tlb_gen, 0);
 
-       #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                /* pkey 0 is the default and always allocated */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
        }
-       #endif
-       return init_new_context_ldt(tsk, mm);
+#endif
+       init_new_context_ldt(mm);
+       return 0;
 }
 static inline void destroy_context(struct mm_struct *mm)
 {
@@ -176,15 +231,16 @@ do {                                              \
 } while (0)
 #endif
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-                                struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
        paravirt_arch_dup_mmap(oldmm, mm);
+       return ldt_dup_context(oldmm, mm);
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)
 {
        paravirt_arch_exit_mmap(mm);
+       ldt_arch_exit_mmap(mm);
 }
 
 #ifdef CONFIG_X86_64
@@ -281,33 +337,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
        return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
-/*
- * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
- * bits.  This serves two purposes.  It prevents a nasty situation in
- * which PCID-unaware code saves CR3, loads some other value (with PCID
- * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
- * the saved ASID was nonzero.  It also means that any bugs involving
- * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
- * deterministically.
- */
-
-static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
-{
-       if (static_cpu_has(X86_FEATURE_PCID)) {
-               VM_WARN_ON_ONCE(asid > 4094);
-               return __sme_pa(mm->pgd) | (asid + 1);
-       } else {
-               VM_WARN_ON_ONCE(asid != 0);
-               return __sme_pa(mm->pgd);
-       }
-}
-
-static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
-{
-       VM_WARN_ON_ONCE(asid > 4094);
-       return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
-}
-
 /*
  * This can be used from process context to figure out what the value of
  * CR3 is without needing to do a (slow) __read_cr3().
@@ -317,7 +346,7 @@ static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
  */
 static inline unsigned long __get_current_cr3_fast(void)
 {
-       unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
+       unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
                this_cpu_read(cpu_tlbstate.loaded_mm_asid));
 
        /* For now, be very restrictive about when this can be called. */
index 5400add..8bf450b 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/nmi.h>
 #include <asm/io.h>
 #include <asm/hyperv.h>
+#include <asm/nospec-branch.h>
 
 /*
  * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
                return U64_MAX;
 
        __asm__ __volatile__("mov %4, %%r8\n"
-                            "call *%5"
+                            CALL_NOSPEC
                             : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                               "+c" (control), "+d" (input_address)
-                            :  "r" (output_address), "m" (hv_hypercall_pg)
+                            :  "r" (output_address),
+                               THUNK_TARGET(hv_hypercall_pg)
                             : "cc", "memory", "r8", "r9", "r10", "r11");
 #else
        u32 input_address_hi = upper_32_bits(input_address);
@@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
        if (!hv_hypercall_pg)
                return U64_MAX;
 
-       __asm__ __volatile__("call *%7"
+       __asm__ __volatile__(CALL_NOSPEC
                             : "=A" (hv_status),
                               "+c" (input_address_lo), ASM_CALL_CONSTRAINT
                             : "A" (control),
                               "b" (input_address_hi),
                               "D"(output_address_hi), "S"(output_address_lo),
-                              "m" (hv_hypercall_pg)
+                              THUNK_TARGET(hv_hypercall_pg)
                             : "cc", "memory");
 #endif /* !x86_64 */
        return hv_status;
@@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
 
 #ifdef CONFIG_X86_64
        {
-               __asm__ __volatile__("call *%4"
+               __asm__ __volatile__(CALL_NOSPEC
                                     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                                       "+c" (control), "+d" (input1)
-                                    : "m" (hv_hypercall_pg)
+                                    : THUNK_TARGET(hv_hypercall_pg)
                                     : "cc", "r8", "r9", "r10", "r11");
        }
 #else
@@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
                u32 input1_hi = upper_32_bits(input1);
                u32 input1_lo = lower_32_bits(input1);
 
-               __asm__ __volatile__ ("call *%5"
+               __asm__ __volatile__ (CALL_NOSPEC
                                      : "=A"(hv_status),
                                        "+c"(input1_lo),
                                        ASM_CALL_CONSTRAINT
                                      : "A" (control),
                                        "b" (input1_hi),
-                                       "m" (hv_hypercall_pg)
+                                       THUNK_TARGET(hv_hypercall_pg)
                                      : "cc", "edi", "esi");
        }
 #endif
index 34c4922..e7b983a 100644 (file)
 #define FAM10H_MMIO_CONF_BASE_MASK     0xfffffffULL
 #define FAM10H_MMIO_CONF_BASE_SHIFT    20
 #define MSR_FAM10H_NODE_ID             0xc001100c
+#define MSR_F10H_DECFG                 0xc0011029
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT    1
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE                BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
 
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1                        0xc001001a
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
new file mode 100644 (file)
index 0000000..4ad4108
--- /dev/null
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NOSPEC_BRANCH_H__
+#define __NOSPEC_BRANCH_H__
+
+#include <asm/alternative.h>
+#include <asm/alternative-asm.h>
+#include <asm/cpufeatures.h>
+
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+
+#define RSB_CLEAR_LOOPS                32      /* To forcibly overwrite all entries */
+#define RSB_FILL_LOOPS         16      /* To avoid underflow */
+
+/*
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version - two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp)      \
+       mov     $(nr/2), reg;                   \
+771:                                           \
+       call    772f;                           \
+773:   /* speculation trap */                  \
+       pause;                                  \
+       lfence;                                 \
+       jmp     773b;                           \
+772:                                           \
+       call    774f;                           \
+775:   /* speculation trap */                  \
+       pause;                                  \
+       lfence;                                 \
+       jmp     775b;                           \
+774:                                           \
+       dec     reg;                            \
+       jnz     771b;                           \
+       add     $(BITS_PER_LONG/8) * nr, sp;
+
+#ifdef __ASSEMBLY__
+
+/*
+ * This should be used immediately before a retpoline alternative.  It tells
+ * objtool where the retpolines are so that it can make sense of the control
+ * flow by just reading the original instruction(s) and ignoring the
+ * alternatives.
+ */
+.macro ANNOTATE_NOSPEC_ALTERNATIVE
+       .Lannotate_\@:
+       .pushsection .discard.nospec
+       .long .Lannotate_\@ - .
+       .popsection
+.endm
+
+/*
+ * These are the bare retpoline primitives for indirect jmp and call.
+ * Do not use these directly; they only exist to make the ALTERNATIVE
+ * invocation below less ugly.
+ */
+.macro RETPOLINE_JMP reg:req
+       call    .Ldo_rop_\@
+.Lspec_trap_\@:
+       pause
+       lfence
+       jmp     .Lspec_trap_\@
+.Ldo_rop_\@:
+       mov     \reg, (%_ASM_SP)
+       ret
+.endm
+
+/*
+ * This is a wrapper around RETPOLINE_JMP so the called function in reg
+ * returns to the instruction after the macro.
+ */
+.macro RETPOLINE_CALL reg:req
+       jmp     .Ldo_call_\@
+.Ldo_retpoline_jmp_\@:
+       RETPOLINE_JMP \reg
+.Ldo_call_\@:
+       call    .Ldo_retpoline_jmp_\@
+.endm
+
+/*
+ * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
+ * indirect jmp/call which may be susceptible to the Spectre variant 2
+ * attack.
+ */
+.macro JMP_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+       ANNOTATE_NOSPEC_ALTERNATIVE
+       ALTERNATIVE_2 __stringify(jmp *\reg),                           \
+               __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
+               __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+       jmp     *\reg
+#endif
+.endm
+
+.macro CALL_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+       ANNOTATE_NOSPEC_ALTERNATIVE
+       ALTERNATIVE_2 __stringify(call *\reg),                          \
+               __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
+               __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+       call    *\reg
+#endif
+.endm
+
+ /*
+  * A simpler FILL_RETURN_BUFFER macro, so callers don't have to use the
+  * CPP monstrosity above manually.
+  */
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+#ifdef CONFIG_RETPOLINE
+       ANNOTATE_NOSPEC_ALTERNATIVE
+       ALTERNATIVE "jmp .Lskip_rsb_\@",                                \
+               __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))    \
+               \ftr
+.Lskip_rsb_\@:
+#endif
+.endm
+
+#else /* __ASSEMBLY__ */
+
+#define ANNOTATE_NOSPEC_ALTERNATIVE                            \
+       "999:\n\t"                                              \
+       ".pushsection .discard.nospec\n\t"                      \
+       ".long 999b - .\n\t"                                    \
+       ".popsection\n\t"
+
+#if defined(CONFIG_X86_64) && defined(RETPOLINE)
+
+/*
+ * Since the inline asm uses the %V modifier which is only in newer GCC,
+ * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
+ */
+# define CALL_NOSPEC                                           \
+       ANNOTATE_NOSPEC_ALTERNATIVE                             \
+       ALTERNATIVE(                                            \
+       "call *%[thunk_target]\n",                              \
+       "call __x86_indirect_thunk_%V[thunk_target]\n",         \
+       X86_FEATURE_RETPOLINE)
+# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+
+#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
+/*
+ * For i386 we use the original ret-equivalent retpoline, because
+ * otherwise we'll run out of registers. We don't care about CET
+ * here, anyway.
+ */
+# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",    \
+       "       jmp    904f;\n"                                 \
+       "       .align 16\n"                                    \
+       "901:   call   903f;\n"                                 \
+       "902:   pause;\n"                                       \
+       "       lfence;\n"                                      \
+       "       jmp    902b;\n"                                 \
+       "       .align 16\n"                                    \
+       "903:   addl   $4, %%esp;\n"                            \
+       "       pushl  %[thunk_target];\n"                      \
+       "       ret;\n"                                         \
+       "       .align 16\n"                                    \
+       "904:   call   901b;\n",                                \
+       X86_FEATURE_RETPOLINE)
+
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#else /* No retpoline for C / inline asm */
+# define CALL_NOSPEC "call *%[thunk_target]\n"
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#endif
+
+/* The Spectre V2 mitigation variants */
+enum spectre_v2_mitigation {
+       SPECTRE_V2_NONE,
+       SPECTRE_V2_RETPOLINE_MINIMAL,
+       SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
+       SPECTRE_V2_RETPOLINE_GENERIC,
+       SPECTRE_V2_RETPOLINE_AMD,
+       SPECTRE_V2_IBRS,
+};
+
+extern char __indirect_thunk_start[];
+extern char __indirect_thunk_end[];
+
+/*
+ * On VMEXIT we must ensure that no RSB predictions learned in the guest
+ * can be followed in the host, by overwriting the RSB completely. Both
+ * retpoline and IBRS mitigations for Spectre v2 need this; only on future
+ * CPUs with IBRS_ATT *might* it be avoided.
+ */
+static inline void vmexit_fill_RSB(void)
+{
+#ifdef CONFIG_RETPOLINE
+       unsigned long loops;
+
+       asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
+                     ALTERNATIVE("jmp 910f",
+                                 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+                                 X86_FEATURE_RETPOLINE)
+                     "910:"
+                     : "=r" (loops), ASM_CALL_CONSTRAINT
+                     : : "memory" );
+#endif
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __NOSPEC_BRANCH_H__ */
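
As a rough software model of what the fill loop buys: the return stack buffer is a small LIFO of predicted return targets, and pushing RSB_CLEAR_LOOPS benign entries guarantees every slot now points at the speculation trap instead of whatever a guest or sibling context trained into it. The depth below is an assumption and the whole thing illustrates the idea, not the micro-architecture:

    #include <stdio.h>

    #define RSB_DEPTH       16      /* assumed predictor depth, model only */
    #define RSB_CLEAR_LOOPS 32

    static unsigned long rsb[RSB_DEPTH];    /* toy LIFO of predicted return targets */
    static unsigned int top;

    static void rsb_push(unsigned long target)
    {
            top = (top + 1) % RSB_DEPTH;
            rsb[top] = target;
    }

    int main(void)
    {
            const unsigned long guest_target = 0xdead0000;  /* trained before VMEXIT */
            const unsigned long spec_trap    = 0x1000;      /* the pause;lfence;jmp loop */
            unsigned int i, stale = 0;

            for (i = 0; i < RSB_DEPTH; i++)
                    rsb_push(guest_target);

            /* What FILL_RETURN_BUFFER amounts to: RSB_CLEAR_LOOPS calls into the trap. */
            for (i = 0; i < RSB_CLEAR_LOOPS; i++)
                    rsb_push(spec_trap);

            for (i = 0; i < RSB_DEPTH; i++)
                    if (rsb[i] != spec_trap)
                            stale++;

            printf("stale entries left after the fill: %u\n", stale);
            return 0;
    }
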
index 283efca..892df37 100644 (file)
@@ -927,6 +927,15 @@ extern void default_banner(void);
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(clobbers)                                        \
+       PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
+                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
+                 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl);    \
+                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+#endif
+
 #endif /* CONFIG_X86_32 */
 
 #endif /* __ASSEMBLY__ */
index 7a5d669..eb66fa9 100644 (file)
@@ -38,6 +38,7 @@ do {                                          \
 #define PCI_NOASSIGN_ROMS      0x80000
 #define PCI_ROOT_NO_CRS                0x100000
 #define PCI_NOASSIGN_BARS      0x200000
+#define PCI_BIG_ROOT_WINDOW    0x400000
 
 extern unsigned int pci_probe;
 extern unsigned long pirq_table_addr;
index 4b5e1ea..aff42e1 100644 (file)
@@ -30,6 +30,17 @@ static inline void paravirt_release_p4d(unsigned long pfn) {}
  */
 extern gfp_t __userpte_alloc_gfp;
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * Instead of one PGD, we acquire two PGDs.  Being an order-1 allocation,
+ * the pair is both 8k in size and 8k-aligned.  That lets us just flip bit 12
+ * in a pointer to swap between the two 4k halves.
+ */
+#define PGD_ALLOCATION_ORDER 1
+#else
+#define PGD_ALLOCATION_ORDER 0
+#endif
+
 /*
  * Allocate and free page tables.
  */
index 09f9e1e..e42b894 100644 (file)
@@ -28,6 +28,7 @@ extern pgd_t early_top_pgt[PTRS_PER_PGD];
 int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
 
 void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
+void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
 void ptdump_walk_pgd_level_checkwx(void);
 
 #ifdef CONFIG_DEBUG_WX
@@ -841,7 +842,12 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
 
 static inline int p4d_bad(p4d_t p4d)
 {
-       return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
+       unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;
+
+       if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
+               ignore_flags |= _PAGE_NX;
+
+       return (p4d_flags(p4d) & ~ignore_flags) != 0;
 }
 #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
 
@@ -875,7 +881,12 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 
 static inline int pgd_bad(pgd_t pgd)
 {
-       return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+       unsigned long ignore_flags = _PAGE_USER;
+
+       if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
+               ignore_flags |= _PAGE_NX;
+
+       return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
 }
 
 static inline int pgd_none(pgd_t pgd)
@@ -904,7 +915,11 @@ static inline int pgd_none(pgd_t pgd)
  * pgd_offset() returns a (pgd_t *)
  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
  */
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
+#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
+/*
+ * a shortcut to get a pgd_t in a given mm
+ */
+#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
 /*
  * a shortcut which implies the use of the kernel's pgd, instead
  * of a process's
@@ -1061,7 +1076,7 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);
 
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return pmd_flags(pmd) & _PAGE_RW;
@@ -1088,6 +1103,12 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
 }
 
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+       return pud_flags(pud) & _PAGE_RW;
+}
+
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
@@ -1100,7 +1121,14 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
  */
 static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
 {
-       memcpy(dst, src, count * sizeof(pgd_t));
+       memcpy(dst, src, count * sizeof(pgd_t));
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return;
+       /* Clone the user space pgd as well */
+       memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
+              count * sizeof(pgd_t));
+#endif
 }
 
 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
index f2ca9b2..ce245b0 100644 (file)
@@ -38,13 +38,22 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
 #define LAST_PKMAP 1024
 #endif
 
-#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1))     \
-                   & PMD_MASK)
+/*
+ * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
+ * to avoid include recursion hell
+ */
+#define CPU_ENTRY_AREA_PAGES   (NR_CPUS * 40)
+
+#define CPU_ENTRY_AREA_BASE                            \
+       ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK)
+
+#define PKMAP_BASE             \
+       ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
 
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END   (PKMAP_BASE - 2 * PAGE_SIZE)
 #else
-# define VMALLOC_END   (FIXADDR_START - 2 * PAGE_SIZE)
+# define VMALLOC_END   (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
 #endif
 
 #define MODULES_VADDR  VMALLOC_START
index e9f0533..81462e9 100644 (file)
@@ -131,9 +131,97 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp)
 #endif
 }
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
+ * (8k-aligned and 8k in size).  The kernel one occupies the first 4k and
+ * the user one the last 4k.  To switch between them, you just need to
+ * flip bit 12 (PAGE_SHIFT) in their addresses.
+ */
+#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT
+
+/*
+ * This generates better code than the inline assembly in
+ * __set_bit().
+ */
+static inline void *ptr_set_bit(void *ptr, int bit)
+{
+       unsigned long __ptr = (unsigned long)ptr;
+
+       __ptr |= BIT(bit);
+       return (void *)__ptr;
+}
+static inline void *ptr_clear_bit(void *ptr, int bit)
+{
+       unsigned long __ptr = (unsigned long)ptr;
+
+       __ptr &= ~BIT(bit);
+       return (void *)__ptr;
+}
+
+static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
+{
+       return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
+{
+       return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
+{
+       return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
+{
+       return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
+/*
+ * Page table pages are page-aligned.  The lower half of the top
+ * level is used for userspace and the top half for the kernel.
+ *
+ * Returns true for parts of the PGD that map userspace and
+ * false for the parts that map the kernel.
+ */
+static inline bool pgdp_maps_userspace(void *__ptr)
+{
+       unsigned long ptr = (unsigned long)__ptr;
+
+       return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
+}
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd);
+
+/*
+ * Take a PGD location (pgdp) and a pgd value that needs to be set there.
+ * Populates the user PGD and returns the resulting PGD that must be set in
+ * the kernel copy of the page tables.
+ */
+static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return pgd;
+       return __pti_set_user_pgd(pgdp, pgd);
+}
+#else
+static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+       return pgd;
+}
+#endif
+
 static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
+#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL)
+       p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd);
+#else
        *p4dp = p4d;
+#endif
 }
 
 static inline void native_p4d_clear(p4d_t *p4d)
@@ -147,7 +235,11 @@ static inline void native_p4d_clear(p4d_t *p4d)
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       *pgdp = pti_set_user_pgd(pgdp, pgd);
+#else
        *pgdp = pgd;
+#endif
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
index 6d5f45d..6b8f73d 100644 (file)
@@ -75,33 +75,52 @@ typedef struct { pteval_t pte; } pte_t;
 #define PGDIR_SIZE     (_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK     (~(PGDIR_SIZE - 1))
 
-/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
-#define MAXMEM         _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+/*
+ * See Documentation/x86/x86_64/mm.txt for a description of the memory map.
+ *
+ * Be very careful vs. KASLR when changing anything here. The KASLR address
+ * range must not overlap with anything except the KASAN shadow area, which
+ * is correct as KASAN disables KASLR.
+ */
+#define MAXMEM                 _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+
 #ifdef CONFIG_X86_5LEVEL
-#define VMALLOC_SIZE_TB _AC(16384, UL)
-#define __VMALLOC_BASE _AC(0xff92000000000000, UL)
-#define __VMEMMAP_BASE _AC(0xffd4000000000000, UL)
+# define VMALLOC_SIZE_TB       _AC(12800, UL)
+# define __VMALLOC_BASE                _AC(0xffa0000000000000, UL)
+# define __VMEMMAP_BASE                _AC(0xffd4000000000000, UL)
+# define LDT_PGD_ENTRY         _AC(-112, UL)
+# define LDT_BASE_ADDR         (LDT_PGD_ENTRY << PGDIR_SHIFT)
 #else
-#define VMALLOC_SIZE_TB        _AC(32, UL)
-#define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
-#define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
+# define VMALLOC_SIZE_TB       _AC(32, UL)
+# define __VMALLOC_BASE                _AC(0xffffc90000000000, UL)
+# define __VMEMMAP_BASE                _AC(0xffffea0000000000, UL)
+# define LDT_PGD_ENTRY         _AC(-3, UL)
+# define LDT_BASE_ADDR         (LDT_PGD_ENTRY << PGDIR_SHIFT)
 #endif
+
 #ifdef CONFIG_RANDOMIZE_MEMORY
-#define VMALLOC_START  vmalloc_base
-#define VMEMMAP_START  vmemmap_base
+# define VMALLOC_START         vmalloc_base
+# define VMEMMAP_START         vmemmap_base
 #else
-#define VMALLOC_START  __VMALLOC_BASE
-#define VMEMMAP_START  __VMEMMAP_BASE
+# define VMALLOC_START         __VMALLOC_BASE
+# define VMEMMAP_START         __VMEMMAP_BASE
 #endif /* CONFIG_RANDOMIZE_MEMORY */
-#define VMALLOC_END    (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
-#define MODULES_VADDR    (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
+
+#define VMALLOC_END            (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
+
+#define MODULES_VADDR          (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
 /* The module sections ends with the start of the fixmap */
-#define MODULES_END   __fix_to_virt(__end_of_fixed_addresses + 1)
-#define MODULES_LEN   (MODULES_END - MODULES_VADDR)
-#define ESPFIX_PGD_ENTRY _AC(-2, UL)
-#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)
-#define EFI_VA_START    ( -4 * (_AC(1, UL) << 30))
-#define EFI_VA_END      (-68 * (_AC(1, UL) << 30))
+#define MODULES_END            _AC(0xffffffffff000000, UL)
+#define MODULES_LEN            (MODULES_END - MODULES_VADDR)
+
+#define ESPFIX_PGD_ENTRY       _AC(-2, UL)
+#define ESPFIX_BASE_ADDR       (ESPFIX_PGD_ENTRY << P4D_SHIFT)
+
+#define CPU_ENTRY_AREA_PGD     _AC(-4, UL)
+#define CPU_ENTRY_AREA_BASE    (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
+
+#define EFI_VA_START           ( -4 * (_AC(1, UL) << 30))
+#define EFI_VA_END             (-68 * (_AC(1, UL) << 30))
 
 #define EARLY_DYNAMIC_PAGE_TABLES      64
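
For reference, a worked example of the negative top-level-slot arithmetic above, assuming 4-level paging where PGDIR_SHIFT == P4D_SHIFT == 39 (each top-level entry covers 512 GiB) and a 64-bit unsigned long; the CONFIG_X86_5LEVEL values differ:

#include <stdio.h>

#define PGDIR_SHIFT     39
#define P4D_SHIFT       39

int main(void)
{
        unsigned long ldt_base    = -3UL << PGDIR_SHIFT;        /* LDT_PGD_ENTRY = -3     */
        unsigned long espfix_base = -2UL << P4D_SHIFT;          /* ESPFIX_PGD_ENTRY = -2  */
        unsigned long cea_base    = -4UL << P4D_SHIFT;          /* CPU_ENTRY_AREA_PGD = -4 */

        printf("LDT_BASE_ADDR:       0x%016lx\n", ldt_base);    /* 0xfffffe8000000000 */
        printf("ESPFIX_BASE_ADDR:    0x%016lx\n", espfix_base); /* 0xffffff0000000000 */
        printf("CPU_ENTRY_AREA_BASE: 0x%016lx\n", cea_base);    /* 0xfffffe0000000000 */
        return 0;
}
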
 
index 43212a4..625a52a 100644 (file)
 #define CR3_ADDR_MASK  __sme_clr(0x7FFFFFFFFFFFF000ull)
 #define CR3_PCID_MASK  0xFFFull
 #define CR3_NOFLUSH    BIT_ULL(63)
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define X86_CR3_PTI_PCID_USER_BIT     11
+#endif
+
 #else
 /*
  * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
index cc16fa8..d3a67fb 100644 (file)
@@ -163,9 +163,9 @@ enum cpuid_regs_idx {
 extern struct cpuinfo_x86      boot_cpu_data;
 extern struct cpuinfo_x86      new_cpu_data;
 
-extern struct tss_struct       doublefault_tss;
-extern __u32                   cpu_caps_cleared[NCAPINTS];
-extern __u32                   cpu_caps_set[NCAPINTS];
+extern struct x86_hw_tss       doublefault_tss;
+extern __u32                   cpu_caps_cleared[NCAPINTS + NBUGINTS];
+extern __u32                   cpu_caps_set[NCAPINTS + NBUGINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
@@ -253,6 +253,11 @@ static inline void load_cr3(pgd_t *pgdir)
        write_cr3(__sme_pa(pgdir));
 }
 
+/*
+ * Note that while the legacy 'TSS' name comes from 'Task State Segment',
+ * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
+ * unrelated to the task-switch mechanism:
+ */
 #ifdef CONFIG_X86_32
 /* This is the TSS defined by the hardware. */
 struct x86_hw_tss {
@@ -305,7 +310,13 @@ struct x86_hw_tss {
 struct x86_hw_tss {
        u32                     reserved1;
        u64                     sp0;
+
+       /*
+        * We store cpu_current_top_of_stack in sp1 so it's always accessible.
+        * Linux does not use ring 1, so sp1 is not otherwise needed.
+        */
        u64                     sp1;
+
        u64                     sp2;
        u64                     reserved2;
        u64                     ist[7];
@@ -323,12 +334,22 @@ struct x86_hw_tss {
 #define IO_BITMAP_BITS                 65536
 #define IO_BITMAP_BYTES                        (IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS                        (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET               offsetof(struct tss_struct, io_bitmap)
+#define IO_BITMAP_OFFSET               (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
 #define INVALID_IO_BITMAP_OFFSET       0x8000
 
+struct entry_stack {
+       unsigned long           words[64];
+};
+
+struct entry_stack_page {
+       struct entry_stack stack;
+} __aligned(PAGE_SIZE);
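
A quick userspace check of the sizing above, assuming 4 KiB pages and 8-byte longs: the entry stack itself is 512 bytes, and the page alignment pads the containing struct out to a full page.

#include <stdio.h>

struct entry_stack {
        unsigned long           words[64];
};

struct entry_stack_page {
        struct entry_stack stack;
} __attribute__((aligned(4096)));

int main(void)
{
        printf("entry_stack:      %zu bytes\n", sizeof(struct entry_stack));      /* 512  */
        printf("entry_stack_page: %zu bytes\n", sizeof(struct entry_stack_page)); /* 4096 */
        return 0;
}
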
+
 struct tss_struct {
        /*
-        * The hardware state:
+        * The fixed hardware portion.  This must not cross a page boundary
+        * at risk of violating the SDM's advice and potentially triggering
+        * errata.
         */
        struct x86_hw_tss       x86_tss;
 
@@ -339,18 +360,9 @@ struct tss_struct {
         * be within the limit.
         */
        unsigned long           io_bitmap[IO_BITMAP_LONGS + 1];
+} __aligned(PAGE_SIZE);
 
-#ifdef CONFIG_X86_32
-       /*
-        * Space for the temporary SYSENTER stack.
-        */
-       unsigned long           SYSENTER_stack_canary;
-       unsigned long           SYSENTER_stack[64];
-#endif
-
-} ____cacheline_aligned;
-
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
 
 /*
  * sizeof(unsigned long) coming from an extra "long" at the end
@@ -364,6 +376,9 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
 
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+#else
+/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
+#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
 #endif
 
 /*
@@ -523,7 +538,7 @@ static inline void native_set_iopl_mask(unsigned mask)
 static inline void
 native_load_sp0(unsigned long sp0)
 {
-       this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+       this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 static inline void native_swapgs(void)
@@ -535,12 +550,12 @@ static inline void native_swapgs(void)
 
 static inline unsigned long current_top_of_stack(void)
 {
-#ifdef CONFIG_X86_64
-       return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
-#else
-       /* sp0 on x86_32 is special in and around vm86 mode. */
+       /*
+        *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
+        *  and around vm86 mode and sp0 on x86_64 is special because of the
+        *  entry trampoline.
+        */
        return this_cpu_read_stable(cpu_current_top_of_stack);
-#endif
 }
 
 static inline bool on_thread_stack(void)
@@ -837,13 +852,22 @@ static inline void spin_lock_prefetch(const void *x)
 
 #else
 /*
- * User space process size. 47bits minus one guard page.  The guard
- * page is necessary on Intel CPUs: if a SYSCALL instruction is at
- * the highest possible canonical userspace address, then that
- * syscall will enter the kernel with a non-canonical return
- * address, and SYSRET will explode dangerously.  We avoid this
- * particular problem by preventing anything from being mapped
- * at the maximum canonical address.
+ * User space process size.  This is the first address outside the user range.
+ * There are a few constraints that determine this:
+ *
+ * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
+ * address, then that syscall will enter the kernel with a
+ * non-canonical return address, and SYSRET will explode dangerously.
+ * We avoid this particular problem by preventing anything executable
+ * from being mapped at the maximum canonical address.
+ *
+ * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
+ * CPUs malfunction if they execute code from the highest canonical page.
+ * They'll speculate right off the end of the canonical space, and
+ * bad things happen.  This is worked around in the same way as the
+ * Intel problem.
+ *
+ * With page table isolation enabled, we map the LDT in ... [stay tuned]
  */
 #define TASK_SIZE_MAX  ((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)
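
Worked example, assuming the 4-level __VIRTUAL_MASK_SHIFT of 47, 4 KiB pages and an LP64 build:

#include <stdio.h>

int main(void)
{
        /* (1UL << 47) - 4096: the 47-bit user range minus one guard page. */
        unsigned long task_size_max = (1UL << 47) - 4096;

        printf("TASK_SIZE_MAX = 0x%016lx\n", task_size_max);    /* 0x00007ffffffff000 */
        return 0;
}
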
 
diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h
new file mode 100644 (file)
index 0000000..0b5ef05
--- /dev/null
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _ASM_X86_PTI_H
+#define _ASM_X86_PTI_H
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+extern void pti_init(void);
+extern void pti_check_boottime_disable(void);
+#else
+static inline void pti_check_boottime_disable(void) { }
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_PTI_H */
index b20f9d6..8f09012 100644 (file)
  */
 #define EARLY_IDT_HANDLER_SIZE 9
 
+/*
+ * xen_early_idt_handler_array is for Xen pv guests: for each entry in
+ * early_idt_handler_array it contains a prequel in the form of
+ * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
+ * max 8 bytes.
+ */
+#define XEN_EARLY_IDT_HANDLER_SIZE 8
+
 #ifndef __ASSEMBLY__
 
 extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 extern void early_ignore_irq(void);
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
+#endif
+
 /*
  * Load a segment. Fall back on loading the zero segment if something goes
  * wrong.  This variant assumes that loading zero fully clears the segment.
index 8da111b..f737068 100644 (file)
@@ -16,6 +16,7 @@ enum stack_type {
        STACK_TYPE_TASK,
        STACK_TYPE_IRQ,
        STACK_TYPE_SOFTIRQ,
+       STACK_TYPE_ENTRY,
        STACK_TYPE_EXCEPTION,
        STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1,
 };
@@ -28,6 +29,8 @@ struct stack_info {
 bool in_task_stack(unsigned long *stack, struct task_struct *task,
                   struct stack_info *info);
 
+bool in_entry_stack(unsigned long *stack, struct stack_info *info);
+
 int get_stack_info(unsigned long *stack, struct task_struct *task,
                   struct stack_info *info, unsigned long *visit_mask);
 
index 982c325..8be6afb 100644 (file)
 
 /* image of the saved processor state */
 struct saved_context {
-       u16 es, fs, gs, ss;
+       /*
+        * On x86_32, all segment registers, with the possible exception of
+        * gs, are saved at kernel entry in pt_regs.
+        */
+#ifdef CONFIG_X86_32_LAZY_GS
+       u16 gs;
+#endif
        unsigned long cr0, cr2, cr3, cr4;
        u64 misc_enable;
        bool misc_enable_saved;
index 7306e91..a7af9f5 100644 (file)
  */
 struct saved_context {
        struct pt_regs regs;
-       u16 ds, es, fs, gs, ss;
-       unsigned long gs_base, gs_kernel_base, fs_base;
+
+       /*
+        * User CS and SS are saved in current_pt_regs().  The rest of the
+        * segment selectors need to be saved and restored here.
+        */
+       u16 ds, es, fs, gs;
+
+       /*
+        * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
+        * so we save them separately.  We save the kernelmode GSBASE to
+        * restore percpu access after resume.
+        */
+       unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
+
        unsigned long cr0, cr2, cr3, cr4, cr8;
        u64 misc_enable;
        bool misc_enable_saved;
@@ -30,8 +42,7 @@ struct saved_context {
        u16 gdt_pad; /* Unused */
        struct desc_ptr gdt_desc;
        u16 idt_pad;
-       u16 idt_limit;
-       unsigned long idt_base;
+       struct desc_ptr idt;
        u16 ldt;
        u16 tss;
        unsigned long tr;
index 8c6bd68..eb5f799 100644 (file)
@@ -16,8 +16,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss);
 
 /* This runs on the previous thread's stack. */
-static inline void prepare_switch_to(struct task_struct *prev,
-                                    struct task_struct *next)
+static inline void prepare_switch_to(struct task_struct *next)
 {
 #ifdef CONFIG_VMAP_STACK
        /*
@@ -70,7 +69,7 @@ struct fork_frame {
 
 #define switch_to(prev, next, last)                                    \
 do {                                                                   \
-       prepare_switch_to(prev, next);                                  \
+       prepare_switch_to(next);                                        \
                                                                        \
        ((last) = __switch_to_asm((prev), (next)));                     \
 } while (0)
@@ -79,10 +78,10 @@ do {                                                                        \
 static inline void refresh_sysenter_cs(struct thread_struct *thread)
 {
        /* Only happens when SEP is enabled, no need to test "SEP"arately: */
-       if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
+       if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
                return;
 
-       this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
+       this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
        wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 }
 #endif
@@ -90,10 +89,12 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
 /* This is used when switching tasks or entering/exiting vm86 mode. */
 static inline void update_sp0(struct task_struct *task)
 {
+       /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
 #ifdef CONFIG_X86_32
        load_sp0(task->thread.sp0);
 #else
-       load_sp0(task_top_of_stack(task));
+       if (static_cpu_has(X86_FEATURE_XENPV))
+               load_sp0(task_top_of_stack(task));
 #endif
 }
 
index 70f4259..d25a638 100644 (file)
@@ -62,8 +62,6 @@ struct thread_info {
        .flags          = 0,                    \
 }
 
-#define init_stack             (init_thread_union.stack)
-
 #else /* !__ASSEMBLY__ */
 
 #include <asm/asm-offsets.h>
@@ -207,7 +205,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
+# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
 #endif
 
 #endif
index 509046c..d33e4a2 100644 (file)
 #include <asm/cpufeature.h>
 #include <asm/special_insns.h>
 #include <asm/smp.h>
+#include <asm/invpcid.h>
+#include <asm/pti.h>
+#include <asm/processor-flags.h>
 
-static inline void __invpcid(unsigned long pcid, unsigned long addr,
-                            unsigned long type)
-{
-       struct { u64 d[2]; } desc = { { pcid, addr } };
+/*
+ * The x86 feature is called PCID (Process Context IDentifier). It is similar
+ * to what is traditionally called ASID on the RISC processors.
+ *
+ * We don't use the traditional ASID implementation, where each process/mm gets
+ * its own ASID and flush/restart when we run out of ASID space.
+ *
+ * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
+ * that came by on this CPU, allowing cheaper switch_mm between processes on
+ * this CPU.
+ *
+ * We end up with different spaces for different things. To avoid confusion we
+ * use different names for each of them:
+ *
+ * ASID  - [0, TLB_NR_DYN_ASIDS-1]
+ *         the canonical identifier for an mm
+ *
+ * kPCID - [1, TLB_NR_DYN_ASIDS]
+ *         the value we write into the PCID part of CR3; corresponds to the
+ *         ASID+1, because PCID 0 is special.
+ *
+ * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
+ *         for KPTI each mm has two address spaces and thus needs two
+ *         PCID values, but we can still do with a single ASID denomination
+ *         for each mm. Corresponds to kPCID + 2048.
+ *
+ */
 
-       /*
-        * The memory clobber is because the whole point is to invalidate
-        * stale TLB entries and, especially if we're flushing global
-        * mappings, we don't want the compiler to reorder any subsequent
-        * memory accesses before the TLB flush.
-        *
-        * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
-        * invpcid (%rcx), %rax in long mode.
-        */
-       asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
-                     : : "m" (desc), "a" (type), "c" (&desc) : "memory");
-}
+/* There are 12 bits of space for ASIDS in CR3 */
+#define CR3_HW_ASID_BITS               12
 
-#define INVPCID_TYPE_INDIV_ADDR                0
-#define INVPCID_TYPE_SINGLE_CTXT       1
-#define INVPCID_TYPE_ALL_INCL_GLOBAL   2
-#define INVPCID_TYPE_ALL_NON_GLOBAL    3
+/*
+ * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
+ * user/kernel switches
+ */
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define PTI_CONSUMED_PCID_BITS        1
+#else
+# define PTI_CONSUMED_PCID_BITS        0
+#endif
 
-/* Flush all mappings for a given pcid and addr, not including globals. */
-static inline void invpcid_flush_one(unsigned long pcid,
-                                    unsigned long addr)
-{
-       __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
-}
+#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
+
+/*
+ * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid.  -1 below to account
+ * for them being zero-based.  Another -1 is because PCID 0 is reserved for
+ * use by non-PCID-aware users.
+ */
+#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
 
-/* Flush all mappings for a given PCID, not including globals. */
-static inline void invpcid_flush_single_context(unsigned long pcid)
+/*
+ * 6 because 6 should be plenty and struct tlb_state will fit in two cache
+ * lines.
+ */
+#define TLB_NR_DYN_ASIDS       6
+
+/*
+ * Given @asid, compute kPCID
+ */
+static inline u16 kern_pcid(u16 asid)
 {
-       __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
+       VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       /*
+        * Make sure that the dynamic ASID space does not conflict with the
+        * bit we are using to switch between user and kernel ASIDs.
+        */
+       BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
+
+       /*
+        * The ASID being passed in here should have respected the
+        * MAX_ASID_AVAILABLE and thus never have the switch bit set.
+        */
+       VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
+#endif
+       /*
+        * The dynamically-assigned ASIDs that get passed in are small
+        * (<TLB_NR_DYN_ASIDS).  They never have the high switch bit set,
+        * so do not bother to clear it.
+        *
+        * If PCID is on, ASID-aware code paths put the ASID+1 into the
+        * PCID bits.  This serves two purposes.  It prevents a nasty
+        * situation in which PCID-unaware code saves CR3, loads some other
+        * value (with PCID == 0), and then restores CR3, thus corrupting
+        * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
+        * that any bugs involving loading a PCID-enabled CR3 with
+        * CR4.PCIDE off will trigger deterministically.
+        */
+       return asid + 1;
 }
 
-/* Flush all mappings, including globals, for all PCIDs. */
-static inline void invpcid_flush_all(void)
+/*
+ * Given @asid, compute uPCID
+ */
+static inline u16 user_pcid(u16 asid)
 {
-       __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
+       u16 ret = kern_pcid(asid);
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
+#endif
+       return ret;
 }
 
-/* Flush all mappings for all PCIDs except globals. */
-static inline void invpcid_flush_all_nonglobals(void)
+struct pgd_t;
+static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
 {
-       __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
+       if (static_cpu_has(X86_FEATURE_PCID)) {
+               return __sme_pa(pgd) | kern_pcid(asid);
+       } else {
+               VM_WARN_ON_ONCE(asid != 0);
+               return __sme_pa(pgd);
+       }
 }
 
-static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
+static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 {
-       u64 new_tlb_gen;
-
-       /*
-        * Bump the generation count.  This also serves as a full barrier
-        * that synchronizes with switch_mm(): callers are required to order
-        * their read of mm_cpumask after their writes to the paging
-        * structures.
-        */
-       smp_mb__before_atomic();
-       new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
-       smp_mb__after_atomic();
-
-       return new_tlb_gen;
+       VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+       VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
+       return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
 }
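
A small userspace sketch of the ASID -> kPCID/uPCID mapping and the resulting CR3 value, assuming PTI is enabled (the user bit is CR3 bit 11) and using a made-up page-table physical address:

#include <stdint.h>
#include <stdio.h>

#define X86_CR3_PTI_PCID_USER_BIT       11
#define CR3_NOFLUSH                     (1ULL << 63)

static uint16_t kern_pcid(uint16_t asid)
{
        return asid + 1;        /* PCID 0 is reserved for non-PCID-aware use */
}

static uint16_t user_pcid(uint16_t asid)
{
        return kern_pcid(asid) | (1 << X86_CR3_PTI_PCID_USER_BIT);
}

int main(void)
{
        uint64_t pgd_pa = 0x1234000;    /* made-up physical address */
        uint16_t asid = 3;

        printf("kPCID(%d) = %d\n", asid, kern_pcid(asid));      /* 4    */
        printf("uPCID(%d) = %d\n", asid, user_pcid(asid));      /* 2052 */
        printf("CR3 (noflush) = 0x%016llx\n",
               (unsigned long long)(pgd_pa | kern_pcid(asid) | CR3_NOFLUSH));
        return 0;
}
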
 
 #ifdef CONFIG_PARAVIRT
@@ -99,12 +159,6 @@ static inline bool tlb_defer_switch_to_init_mm(void)
        return !static_cpu_has(X86_FEATURE_PCID);
 }
 
-/*
- * 6 because 6 should be plenty and struct tlb_state will fit in
- * two cache lines.
- */
-#define TLB_NR_DYN_ASIDS 6
-
 struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
@@ -138,6 +192,24 @@ struct tlb_state {
         */
        bool is_lazy;
 
+       /*
+        * If set we changed the page tables in such a way that we
+        * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
+        * This tells us to go invalidate all the non-loaded ctxs[]
+        * on the next context switch.
+        *
+        * The current ctx was kept up-to-date as it ran and does not
+        * need to be invalidated.
+        */
+       bool invalidate_other;
+
+       /*
+        * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
+        * the corresponding user PCID needs a flush next time we
+        * switch to it; see SWITCH_TO_USER_CR3.
+        */
+       unsigned short user_pcid_flush_mask;
+
        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
@@ -173,40 +245,43 @@ static inline void cr4_init_shadow(void)
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
 }
 
+static inline void __cr4_set(unsigned long cr4)
+{
+       lockdep_assert_irqs_disabled();
+       this_cpu_write(cpu_tlbstate.cr4, cr4);
+       __write_cr4(cr4);
+}
+
 /* Set in this cpu's CR4. */
 static inline void cr4_set_bits(unsigned long mask)
 {
-       unsigned long cr4;
+       unsigned long cr4, flags;
 
+       local_irq_save(flags);
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       if ((cr4 | mask) != cr4) {
-               cr4 |= mask;
-               this_cpu_write(cpu_tlbstate.cr4, cr4);
-               __write_cr4(cr4);
-       }
+       if ((cr4 | mask) != cr4)
+               __cr4_set(cr4 | mask);
+       local_irq_restore(flags);
 }
 
 /* Clear in this cpu's CR4. */
 static inline void cr4_clear_bits(unsigned long mask)
 {
-       unsigned long cr4;
+       unsigned long cr4, flags;
 
+       local_irq_save(flags);
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       if ((cr4 & ~mask) != cr4) {
-               cr4 &= ~mask;
-               this_cpu_write(cpu_tlbstate.cr4, cr4);
-               __write_cr4(cr4);
-       }
+       if ((cr4 & ~mask) != cr4)
+               __cr4_set(cr4 & ~mask);
+       local_irq_restore(flags);
 }
 
-static inline void cr4_toggle_bits(unsigned long mask)
+static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
 {
        unsigned long cr4;
 
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       cr4 ^= mask;
-       this_cpu_write(cpu_tlbstate.cr4, cr4);
-       __write_cr4(cr4);
+       __cr4_set(cr4 ^ mask);
 }
 
 /* Read the CR4 shadow. */
@@ -215,6 +290,14 @@ static inline unsigned long cr4_read_shadow(void)
        return this_cpu_read(cpu_tlbstate.cr4);
 }
 
+/*
+ * Mark all other ASIDs as invalid, preserves the current.
+ */
+static inline void invalidate_other_asid(void)
+{
+       this_cpu_write(cpu_tlbstate.invalidate_other, true);
+}
+
 /*
  * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
  * enable and PPro Global page enable), so that any CPUs that boot
@@ -234,37 +317,63 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
 
 extern void initialize_tlbstate_and_flush(void);
 
-static inline void __native_flush_tlb(void)
+/*
+ * Given an ASID, flush the corresponding user ASID.  We can delay this
+ * until the next time we switch to it.
+ *
+ * See SWITCH_TO_USER_CR3.
+ */
+static inline void invalidate_user_asid(u16 asid)
 {
+       /* There is no user ASID if address space separation is off */
+       if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
+               return;
+
        /*
-        * If current->mm == NULL then we borrow a mm which may change during a
-        * task switch and therefore we must not be preempted while we write CR3
-        * back:
+        * We only have a single ASID if PCID is off and the CR3
+        * write will have flushed it.
         */
-       preempt_disable();
-       native_write_cr3(__native_read_cr3());
-       preempt_enable();
+       if (!cpu_feature_enabled(X86_FEATURE_PCID))
+               return;
+
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return;
+
+       __set_bit(kern_pcid(asid),
+                 (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
 }
 
-static inline void __native_flush_tlb_global_irq_disabled(void)
+/*
+ * flush the entire current user mapping
+ */
+static inline void __native_flush_tlb(void)
 {
-       unsigned long cr4;
+       /*
+        * Preemption or interrupts must be disabled to protect the access
+        * to the per CPU variable and to prevent being preempted between
+        * read_cr3() and write_cr3().
+        */
+       WARN_ON_ONCE(preemptible());
 
-       cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       /* clear PGE */
-       native_write_cr4(cr4 & ~X86_CR4_PGE);
-       /* write old PGE again and flush TLBs */
-       native_write_cr4(cr4);
+       invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+
+       /* If current->mm == NULL then the read_cr3() "borrows" an mm */
+       native_write_cr3(__native_read_cr3());
 }
 
+/*
+ * flush everything
+ */
 static inline void __native_flush_tlb_global(void)
 {
-       unsigned long flags;
+       unsigned long cr4, flags;
 
        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
                 * Using INVPCID is considerably faster than a pair of writes
                 * to CR4 sandwiched inside an IRQ flag save/restore.
+                *
+                * Note, this works with CR4.PCIDE=0 or 1.
                 */
                invpcid_flush_all();
                return;
@@ -277,36 +386,69 @@ static inline void __native_flush_tlb_global(void)
         */
        raw_local_irq_save(flags);
 
-       __native_flush_tlb_global_irq_disabled();
+       cr4 = this_cpu_read(cpu_tlbstate.cr4);
+       /* toggle PGE */
+       native_write_cr4(cr4 ^ X86_CR4_PGE);
+       /* write old PGE again and flush TLBs */
+       native_write_cr4(cr4);
 
        raw_local_irq_restore(flags);
 }
 
+/*
+ * flush one page in the user mapping
+ */
 static inline void __native_flush_tlb_single(unsigned long addr)
 {
+       u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+
        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return;
+
+       /*
+        * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
+        * Just use invalidate_user_asid() in case we are called early.
+        */
+       if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
+               invalidate_user_asid(loaded_mm_asid);
+       else
+               invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
 }
 
+/*
+ * flush everything
+ */
 static inline void __flush_tlb_all(void)
 {
-       if (boot_cpu_has(X86_FEATURE_PGE))
+       if (boot_cpu_has(X86_FEATURE_PGE)) {
                __flush_tlb_global();
-       else
+       } else {
+               /*
+                * !PGE -> !PCID (setup_pcid()), thus every flush is total.
+                */
                __flush_tlb();
-
-       /*
-        * Note: if we somehow had PCID but not PGE, then this wouldn't work --
-        * we'd end up flushing kernel translations for the current ASID but
-        * we might fail to flush kernel translations for other cached ASIDs.
-        *
-        * To avoid this issue, we force PCID off if PGE is off.
-        */
+       }
 }
 
+/*
+ * flush one page in the kernel mapping
+ */
 static inline void __flush_tlb_one(unsigned long addr)
 {
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
        __flush_tlb_single(addr);
+
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return;
+
+       /*
+        * __flush_tlb_single() will have cleared the TLB entry for this ASID,
+        * but since kernel space is replicated across all ASIDs, we must
+        * also invalidate all the others.
+        */
+       invalidate_other_asid();
 }
 
 #define TLB_FLUSH_ALL  -1UL
@@ -367,6 +509,17 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info);
 
+static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
+{
+       /*
+        * Bump the generation count.  This also serves as a full barrier
+        * that synchronizes with switch_mm(): callers are required to order
+        * their read of mm_cpumask after their writes to the paging
+        * structures.
+        */
+       return atomic64_inc_return(&mm->context.tlb_gen);
+}
+
 static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
                                        struct mm_struct *mm)
 {
index 84b9ec0..22647a6 100644 (file)
@@ -283,34 +283,34 @@ TRACE_EVENT(vector_alloc_managed,
 DECLARE_EVENT_CLASS(vector_activate,
 
        TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve,
-                bool early),
+                bool reserve),
 
-       TP_ARGS(irq, is_managed, can_reserve, early),
+       TP_ARGS(irq, is_managed, can_reserve, reserve),
 
        TP_STRUCT__entry(
                __field(        unsigned int,   irq             )
                __field(        bool,           is_managed      )
                __field(        bool,           can_reserve     )
-               __field(        bool,           early           )
+               __field(        bool,           reserve         )
        ),
 
        TP_fast_assign(
                __entry->irq            = irq;
                __entry->is_managed     = is_managed;
                __entry->can_reserve    = can_reserve;
-               __entry->early          = early;
+               __entry->reserve        = reserve;
        ),
 
-       TP_printk("irq=%u is_managed=%d can_reserve=%d early=%d",
+       TP_printk("irq=%u is_managed=%d can_reserve=%d reserve=%d",
                  __entry->irq, __entry->is_managed, __entry->can_reserve,
-                 __entry->early)
+                 __entry->reserve)
 );
 
 #define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name)                         \
 DEFINE_EVENT_FN(vector_activate, name,                                 \
        TP_PROTO(unsigned int irq, bool is_managed,                     \
-                bool can_reserve, bool early),                         \
-       TP_ARGS(irq, is_managed, can_reserve, early), NULL, NULL);      \
+                bool can_reserve, bool reserve),                       \
+       TP_ARGS(irq, is_managed, can_reserve, reserve), NULL, NULL);    \
 
 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate);
 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate);
index 1fadd31..3de6933 100644 (file)
@@ -75,7 +75,6 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long);
 dotraplinkage void do_stack_segment(struct pt_regs *, long);
 #ifdef CONFIG_X86_64
 dotraplinkage void do_double_fault(struct pt_regs *, long);
-asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
 #endif
 dotraplinkage void do_general_protection(struct pt_regs *, long);
 dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
@@ -89,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
 #ifdef CONFIG_X86_32
 dotraplinkage void do_iret_error(struct pt_regs *, long);
 #endif
+dotraplinkage void do_mce(struct pt_regs *, long);
 
 static inline int get_si_code(unsigned long condition)
 {
index e9cc6fe..1f86e1b 100644 (file)
@@ -7,6 +7,9 @@
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
+#define IRET_FRAME_OFFSET (offsetof(struct pt_regs, ip))
+#define IRET_FRAME_SIZE   (sizeof(struct pt_regs) - IRET_FRAME_OFFSET)
+
 struct unwind_state {
        struct stack_info stack_info;
        unsigned long stack_mask;
@@ -52,15 +55,28 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
 }
 
 #if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
-static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+/*
+ * If 'partial' returns true, only the iret frame registers are valid.
+ */
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
+                                                   bool *partial)
 {
        if (unwind_done(state))
                return NULL;
 
+       if (partial) {
+#ifdef CONFIG_UNWINDER_ORC
+               *partial = !state->full_regs;
+#else
+               *partial = false;
+#endif
+       }
+
        return state->regs;
 }
 #else
-static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
+                                                   bool *partial)
 {
        return NULL;
 }
index d9a7c65..b986b2c 100644 (file)
@@ -7,6 +7,7 @@
 
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
 extern void map_vsyscall(void);
+extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
 
 /*
  * Called on instruction fetch fault in vsyscall page.
index 7cb282e..bfd8826 100644 (file)
@@ -44,6 +44,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/smap.h>
+#include <asm/nospec-branch.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
@@ -217,9 +218,9 @@ privcmd_call(unsigned call,
        __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
 
        stac();
-       asm volatile("call *%[call]"
+       asm volatile(CALL_NOSPEC
                     : __HYPERCALL_5PARAM
-                    : [call] "a" (&hypercall_page[call])
+                    : [thunk_target] "a" (&hypercall_page[call])
                     : __HYPERCALL_CLOBBER5);
        clac();
 
index da1489c..1e901e4 100644 (file)
@@ -1,6 +1,7 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generated-y += unistd_32.h
 generated-y += unistd_64.h
 generated-y += unistd_x32.h
index 7e1e730..bcba3c6 100644 (file)
 #define X86_CR3_PWT            _BITUL(X86_CR3_PWT_BIT)
 #define X86_CR3_PCD_BIT                4 /* Page Cache Disable */
 #define X86_CR3_PCD            _BITUL(X86_CR3_PCD_BIT)
-#define X86_CR3_PCID_MASK      _AC(0x00000fff,UL) /* PCID Mask */
+
+#define X86_CR3_PCID_BITS      12
+#define X86_CR3_PCID_MASK      (_AC((1UL << X86_CR3_PCID_BITS) - 1, UL))
+
+#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */
+#define X86_CR3_PCID_NOFLUSH    _BITULL(X86_CR3_PCID_NOFLUSH_BIT)
 
 /*
  * Intel CPU features in CR4
index 81bb565..7e2baf7 100644 (file)
@@ -29,10 +29,13 @@ KASAN_SANITIZE_stacktrace.o                         := n
 KASAN_SANITIZE_paravirt.o                              := n
 
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o    := y
-OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o             := y
 OBJECT_FILES_NON_STANDARD_test_nx.o                    := y
 OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o     := y
 
+ifdef CONFIG_FRAME_POINTER
+OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o             := y
+endif
+
 # If instrumentation of this dir is enabled, boot hangs during first second.
 # Probably could be more selective here, but note that files related to irqs,
 # boot, dumpstack/stacktrace, etc are either non-interesting or can lead to
index f4c463d..ec3a286 100644 (file)
@@ -68,8 +68,9 @@ int acpi_ioapic;
 int acpi_strict;
 int acpi_disable_cmcff;
 
+/* ACPI SCI override configuration */
 u8 acpi_sci_flags __initdata;
-int acpi_sci_override_gsi __initdata;
+u32 acpi_sci_override_gsi __initdata = INVALID_ACPI_IRQ;
 int acpi_skip_timer_override __initdata;
 int acpi_use_timer_override __initdata;
 int acpi_fix_pin2_polarity __initdata;
@@ -112,8 +113,6 @@ static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
 };
 
-#define        ACPI_INVALID_GSI                INT_MIN
-
 /*
  * This is just a simple wrapper around early_memremap(),
  * with sanity checks for phys == 0 and size == 0.
@@ -372,7 +371,7 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
         * and acpi_isa_irq_to_gsi() may give wrong result.
         */
        if (gsi < nr_legacy_irqs() && isa_irq_to_gsi[gsi] == gsi)
-               isa_irq_to_gsi[gsi] = ACPI_INVALID_GSI;
+               isa_irq_to_gsi[gsi] = INVALID_ACPI_IRQ;
        isa_irq_to_gsi[bus_irq] = gsi;
 }
 
@@ -620,24 +619,24 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
        }
 
        rc = acpi_get_override_irq(gsi, &trigger, &polarity);
-       if (rc == 0) {
-               trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
-               polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
-               irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
-               if (irq >= 0) {
-                       *irqp = irq;
-                       return 0;
-               }
-       }
+       if (rc)
+               return rc;
 
-       return -1;
+       trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+       polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+       irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
+       if (irq < 0)
+               return irq;
+
+       *irqp = irq;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
 
 int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
 {
        if (isa_irq < nr_legacy_irqs() &&
-           isa_irq_to_gsi[isa_irq] != ACPI_INVALID_GSI) {
+           isa_irq_to_gsi[isa_irq] != INVALID_ACPI_IRQ) {
                *gsi = isa_irq_to_gsi[isa_irq];
                return 0;
        }
@@ -676,8 +675,7 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
        mutex_lock(&acpi_ioapic_lock);
        irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
        /* Don't set up the ACPI SCI because it's already set up */
-       if (irq >= 0 && enable_update_mptable &&
-           acpi_gbl_FADT.sci_interrupt != gsi)
+       if (irq >= 0 && enable_update_mptable && gsi != acpi_gbl_FADT.sci_interrupt)
                mp_config_acpi_gsi(dev, gsi, trigger, polarity);
        mutex_unlock(&acpi_ioapic_lock);
 #endif
@@ -1211,8 +1209,9 @@ static int __init acpi_parse_madt_ioapic_entries(void)
        /*
         * If BIOS did not supply an INT_SRC_OVR for the SCI
         * pretend we got one so we can set the SCI flags.
+        * But ignore setting up SCI on hardware reduced platforms.
         */
-       if (!acpi_sci_override_gsi)
+       if (acpi_sci_override_gsi == INVALID_ACPI_IRQ && !acpi_gbl_reduced_hardware)
                acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0,
                                      acpi_gbl_FADT.sci_interrupt);
 
index 7188aea..f1915b7 100644 (file)
@@ -138,6 +138,8 @@ static int __init acpi_sleep_setup(char *str)
                        acpi_nvs_nosave_s3();
                if (strncmp(str, "old_ordering", 12) == 0)
                        acpi_old_suspend_ordering();
+               if (strncmp(str, "nobl", 4) == 0)
+                       acpi_sleep_no_blacklist();
                str = strchr(str, ',');
                if (str != NULL)
                        str += strspn(str, ", \t");
index dbaf14d..4817d74 100644 (file)
@@ -344,9 +344,12 @@ done:
 static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 {
        unsigned long flags;
+       int i;
 
-       if (instr[0] != 0x90)
-               return;
+       for (i = 0; i < a->padlen; i++) {
+               if (instr[i] != 0x90)
+                       return;
+       }
 
        local_irq_save(flags);
        add_nops(instr + (a->instrlen - a->padlen), a->padlen);
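
The fix can be illustrated with a few bytes in userspace (a sketch of the check only, not of the actual patching): the site is treated as NOP padding only if every one of the padlen bytes is 0x90, rather than just the first.

#include <stdbool.h>
#include <stdio.h>

static bool all_nops(const unsigned char *instr, int padlen)
{
        int i;

        for (i = 0; i < padlen; i++) {
                if (instr[i] != 0x90)
                        return false;
        }
        return true;
}

int main(void)
{
        unsigned char ok[]  = { 0x90, 0x90, 0x90 };
        unsigned char bad[] = { 0x90, 0xcc, 0x90 };

        printf("ok:  %d\n", all_nops(ok, 3));   /* 1 */
        printf("bad: %d\n", all_nops(bad, 3));  /* 0 */
        return 0;
}
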
index 6e272f3..25ddf02 100644 (file)
@@ -1286,6 +1286,55 @@ static int __init apic_intr_mode_select(void)
        return APIC_SYMMETRIC_IO;
 }
 
+/*
+ * An initial setup of the virtual wire mode.
+ */
+void __init init_bsp_APIC(void)
+{
+       unsigned int value;
+
+       /*
+        * Don't do the setup now if we have an SMP BIOS as the
+        * through-I/O-APIC virtual wire mode might be active.
+        */
+       if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
+               return;
+
+       /*
+        * Do not trust the local APIC being empty at bootup.
+        */
+       clear_local_APIC();
+
+       /*
+        * Enable APIC.
+        */
+       value = apic_read(APIC_SPIV);
+       value &= ~APIC_VECTOR_MASK;
+       value |= APIC_SPIV_APIC_ENABLED;
+
+#ifdef CONFIG_X86_32
+       /* This bit is reserved on P4/Xeon and should be cleared */
+       if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+           (boot_cpu_data.x86 == 15))
+               value &= ~APIC_SPIV_FOCUS_DISABLED;
+       else
+#endif
+               value |= APIC_SPIV_FOCUS_DISABLED;
+       value |= SPURIOUS_APIC_VECTOR;
+       apic_write(APIC_SPIV, value);
+
+       /*
+        * Set up the virtual wire mode.
+        */
+       apic_write(APIC_LVT0, APIC_DM_EXTINT);
+       value = APIC_DM_NMI;
+       if (!lapic_is_integrated())             /* 82489DX */
+               value |= APIC_LVT_LEVEL_TRIGGER;
+       if (apic_extnmi == APIC_EXTNMI_NONE)
+               value |= APIC_LVT_MASKED;
+       apic_write(APIC_LVT1, value);
+}
+
 /* Init the interrupt delivery mode for the BSP */
 void __init apic_intr_mode_init(void)
 {
@@ -2626,11 +2675,13 @@ static int __init apic_set_verbosity(char *arg)
                apic_verbosity = APIC_DEBUG;
        else if (strcmp("verbose", arg) == 0)
                apic_verbosity = APIC_VERBOSE;
+#ifdef CONFIG_X86_64
        else {
                pr_warning("APIC Verbosity level %s not recognised"
                        " use apic=verbose or apic=debug\n", arg);
                return -EINVAL;
        }
+#endif
 
        return 0;
 }
index aa85690..25a8702 100644 (file)
@@ -151,7 +151,7 @@ static struct apic apic_flat __ro_after_init = {
        .apic_id_valid                  = default_apic_id_valid,
        .apic_id_registered             = flat_apic_id_registered,
 
-       .irq_delivery_mode              = dest_LowestPrio,
+       .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 1, /* logical */
 
        .disable_esr                    = 0,
index 7b659c4..5078b5c 100644 (file)
@@ -110,7 +110,7 @@ struct apic apic_noop __ro_after_init = {
        .apic_id_valid                  = default_apic_id_valid,
        .apic_id_registered             = noop_apic_id_registered,
 
-       .irq_delivery_mode              = dest_LowestPrio,
+       .irq_delivery_mode              = dest_Fixed,
        /* logical delivery broadcast to all CPUs: */
        .irq_dest_mode                  = 1,
 
index 201579d..8a79634 100644 (file)
@@ -2988,7 +2988,7 @@ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
 }
 
 int mp_irqdomain_activate(struct irq_domain *domain,
-                         struct irq_data *irq_data, bool early)
+                         struct irq_data *irq_data, bool reserve)
 {
        unsigned long flags;
 
index 9b18be7..ce503c9 100644 (file)
@@ -39,17 +39,13 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
                ((apic->irq_dest_mode == 0) ?
                        MSI_ADDR_DEST_MODE_PHYSICAL :
                        MSI_ADDR_DEST_MODE_LOGICAL) |
-               ((apic->irq_delivery_mode != dest_LowestPrio) ?
-                       MSI_ADDR_REDIRECTION_CPU :
-                       MSI_ADDR_REDIRECTION_LOWPRI) |
+               MSI_ADDR_REDIRECTION_CPU |
                MSI_ADDR_DEST_ID(cfg->dest_apicid);
 
        msg->data =
                MSI_DATA_TRIGGER_EDGE |
                MSI_DATA_LEVEL_ASSERT |
-               ((apic->irq_delivery_mode != dest_LowestPrio) ?
-                       MSI_DATA_DELIVERY_FIXED :
-                       MSI_DATA_DELIVERY_LOWPRI) |
+               MSI_DATA_DELIVERY_FIXED |
                MSI_DATA_VECTOR(cfg->vector);
 }
 
index fa22017..02e8acb 100644 (file)
@@ -105,7 +105,7 @@ static struct apic apic_default __ro_after_init = {
        .apic_id_valid                  = default_apic_id_valid,
        .apic_id_registered             = default_apic_id_registered,
 
-       .irq_delivery_mode              = dest_LowestPrio,
+       .irq_delivery_mode              = dest_Fixed,
        /* logical delivery broadcast to all CPUs: */
        .irq_dest_mode                  = 1,
 
index 6a823a2..3cc471b 100644 (file)
@@ -184,6 +184,7 @@ static void reserve_irq_vector_locked(struct irq_data *irqd)
        irq_matrix_reserve(vector_matrix);
        apicd->can_reserve = true;
        apicd->has_reserved = true;
+       irqd_set_can_reserve(irqd);
        trace_vector_reserve(irqd->irq, 0);
        vector_assign_managed_shutdown(irqd);
 }
@@ -368,8 +369,18 @@ static int activate_reserved(struct irq_data *irqd)
        int ret;
 
        ret = assign_irq_vector_any_locked(irqd);
-       if (!ret)
+       if (!ret) {
                apicd->has_reserved = false;
+               /*
+                * Core might have disabled reservation mode after
+                * allocating the irq descriptor. Ideally this should
+                * happen before allocation time, but that would require
+                * completely convoluted ways of transporting that
+                * information.
+                */
+               if (!irqd_can_reserve(irqd))
+                       apicd->can_reserve = false;
+       }
        return ret;
 }
 
@@ -398,21 +409,21 @@ static int activate_managed(struct irq_data *irqd)
 }
 
 static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
-                              bool early)
+                              bool reserve)
 {
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;
        int ret = 0;
 
        trace_vector_activate(irqd->irq, apicd->is_managed,
-                             apicd->can_reserve, early);
+                             apicd->can_reserve, reserve);
 
        /* Nothing to do for fixed assigned vectors */
        if (!apicd->can_reserve && !apicd->is_managed)
                return 0;
 
        raw_spin_lock_irqsave(&vector_lock, flags);
-       if (early || irqd_is_managed_and_shutdown(irqd))
+       if (reserve || irqd_is_managed_and_shutdown(irqd))
                vector_assign_managed_shutdown(irqd);
        else if (apicd->is_managed)
                ret = activate_managed(irqd);
@@ -478,6 +489,7 @@ static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
        } else {
                /* Release the vector */
                apicd->can_reserve = true;
+               irqd_set_can_reserve(irqd);
                clear_irq_vector(irqd);
                realloc = true;
        }
@@ -530,20 +542,23 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 
                err = assign_irq_vector_policy(irqd, info);
                trace_vector_setup(virq + i, false, err);
-               if (err)
+               if (err) {
+                       irqd->chip_data = NULL;
+                       free_apic_chip_data(apicd);
                        goto error;
+               }
        }
 
        return 0;
 
 error:
-       x86_vector_free_irqs(domain, virq, i + 1);
+       x86_vector_free_irqs(domain, virq, i);
        return err;
 }
 
 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
-void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
-                          struct irq_data *irqd, int ind)
+static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
+                                 struct irq_data *irqd, int ind)
 {
        unsigned int cpu, vector, prev_cpu, prev_vector;
        struct apic_chip_data *apicd;
index 622f13c..8b04234 100644 (file)
@@ -184,7 +184,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
        .apic_id_valid                  = x2apic_apic_id_valid,
        .apic_id_registered             = x2apic_apic_id_registered,
 
-       .irq_delivery_mode              = dest_LowestPrio,
+       .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 1, /* logical */
 
        .disable_esr                    = 0,
index 8ea7827..76417a9 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/sigframe.h>
 #include <asm/bootparam.h>
 #include <asm/suspend.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_XEN
 #include <xen/interface/xen.h>
@@ -93,4 +94,13 @@ void common(void) {
 
        BLANK();
        DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+
+       /* TLB state for the entry code */
+       OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
+
+       /* Layout info for cpu_entry_area */
+       OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
+       OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
+       OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
+       DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
 }
index dedf428..fa1261e 100644 (file)
@@ -47,13 +47,8 @@ void foo(void)
        BLANK();
 
        /* Offset from the sysenter stack to tss.sp0 */
-       DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
-              offsetofend(struct tss_struct, SYSENTER_stack));
-
-       /* Offset from cpu_tss to SYSENTER_stack */
-       OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack);
-       /* Size of SYSENTER_stack */
-       DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack));
+       DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+              offsetofend(struct cpu_entry_area, entry_stack_page.stack));
 
 #ifdef CONFIG_CC_STACKPROTECTOR
        BLANK();
index 630212f..bf51e51 100644 (file)
@@ -23,6 +23,9 @@ int main(void)
 #ifdef CONFIG_PARAVIRT
        OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
        OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
+#ifdef CONFIG_DEBUG_ENTRY
+       OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl);
+#endif
        BLANK();
 #endif
 
@@ -63,6 +66,7 @@ int main(void)
 
        OFFSET(TSS_ist, tss_struct, x86_tss.ist);
        OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+       OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
        BLANK();
 
 #ifdef CONFIG_CC_STACKPROTECTOR
index d58184b..ea831c8 100644 (file)
@@ -804,8 +804,11 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 0x17: init_amd_zn(c); break;
        }
 
-       /* Enable workaround for FXSAVE leak */
-       if (c->x86 >= 6)
+       /*
+        * Enable workaround for FXSAVE leak on CPUs
+        * without an XSaveErPtr feature
+        */
+       if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 
        cpu_detect_cache_sizes(c);
@@ -826,8 +829,32 @@ static void init_amd(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_K8);
 
        if (cpu_has(c, X86_FEATURE_XMM2)) {
-               /* MFENCE stops RDTSC speculation */
-               set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+               unsigned long long val;
+               int ret;
+
+               /*
+                * A serializing LFENCE has less overhead than MFENCE, so
+                * use it for execution serialization.  On families which
+                * don't have that MSR, LFENCE is already serializing.
+                * msr_set_bit() uses the safe accessors, too, even if the MSR
+                * is not present.
+                */
+               msr_set_bit(MSR_F10H_DECFG,
+                           MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+
+               /*
+                * Verify that the MSR write was successful (could be running
+                * under a hypervisor) and only then assume that LFENCE is
+                * serializing.
+                */
+               ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+               if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
+                       /* A serializing LFENCE stops RDTSC speculation */
+                       set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+               } else {
+                       /* MFENCE stops RDTSC speculation */
+                       set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+               }
        }
 
        /*
index ba0b242..390b3dc 100644 (file)
  */
 #include <linux/init.h>
 #include <linux/utsname.h>
+#include <linux/cpu.h>
+
+#include <asm/nospec-branch.h>
+#include <asm/cmdline.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>
 #include <asm/processor-flags.h>
@@ -19,6 +23,9 @@
 #include <asm/alternative.h>
 #include <asm/pgtable.h>
 #include <asm/set_memory.h>
+#include <asm/intel-family.h>
+
+static void __init spectre_v2_select_mitigation(void);
 
 void __init check_bugs(void)
 {
@@ -29,6 +36,9 @@ void __init check_bugs(void)
                print_cpu_info(&boot_cpu_data);
        }
 
+       /* Select the proper spectre mitigation before patching alternatives */
+       spectre_v2_select_mitigation();
+
 #ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
@@ -60,3 +70,214 @@ void __init check_bugs(void)
                set_memory_4k((unsigned long)__va(0), 1);
 #endif
 }
+
+/* The kernel command line selection */
+enum spectre_v2_mitigation_cmd {
+       SPECTRE_V2_CMD_NONE,
+       SPECTRE_V2_CMD_AUTO,
+       SPECTRE_V2_CMD_FORCE,
+       SPECTRE_V2_CMD_RETPOLINE,
+       SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+       SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+static const char *spectre_v2_strings[] = {
+       [SPECTRE_V2_NONE]                       = "Vulnerable",
+       [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
+       [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
+       [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
+       [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
+};
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
+static void __init spec2_print_if_insecure(const char *reason)
+{
+       if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+               pr_info("%s\n", reason);
+}
+
+static void __init spec2_print_if_secure(const char *reason)
+{
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+               pr_info("%s\n", reason);
+}
+
+static inline bool retp_compiler(void)
+{
+       return __is_defined(RETPOLINE);
+}
+
+static inline bool match_option(const char *arg, int arglen, const char *opt)
+{
+       int len = strlen(opt);
+
+       return len == arglen && !strncmp(arg, opt, len);
+}
+
+static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+{
+       char arg[20];
+       int ret;
+
+       ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
+                                 sizeof(arg));
+       if (ret > 0)  {
+               if (match_option(arg, ret, "off")) {
+                       goto disable;
+               } else if (match_option(arg, ret, "on")) {
+                       spec2_print_if_secure("force enabled on command line.");
+                       return SPECTRE_V2_CMD_FORCE;
+               } else if (match_option(arg, ret, "retpoline")) {
+                       spec2_print_if_insecure("retpoline selected on command line.");
+                       return SPECTRE_V2_CMD_RETPOLINE;
+               } else if (match_option(arg, ret, "retpoline,amd")) {
+                       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+                               pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
+                               return SPECTRE_V2_CMD_AUTO;
+                       }
+                       spec2_print_if_insecure("AMD retpoline selected on command line.");
+                       return SPECTRE_V2_CMD_RETPOLINE_AMD;
+               } else if (match_option(arg, ret, "retpoline,generic")) {
+                       spec2_print_if_insecure("generic retpoline selected on command line.");
+                       return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
+               } else if (match_option(arg, ret, "auto")) {
+                       return SPECTRE_V2_CMD_AUTO;
+               }
+       }
+
+       if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+               return SPECTRE_V2_CMD_AUTO;
+disable:
+       spec2_print_if_insecure("disabled on command line.");
+       return SPECTRE_V2_CMD_NONE;
+}
+
+/* Check for Skylake-like CPUs (for RSB handling) */
+static bool __init is_skylake_era(void)
+{
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+           boot_cpu_data.x86 == 6) {
+               switch (boot_cpu_data.x86_model) {
+               case INTEL_FAM6_SKYLAKE_MOBILE:
+               case INTEL_FAM6_SKYLAKE_DESKTOP:
+               case INTEL_FAM6_SKYLAKE_X:
+               case INTEL_FAM6_KABYLAKE_MOBILE:
+               case INTEL_FAM6_KABYLAKE_DESKTOP:
+                       return true;
+               }
+       }
+       return false;
+}
+
+static void __init spectre_v2_select_mitigation(void)
+{
+       enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+       enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
+
+       /*
+        * If the CPU is not affected and the command line mode is NONE or AUTO
+        * then nothing to do.
+        */
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
+           (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
+               return;
+
+       switch (cmd) {
+       case SPECTRE_V2_CMD_NONE:
+               return;
+
+       case SPECTRE_V2_CMD_FORCE:
+               /* FALLTHRU */
+       case SPECTRE_V2_CMD_AUTO:
+               goto retpoline_auto;
+
+       case SPECTRE_V2_CMD_RETPOLINE_AMD:
+               if (IS_ENABLED(CONFIG_RETPOLINE))
+                       goto retpoline_amd;
+               break;
+       case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
+               if (IS_ENABLED(CONFIG_RETPOLINE))
+                       goto retpoline_generic;
+               break;
+       case SPECTRE_V2_CMD_RETPOLINE:
+               if (IS_ENABLED(CONFIG_RETPOLINE))
+                       goto retpoline_auto;
+               break;
+       }
+       pr_err("kernel not compiled with retpoline; no mitigation available!\n");
+       return;
+
+retpoline_auto:
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+       retpoline_amd:
+               if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+                       pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+                       goto retpoline_generic;
+               }
+               mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
+                                        SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+               setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
+               setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+       } else {
+       retpoline_generic:
+               mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
+                                        SPECTRE_V2_RETPOLINE_MINIMAL;
+               setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+       }
+
+       spectre_v2_enabled = mode;
+       pr_info("%s\n", spectre_v2_strings[mode]);
+
+       /*
+        * If neither SMEP nor KPTI is available, there is a risk of
+        * hitting userspace addresses in the RSB after a context switch
+        * from a shallow call stack to a deeper one. To prevent this fill
+        * the entire RSB, even when using IBRS.
+        *
+        * Skylake era CPUs have a separate issue with *underflow* of the
+        * RSB, when they will predict 'ret' targets from the generic BTB.
+        * The proper mitigation for this is IBRS. If IBRS is not supported
+        * or deactivated in favour of retpolines the RSB fill on context
+        * switch is required.
+        */
+       if ((!boot_cpu_has(X86_FEATURE_PTI) &&
+            !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+               setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+               pr_info("Filling RSB on context switch\n");
+       }
+}
+
+#undef pr_fmt
+
+#ifdef CONFIG_SYSFS
+ssize_t cpu_show_meltdown(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+       if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+               return sprintf(buf, "Not affected\n");
+       if (boot_cpu_has(X86_FEATURE_PTI))
+               return sprintf(buf, "Mitigation: PTI\n");
+       return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+               return sprintf(buf, "Not affected\n");
+       return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+               return sprintf(buf, "Not affected\n");
+
+       return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
+}
+#endif
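
The cpu_show_* handlers above back the new files under /sys/devices/system/cpu/vulnerabilities/. A minimal userspace sketch, assuming those standard sysfs paths, that dumps the reported mitigation state (on kernels without these patches the files simply do not exist and each fopen() fails):

#include <stdio.h>

int main(void)
{
        /* Paths provided by the cpu_show_* attributes added above */
        static const char * const files[] = {
                "/sys/devices/system/cpu/vulnerabilities/meltdown",
                "/sys/devices/system/cpu/vulnerabilities/spectre_v1",
                "/sys/devices/system/cpu/vulnerabilities/spectre_v2",
        };
        char line[128];
        unsigned int i;

        for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
                FILE *f = fopen(files[i], "r");

                if (!f) {
                        /* Older kernels do not have these files */
                        perror(files[i]);
                        continue;
                }
                if (fgets(line, sizeof(line), f))
                        printf("%s: %s", files[i], line);
                fclose(f);
        }
        return 0;
}
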
index fa998ca..ef29ad0 100644 (file)
@@ -476,8 +476,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
        return NULL;            /* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS];
-__u32 cpu_caps_set[NCAPINTS];
+__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
+__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
 
 void load_percpu_segment(int cpu)
 {
@@ -490,28 +490,23 @@ void load_percpu_segment(int cpu)
        load_stack_canary_segment();
 }
 
-/* Setup the fixmap mapping only once per-processor */
-static inline void setup_fixmap_gdt(int cpu)
-{
-#ifdef CONFIG_X86_64
-       /* On 64-bit systems, we use a read-only fixmap GDT. */
-       pgprot_t prot = PAGE_KERNEL_RO;
-#else
-       /*
-        * On native 32-bit systems, the GDT cannot be read-only because
-        * our double fault handler uses a task gate, and entering through
-        * a task gate needs to change an available TSS to busy.  If the GDT
-        * is read-only, that will triple fault.
-        *
-        * On Xen PV, the GDT must be read-only because the hypervisor requires
-        * it.
-        */
-       pgprot_t prot = boot_cpu_has(X86_FEATURE_XENPV) ?
-               PAGE_KERNEL_RO : PAGE_KERNEL;
+#ifdef CONFIG_X86_32
+/* The 32-bit entry code needs to find cpu_entry_area. */
+DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 #endif
 
-       __set_fixmap(get_cpu_gdt_ro_index(cpu), get_cpu_gdt_paddr(cpu), prot);
-}
+#ifdef CONFIG_X86_64
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+         [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
+         [DEBUG_STACK - 1]                     = DEBUG_STKSZ
+};
+#endif
 
 /* Load the original GDT from the per-cpu structure */
 void load_direct_gdt(int cpu)
@@ -747,7 +742,7 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
 {
        int i;
 
-       for (i = 0; i < NCAPINTS; i++) {
+       for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }
@@ -927,6 +922,13 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        }
 
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+
+       if (c->x86_vendor != X86_VENDOR_AMD)
+               setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
        fpu__init_system(c);
 
 #ifdef CONFIG_X86_32
@@ -1250,7 +1252,7 @@ void enable_sep_cpu(void)
                return;
 
        cpu = get_cpu();
-       tss = &per_cpu(cpu_tss, cpu);
+       tss = &per_cpu(cpu_tss_rw, cpu);
 
        /*
         * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
@@ -1259,11 +1261,7 @@ void enable_sep_cpu(void)
 
        tss->x86_tss.ss1 = __KERNEL_CS;
        wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
-
-       wrmsr(MSR_IA32_SYSENTER_ESP,
-             (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
-             0);
-
+       wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
 
        put_cpu();
@@ -1357,25 +1355,22 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
-         [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
-         [DEBUG_STACK - 1]                     = DEBUG_STKSZ
-};
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
-       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
+       extern char _entry_trampoline[];
+       extern char entry_SYSCALL_64_trampoline[];
+
+       int cpu = smp_processor_id();
+       unsigned long SYSCALL64_entry_trampoline =
+               (unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
+               (entry_SYSCALL_64_trampoline - _entry_trampoline);
+
        wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
-       wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
+       if (static_cpu_has(X86_FEATURE_PTI))
+               wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
+       else
+               wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
 
 #ifdef CONFIG_IA32_EMULATION
        wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
@@ -1386,7 +1381,7 @@ void syscall_init(void)
         * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
         */
        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
        wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
        wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
@@ -1530,7 +1525,7 @@ void cpu_init(void)
        if (cpu)
                load_ucode_ap();
 
-       t = &per_cpu(cpu_tss, cpu);
+       t = &per_cpu(cpu_tss_rw, cpu);
        oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
@@ -1569,7 +1564,7 @@ void cpu_init(void)
         * set up and load the per-CPU TSS
         */
        if (!oist->ist[0]) {
-               char *estacks = per_cpu(exception_stacks, cpu);
+               char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
 
                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        estacks += exception_stack_sizes[v];
@@ -1580,7 +1575,7 @@ void cpu_init(void)
                }
        }
 
-       t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+       t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
        /*
         * <= is required because the CPU will access up to
@@ -1596,11 +1591,12 @@ void cpu_init(void)
        enter_lazy_tlb(&init_mm, me);
 
        /*
-        * Initialize the TSS.  Don't bother initializing sp0, as the initial
-        * task never enters user mode.
+        * Initialize the TSS.  sp0 points to the entry trampoline stack
+        * regardless of what task is running.
         */
-       set_tss_desc(cpu, t);
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
+       load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
 
        load_mm_ldt(&init_mm);
 
@@ -1612,7 +1608,6 @@ void cpu_init(void)
        if (is_uv_system())
                uv_cpu_init();
 
-       setup_fixmap_gdt(cpu);
        load_fixmap_gdt(cpu);
 }
 
@@ -1622,7 +1617,7 @@ void cpu_init(void)
 {
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
-       struct tss_struct *t = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
 
        wait_for_master_cpu(cpu);
 
@@ -1657,12 +1652,12 @@ void cpu_init(void)
         * Initialize the TSS.  Don't bother initializing sp0, as the initial
         * task never enters user mode.
         */
-       set_tss_desc(cpu, t);
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
 
        load_mm_ldt(&init_mm);
 
-       t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+       t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
 #ifdef CONFIG_DOUBLEFAULT
        /* Set up doublefault TSS pointer in the GDT */
@@ -1674,7 +1669,6 @@ void cpu_init(void)
 
        fpu__init_cpu();
 
-       setup_fixmap_gdt(cpu);
        load_fixmap_gdt(cpu);
 }
 #endif
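
The exception_stack_sizes[] table moved above uses GCC's range designated initializers ([first ... last] = value), a GCC extension in which a later designator overrides part of an earlier range. A standalone sketch of the construct, with made-up sizes and an arbitrary index chosen purely for illustration:

#include <stdio.h>

#define N_STACKS        7
#define DEFAULT_SZ      4096
#define DEBUG_SZ        8192
#define DEBUG_IDX       3       /* arbitrary slot, just for this example */

/* Every slot defaults to 4K; the later designator bumps one slot to 8K. */
static const unsigned int stack_sizes[N_STACKS] = {
        [0 ... N_STACKS - 1]    = DEFAULT_SZ,
        [DEBUG_IDX]             = DEBUG_SZ,
};

int main(void)
{
        int i;

        for (i = 0; i < N_STACKS; i++)
                printf("stack %d: %u bytes\n", i, stack_sizes[i]);
        return 0;
}
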
index 88dcf84..9944237 100644 (file)
@@ -525,10 +525,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                 */
                if (static_branch_unlikely(&rdt_mon_enable_key))
                        rmdir_mondata_subdir_allrdtgrp(r, d->id);
-               kfree(d->ctrl_val);
-               kfree(d->rmid_busy_llc);
-               kfree(d->mbm_total);
-               kfree(d->mbm_local);
                list_del(&d->list);
                if (is_mbm_enabled())
                        cancel_delayed_work(&d->mbm_over);
@@ -545,6 +541,10 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                        cancel_delayed_work(&d->cqm_limbo);
                }
 
+               kfree(d->ctrl_val);
+               kfree(d->rmid_busy_llc);
+               kfree(d->mbm_total);
+               kfree(d->mbm_local);
                kfree(d);
                return;
        }
index b1d616d..868e412 100644 (file)
@@ -1785,6 +1785,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
 void (*machine_check_vector)(struct pt_regs *, long error_code) =
                                                unexpected_machine_check;
 
+dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
+{
+       machine_check_vector(regs, error_code);
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off:
index c6daec4..330b846 100644 (file)
@@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
 #define F16H_MPB_MAX_SIZE 3458
+#define F17H_MPB_MAX_SIZE 3200
 
        switch (family) {
        case 0x14:
@@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
        case 0x16:
                max_size = F16H_MPB_MAX_SIZE;
                break;
+       case 0x17:
+               max_size = F17H_MPB_MAX_SIZE;
+               break;
        default:
                max_size = F1XH_MPB_MAX_SIZE;
                break;
index c4fa4a8..e4fc595 100644 (file)
@@ -239,7 +239,7 @@ static int __init save_microcode_in_initrd(void)
                break;
        case X86_VENDOR_AMD:
                if (c->x86 >= 0x10)
-                       return save_microcode_in_initrd_amd(cpuid_eax(1));
+                       ret = save_microcode_in_initrd_amd(cpuid_eax(1));
                break;
        default:
                break;
index 7dbcb7a..f7c55b0 100644 (file)
@@ -45,6 +45,9 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 /* Current microcode patch used in early patching on the APs. */
 static struct microcode_intel *intel_ucode_patch;
 
+/* last level cache size per core */
+static int llc_size_per_core;
+
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
                                        unsigned int s2, unsigned int p2)
 {
@@ -565,15 +568,6 @@ static void print_ucode(struct ucode_cpu_info *uci)
 }
 #else
 
-/*
- * Flush global tlb. We only do this in x86_64 where paging has been enabled
- * already and PGE should be enabled as well.
- */
-static inline void flush_tlb_early(void)
-{
-       __native_flush_tlb_global_irq_disabled();
-}
-
 static inline void print_ucode(struct ucode_cpu_info *uci)
 {
        struct microcode_intel *mc;
@@ -602,10 +596,6 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
        if (rev != mc->hdr.rev)
                return -1;
 
-#ifdef CONFIG_X86_64
-       /* Flush global tlb. This is precaution. */
-       flush_tlb_early();
-#endif
        uci->cpu_sig.rev = rev;
 
        if (early)
@@ -923,8 +913,19 @@ static bool is_blacklisted(unsigned int cpu)
 {
        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-       if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
-               pr_err_once("late loading on model 79 is disabled.\n");
+       /*
+        * Late loading on model 79 with microcode revision less than 0x0b000021
+        * and LLC size per core bigger than 2.5MB may result in a system hang.
+        * This behavior is documented in item BDF90, #334165 (Intel Xeon
+        * Processor E7-8800/4800 v4 Product Family).
+        */
+       if (c->x86 == 6 &&
+           c->x86_model == INTEL_FAM6_BROADWELL_X &&
+           c->x86_mask == 0x01 &&
+           llc_size_per_core > 2621440 &&
+           c->microcode < 0x0b000021) {
+               pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+               pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
                return true;
        }
 
@@ -979,6 +980,15 @@ static struct microcode_ops microcode_intel_ops = {
        .apply_microcode                  = apply_microcode_intel,
 };
 
+static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
+{
+       u64 llc_size = c->x86_cache_size * 1024;
+
+       do_div(llc_size, c->x86_max_cores);
+
+       return (int)llc_size;
+}
+
 struct microcode_ops * __init init_intel_microcode(void)
 {
        struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -989,5 +999,7 @@ struct microcode_ops * __init init_intel_microcode(void)
                return NULL;
        }
 
+       llc_size_per_core = calc_llc_size_per_core(c);
+
        return &microcode_intel_ops;
 }
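
To make the 2621440-byte cutoff in is_blacklisted() concrete: x86_cache_size is in KB, so calc_llc_size_per_core() computes cache_size * 1024 / x86_max_cores. A small sketch of the same arithmetic, using made-up part numbers (35 MB LLC, 14 cores) chosen only so the result lands exactly on the 2.5 MB boundary:

#include <stdio.h>

int main(void)
{
        /* Hypothetical part: 35 MB LLC (reported in KB) shared by 14 cores */
        unsigned long long llc_size_kb = 35840;
        unsigned int cores = 14;
        unsigned long long per_core = llc_size_kb * 1024 / cores;

        /* 35840 * 1024 / 14 = 2621440 bytes, i.e. exactly 2.5 MB per core */
        printf("LLC per core: %llu bytes (%s the 2621440 threshold)\n",
               per_core, per_core > 2621440 ? "above" : "at or below");
        return 0;
}
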
index 05459ad..d0e6976 100644 (file)
@@ -21,7 +21,6 @@ struct cpuid_bit {
 static const struct cpuid_bit cpuid_bits[] = {
        { X86_FEATURE_APERFMPERF,       CPUID_ECX,  0, 0x00000006, 0 },
        { X86_FEATURE_EPB,              CPUID_ECX,  3, 0x00000006, 0 },
-       { X86_FEATURE_INTEL_PT,         CPUID_EBX, 25, 0x00000007, 0 },
        { X86_FEATURE_AVX512_4VNNIW,    CPUID_EDX,  2, 0x00000007, 0 },
        { X86_FEATURE_AVX512_4FMAPS,    CPUID_EDX,  3, 0x00000007, 0 },
        { X86_FEATURE_CAT_L3,           CPUID_EBX,  1, 0x00000010, 0 },
index 0e662c5..0b8cedb 100644 (file)
@@ -50,25 +50,23 @@ static void doublefault_fn(void)
                cpu_relax();
 }
 
-struct tss_struct doublefault_tss __cacheline_aligned = {
-       .x86_tss = {
-               .sp0            = STACK_START,
-               .ss0            = __KERNEL_DS,
-               .ldt            = 0,
-               .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
-
-               .ip             = (unsigned long) doublefault_fn,
-               /* 0x2 bit is always set */
-               .flags          = X86_EFLAGS_SF | 0x2,
-               .sp             = STACK_START,
-               .es             = __USER_DS,
-               .cs             = __KERNEL_CS,
-               .ss             = __KERNEL_DS,
-               .ds             = __USER_DS,
-               .fs             = __KERNEL_PERCPU,
-
-               .__cr3          = __pa_nodebug(swapper_pg_dir),
-       }
+struct x86_hw_tss doublefault_tss __cacheline_aligned = {
+       .sp0            = STACK_START,
+       .ss0            = __KERNEL_DS,
+       .ldt            = 0,
+       .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
+
+       .ip             = (unsigned long) doublefault_fn,
+       /* 0x2 bit is always set */
+       .flags          = X86_EFLAGS_SF | 0x2,
+       .sp             = STACK_START,
+       .es             = __USER_DS,
+       .cs             = __KERNEL_CS,
+       .ss             = __KERNEL_DS,
+       .ds             = __USER_DS,
+       .fs             = __KERNEL_PERCPU,
+
+       .__cr3          = __pa_nodebug(swapper_pg_dir),
 };
 
 /* dummy for do_double_fault() call */
index f13b4c0..afbecff 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
 
+#include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 
@@ -43,6 +44,24 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
        return true;
 }
 
+bool in_entry_stack(unsigned long *stack, struct stack_info *info)
+{
+       struct entry_stack *ss = cpu_entry_stack(smp_processor_id());
+
+       void *begin = ss;
+       void *end = ss + 1;
+
+       if ((void *)stack < begin || (void *)stack >= end)
+               return false;
+
+       info->type      = STACK_TYPE_ENTRY;
+       info->begin     = begin;
+       info->end       = end;
+       info->next_sp   = NULL;
+
+       return true;
+}
+
 static void printk_stack_address(unsigned long address, int reliable,
                                 char *log_lvl)
 {
@@ -50,6 +69,39 @@ static void printk_stack_address(unsigned long address, int reliable,
        printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
 }
 
+void show_iret_regs(struct pt_regs *regs)
+{
+       printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip);
+       printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
+               regs->sp, regs->flags);
+}
+
+static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
+                                 bool partial)
+{
+       /*
+        * These on_stack() checks aren't strictly necessary: the unwind code
+        * has already validated the 'regs' pointer.  The checks are done for
+        * ordering reasons: if the registers are on the next stack, we don't
+        * want to print them out yet.  Otherwise they'll be shown as part of
+        * the wrong stack.  Later, when show_trace_log_lvl() switches to the
+        * next stack, this function will be called again with the same regs so
+        * they can be printed in the right context.
+        */
+       if (!partial && on_stack(info, regs, sizeof(*regs))) {
+               __show_regs(regs, 0);
+
+       } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
+                                      IRET_FRAME_SIZE)) {
+               /*
+                * When an interrupt or exception occurs in entry code, the
+                * full pt_regs might not have been saved yet.  In that case
+                * just print the iret frame.
+                */
+               show_iret_regs(regs);
+       }
+}
+
 void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                        unsigned long *stack, char *log_lvl)
 {
@@ -57,11 +109,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
        struct stack_info stack_info = {0};
        unsigned long visit_mask = 0;
        int graph_idx = 0;
+       bool partial;
 
        printk("%sCall Trace:\n", log_lvl);
 
        unwind_start(&state, task, regs, stack);
        stack = stack ? : get_stack_pointer(task, regs);
+       regs = unwind_get_entry_regs(&state, &partial);
 
        /*
         * Iterate through the stacks, starting with the current stack pointer.
@@ -71,31 +125,35 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
         * - task stack
         * - interrupt stack
         * - HW exception stacks (double fault, nmi, debug, mce)
+        * - entry stack
         *
-        * x86-32 can have up to three stacks:
+        * x86-32 can have up to four stacks:
         * - task stack
         * - softirq stack
         * - hardirq stack
+        * - entry stack
         */
-       for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+       for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
                const char *stack_name;
 
-               /*
-                * If we overflowed the task stack into a guard page, jump back
-                * to the bottom of the usable stack.
-                */
-               if (task_stack_page(task) - (void *)stack < PAGE_SIZE)
-                       stack = task_stack_page(task);
-
-               if (get_stack_info(stack, task, &stack_info, &visit_mask))
-                       break;
+               if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
+                       /*
+                        * We weren't on a valid stack.  It's possible that
+                        * we overflowed a valid stack into a guard page.
+                        * See if the next page up is valid so that we can
+                        * generate some kind of backtrace if this happens.
+                        */
+                       stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
+                       if (get_stack_info(stack, task, &stack_info, &visit_mask))
+                               break;
+               }
 
                stack_name = stack_type_name(stack_info.type);
                if (stack_name)
                        printk("%s <%s>\n", log_lvl, stack_name);
 
-               if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
-                       __show_regs(regs, 0);
+               if (regs)
+                       show_regs_if_on_stack(&stack_info, regs, partial);
 
                /*
                 * Scan the stack, printing any text addresses we find.  At the
@@ -119,7 +177,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
                        /*
                         * Don't print regs->ip again if it was already printed
-                        * by __show_regs() below.
+                        * by show_regs_if_on_stack().
                         */
                        if (regs && stack == &regs->ip)
                                goto next;
@@ -154,9 +212,9 @@ next:
                        unwind_next_frame(&state);
 
                        /* if the frame has entry regs, print them */
-                       regs = unwind_get_entry_regs(&state);
-                       if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
-                               __show_regs(regs, 0);
+                       regs = unwind_get_entry_regs(&state, &partial);
+                       if (regs)
+                               show_regs_if_on_stack(&stack_info, regs, partial);
                }
 
                if (stack_name)
@@ -252,11 +310,13 @@ int __die(const char *str, struct pt_regs *regs, long err)
        unsigned long sp;
 #endif
        printk(KERN_DEFAULT
-              "%s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff, ++die_counter,
+              "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
               IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT"         : "",
               IS_ENABLED(CONFIG_SMP)     ? " SMP"             : "",
               debug_pagealloc_enabled()  ? " DEBUG_PAGEALLOC" : "",
-              IS_ENABLED(CONFIG_KASAN)   ? " KASAN"           : "");
+              IS_ENABLED(CONFIG_KASAN)   ? " KASAN"           : "",
+              IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
+              (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
 
        if (notify_die(DIE_OOPS, str, regs, err,
                        current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
index daefae8..04170f6 100644 (file)
@@ -26,6 +26,9 @@ const char *stack_type_name(enum stack_type type)
        if (type == STACK_TYPE_SOFTIRQ)
                return "SOFTIRQ";
 
+       if (type == STACK_TYPE_ENTRY)
+               return "ENTRY_TRAMPOLINE";
+
        return NULL;
 }
 
@@ -93,6 +96,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
        if (task != current)
                goto unknown;
 
+       if (in_entry_stack(stack, info))
+               goto recursion_check;
+
        if (in_hardirq_stack(stack, info))
                goto recursion_check;
 
index 88ce2ff..563e28d 100644 (file)
@@ -37,6 +37,15 @@ const char *stack_type_name(enum stack_type type)
        if (type == STACK_TYPE_IRQ)
                return "IRQ";
 
+       if (type == STACK_TYPE_ENTRY) {
+               /*
+                * On 64-bit, we have a generic entry stack that we
+                * use for all the kernel entry points, including
+                * SYSENTER.
+                */
+               return "ENTRY_TRAMPOLINE";
+       }
+
        if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
                return exception_stack_names[type - STACK_TYPE_EXCEPTION];
 
@@ -115,6 +124,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
        if (in_irq_stack(stack, info))
                goto recursion_check;
 
+       if (in_entry_stack(stack, info))
+               goto recursion_check;
+
        goto unknown;
 
 recursion_check:
index b6c6468..4c8440d 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/segment.h>
 #include <asm/export.h>
 #include <asm/ftrace.h>
+#include <asm/nospec-branch.h>
 
 #ifdef CC_USING_FENTRY
 # define function_hook __fentry__
@@ -197,7 +198,8 @@ ftrace_stub:
        movl    0x4(%ebp), %edx
        subl    $MCOUNT_INSN_SIZE, %eax
 
-       call    *ftrace_trace_function
+       movl    ftrace_trace_function, %ecx
+       CALL_NOSPEC %ecx
 
        popl    %edx
        popl    %ecx
@@ -241,5 +243,5 @@ return_to_handler:
        movl    %eax, %ecx
        popl    %edx
        popl    %eax
-       jmp     *%ecx
+       JMP_NOSPEC %ecx
 #endif
index c832291..91b2cff 100644 (file)
@@ -7,7 +7,8 @@
 #include <asm/ptrace.h>
 #include <asm/ftrace.h>
 #include <asm/export.h>
-
+#include <asm/nospec-branch.h>
+#include <asm/unwind_hints.h>
 
        .code64
        .section .entry.text, "ax"
@@ -20,7 +21,6 @@ EXPORT_SYMBOL(__fentry__)
 EXPORT_SYMBOL(mcount)
 #endif
 
-/* All cases save the original rbp (8 bytes) */
 #ifdef CONFIG_FRAME_POINTER
 # ifdef CC_USING_FENTRY
 /* Save parent and function stack frames (rip and rbp) */
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(mcount)
 # endif
 #else
 /* No need to save a stack frame */
-# define MCOUNT_FRAME_SIZE     8
+# define MCOUNT_FRAME_SIZE     0
 #endif /* CONFIG_FRAME_POINTER */
 
 /* Size of stack used to save mcount regs in save_mcount_regs */
@@ -64,10 +64,10 @@ EXPORT_SYMBOL(mcount)
  */
 .macro save_mcount_regs added=0
 
-       /* Always save the original rbp */
+#ifdef CONFIG_FRAME_POINTER
+       /* Save the original rbp */
        pushq %rbp
 
-#ifdef CONFIG_FRAME_POINTER
        /*
         * Stack traces will stop at the ftrace trampoline if the frame pointer
         * is not set up properly. If fentry is used, we need to save a frame
@@ -105,7 +105,11 @@ EXPORT_SYMBOL(mcount)
         * Save the original RBP. Even though the mcount ABI does not
         * require this, it helps out callers.
         */
+#ifdef CONFIG_FRAME_POINTER
        movq MCOUNT_REG_SIZE-8(%rsp), %rdx
+#else
+       movq %rbp, %rdx
+#endif
        movq %rdx, RBP(%rsp)
 
        /* Copy the parent address into %rsi (second parameter) */
@@ -148,7 +152,7 @@ EXPORT_SYMBOL(mcount)
 
 ENTRY(function_hook)
        retq
-END(function_hook)
+ENDPROC(function_hook)
 
 ENTRY(ftrace_caller)
        /* save_mcount_regs fills in first two parameters */
@@ -184,7 +188,7 @@ GLOBAL(ftrace_graph_call)
 /* This is weak to keep gas from relaxing the jumps */
 WEAK(ftrace_stub)
        retq
-END(ftrace_caller)
+ENDPROC(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
        /* Save the current flags before any operations that can change them */
@@ -255,7 +259,7 @@ GLOBAL(ftrace_regs_caller_end)
 
        jmp ftrace_epilogue
 
-END(ftrace_regs_caller)
+ENDPROC(ftrace_regs_caller)
 
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -286,12 +290,12 @@ trace:
         * ip and parent ip are used and the list function is called when
         * function tracing is enabled.
         */
-       call   *ftrace_trace_function
-
+       movq ftrace_trace_function, %r8
+       CALL_NOSPEC %r8
        restore_mcount_regs
 
        jmp fgraph_trace
-END(function_hook)
+ENDPROC(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -313,9 +317,10 @@ ENTRY(ftrace_graph_caller)
        restore_mcount_regs
 
        retq
-END(ftrace_graph_caller)
+ENDPROC(ftrace_graph_caller)
 
-GLOBAL(return_to_handler)
+ENTRY(return_to_handler)
+       UNWIND_HINT_EMPTY
        subq  $24, %rsp
 
        /* Save the return values */
@@ -329,5 +334,6 @@ GLOBAL(return_to_handler)
        movq 8(%rsp), %rdx
        movq (%rsp), %rax
        addq $24, %rsp
-       jmp *%rdi
+       JMP_NOSPEC %rdi
+END(return_to_handler)
 #endif
index 6a5d757..7ba5d81 100644 (file)
@@ -157,8 +157,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
        p = fixup_pointer(&phys_base, physaddr);
        *p += load_delta - sme_get_me_mask();
 
-       /* Encrypt the kernel (if SME is active) */
-       sme_encrypt_kernel();
+       /* Encrypt the kernel and related (if SME is active) */
+       sme_encrypt_kernel(bp);
 
        /*
         * Return the SME encryption mask (if SME is active) to be used as a
index 7dca675..04a625f 100644 (file)
@@ -341,6 +341,27 @@ GLOBAL(early_recursion_flag)
        .balign PAGE_SIZE; \
 GLOBAL(name)
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * Each PGD needs to be 8k long and 8k aligned.  We do not
+ * ever go out to userspace with these, so we do not
+ * strictly *need* the second page, but this allows us to
+ * have a single set_pgd() implementation that does not
+ * need to worry about whether it has 4k or 8k to work
+ * with.
+ *
+ * This ensures PGDs are 8k long:
+ */
+#define PTI_USER_PGD_FILL      512
+/* This ensures they are 8k-aligned: */
+#define NEXT_PGD_PAGE(name) \
+       .balign 2 * PAGE_SIZE; \
+GLOBAL(name)
+#else
+#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
+#define PTI_USER_PGD_FILL      0
+#endif
+
 /* Automate the creation of 1 to 1 mapping pmd entries */
 #define PMDS(START, PERM, COUNT)                       \
        i = 0 ;                                         \
@@ -350,13 +371,14 @@ GLOBAL(name)
        .endr
 
        __INITDATA
-NEXT_PAGE(early_top_pgt)
+NEXT_PGD_PAGE(early_top_pgt)
        .fill   511,8,0
 #ifdef CONFIG_X86_5LEVEL
        .quad   level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #else
        .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #endif
+       .fill   PTI_USER_PGD_FILL,8,0
 
 NEXT_PAGE(early_dynamic_pgts)
        .fill   512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -364,13 +386,14 @@ NEXT_PAGE(early_dynamic_pgts)
        .data
 
 #if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
-NEXT_PAGE(init_top_pgt)
+NEXT_PGD_PAGE(init_top_pgt)
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
        .org    init_top_pgt + PGD_PAGE_OFFSET*8, 0
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
        .org    init_top_pgt + PGD_START_KERNEL*8, 0
        /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
        .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
+       .fill   PTI_USER_PGD_FILL,8,0
 
 NEXT_PAGE(level3_ident_pgt)
        .quad   level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
@@ -381,8 +404,9 @@ NEXT_PAGE(level2_ident_pgt)
         */
        PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 #else
-NEXT_PAGE(init_top_pgt)
+NEXT_PGD_PAGE(init_top_pgt)
        .fill   512,8,0
+       .fill   PTI_USER_PGD_FILL,8,0
 #endif
 
 #ifdef CONFIG_X86_5LEVEL
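
A quick check of the sizing behind NEXT_PGD_PAGE: PTI_USER_PGD_FILL is 512 quadwords, i.e. 512 * 8 = 4096 bytes, so the fill appends exactly one 4K page, and the .balign 2*PAGE_SIZE gives the 8k-long, 8k-aligned kernel+user PGD pair the comment describes. A minimal C11 sketch restating those constants (values copied from the hunk above, not derived from kernel headers):

#include <assert.h>

#define PAGE_SIZE               4096
#define PTI_USER_PGD_FILL       512     /* quadwords appended after the kernel PGD */

static_assert(PTI_USER_PGD_FILL * 8 == PAGE_SIZE,
              "the user-PGD fill is exactly one extra 4K page");
static_assert(2 * PAGE_SIZE == 8192,
              "kernel + user PGD pair spans 8K, matching the 8K alignment");

int main(void)
{
        return 0;
}
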
index d985cef..56d99be 100644 (file)
@@ -56,7 +56,7 @@ struct idt_data {
  * Early traps running on the DEFAULT_STACK because the other interrupt
  * stacks work only after cpu_init().
  */
-static const __initdata struct idt_data early_idts[] = {
+static const __initconst struct idt_data early_idts[] = {
        INTG(X86_TRAP_DB,               debug),
        SYSG(X86_TRAP_BP,               int3),
 #ifdef CONFIG_X86_32
@@ -70,7 +70,7 @@ static const __initdata struct idt_data early_idts[] = {
  * the traps which use them are reinitialized with IST after cpu_init() has
  * set up TSS.
  */
-static const __initdata struct idt_data def_idts[] = {
+static const __initconst struct idt_data def_idts[] = {
        INTG(X86_TRAP_DE,               divide_error),
        INTG(X86_TRAP_NMI,              nmi),
        INTG(X86_TRAP_BR,               bounds),
@@ -108,7 +108,7 @@ static const __initdata struct idt_data def_idts[] = {
 /*
  * The APIC and SMP idt entries
  */
-static const __initdata struct idt_data apic_idts[] = {
+static const __initconst struct idt_data apic_idts[] = {
 #ifdef CONFIG_SMP
        INTG(RESCHEDULE_VECTOR,         reschedule_interrupt),
        INTG(CALL_FUNCTION_VECTOR,      call_function_interrupt),
@@ -150,7 +150,7 @@ static const __initdata struct idt_data apic_idts[] = {
  * Early traps running on the DEFAULT_STACK because the other interrupt
  * stacks work only after cpu_init().
  */
-static const __initdata struct idt_data early_pf_idts[] = {
+static const __initconst struct idt_data early_pf_idts[] = {
        INTG(X86_TRAP_PF,               page_fault),
 };
 
@@ -158,7 +158,7 @@ static const __initdata struct idt_data early_pf_idts[] = {
  * Override for the debug_idt. Same as the default, but with interrupt
  * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
  */
-static const __initdata struct idt_data dbg_idts[] = {
+static const __initconst struct idt_data dbg_idts[] = {
        INTG(X86_TRAP_DB,       debug),
        INTG(X86_TRAP_BP,       int3),
 };
@@ -180,7 +180,7 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
  * The exceptions which use Interrupt stacks. They are setup after
  * cpu_init() when the TSS has been initialized.
  */
-static const __initdata struct idt_data ist_idts[] = {
+static const __initconst struct idt_data ist_idts[] = {
        ISTG(X86_TRAP_DB,       debug,          DEBUG_STACK),
        ISTG(X86_TRAP_NMI,      nmi,            NMI_STACK),
        SISTG(X86_TRAP_BP,      int3,           DEBUG_STACK),
index 3feb648..2f72330 100644 (file)
@@ -67,7 +67,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
         * because the ->io_bitmap_max value must match the bitmap
         * contents:
         */
-       tss = &per_cpu(cpu_tss, get_cpu());
+       tss = &per_cpu(cpu_tss_rw, get_cpu());
 
        if (turn_on)
                bitmap_clear(t->io_bitmap_ptr, from, num);
index 49cfd9f..68e1867 100644 (file)
@@ -219,18 +219,6 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
        /* high bit used in ret_from_ code  */
        unsigned vector = ~regs->orig_ax;
 
-       /*
-        * NB: Unlike exception entries, IRQ entries do not reliably
-        * handle context tracking in the low-level entry code.  This is
-        * because syscall entries execute briefly with IRQs on before
-        * updating context tracking state, so we can take an IRQ from
-        * kernel mode with CONTEXT_USER.  The low-level entry code only
-        * updates the context if we came from user mode, so we won't
-        * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
-        * code is cleaned up enough that we can cleanly defer enabling
-        * IRQs.
-        */
-
        entering_irq();
 
        /* entering_irq() tells RCU that we're not quiescent.  Check it. */
index a83b334..c1bdbd3 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/mm.h>
 
 #include <asm/apic.h>
+#include <asm/nospec-branch.h>
 
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 
@@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
 static void call_on_stack(void *func, void *stack)
 {
        asm volatile("xchgl     %%ebx,%%esp     \n"
-                    "call      *%%edi          \n"
+                    CALL_NOSPEC
                     "movl      %%ebx,%%esp     \n"
                     : "=b" (stack)
                     : "0" (stack),
-                      "D"(func)
+                      [thunk_target] "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
 }
 
@@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
                call_on_stack(print_stack_overflow, isp);
 
        asm volatile("xchgl     %%ebx,%%esp     \n"
-                    "call      *%%edi          \n"
+                    CALL_NOSPEC
                     "movl      %%ebx,%%esp     \n"
                     : "=a" (arg1), "=b" (isp)
                     :  "0" (desc),   "1" (isp),
-                       "D" (desc->handle_irq)
+                       [thunk_target] "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
 }
index 020efbf..d86e344 100644 (file)
@@ -57,10 +57,10 @@ static inline void stack_overflow_check(struct pt_regs *regs)
        if (regs->sp >= estack_top && regs->sp <= estack_bottom)
                return;
 
-       WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n",
+       WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pF)\n",
                current->comm, curbase, regs->sp,
                irq_stack_top, irq_stack_bottom,
-               estack_top, estack_bottom);
+               estack_top, estack_bottom, (void *)regs->ip);
 
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
index 8da3e90..a539410 100644 (file)
@@ -61,6 +61,9 @@ void __init init_ISA_irqs(void)
        struct irq_chip *chip = legacy_pic->chip;
        int i;
 
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+       init_bsp_APIC();
+#endif
        legacy_pic->init(0);
 
        for (i = 0; i < nr_legacy_irqs(); i++)
index e941136..203d398 100644 (file)
@@ -40,6 +40,7 @@
 #include <asm/debugreg.h>
 #include <asm/set_memory.h>
 #include <asm/sections.h>
+#include <asm/nospec-branch.h>
 
 #include "common.h"
 
@@ -203,7 +204,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
 }
 
 /* Check whether insn is indirect jump */
-static int insn_is_indirect_jump(struct insn *insn)
+static int __insn_is_indirect_jump(struct insn *insn)
 {
        return ((insn->opcode.bytes[0] == 0xff &&
                (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -237,6 +238,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
        return (start <= target && target <= start + len);
 }
 
+static int insn_is_indirect_jump(struct insn *insn)
+{
+       int ret = __insn_is_indirect_jump(insn);
+
+#ifdef CONFIG_RETPOLINE
+       /*
+        * Jump to x86_indirect_thunk_* is treated as an indirect jump.
+        * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with an
+        * older gcc may still use indirect jumps. So we add this check
+        * instead of replacing the indirect-jump check.
+        */
+       if (!ret)
+               ret = insn_jump_into_range(insn,
+                               (unsigned long)__indirect_thunk_start,
+                               (unsigned long)__indirect_thunk_end -
+                               (unsigned long)__indirect_thunk_start);
+#endif
+       return ret;
+}
+
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {
index 1c1eae9..26d713e 100644 (file)
@@ -5,6 +5,11 @@
  * Copyright (C) 2002 Andi Kleen
  *
  * This handles calls from both 32bit and 64bit mode.
+ *
+ *     context.ldt_usr_sem
+ *     contex.ldt_usr_sem
+ *       mmap_sem
+ *         context.lock
  */
 
 #include <linux/errno.h>
@@ -19,6 +24,7 @@
 #include <linux/uaccess.h>
 
 #include <asm/ldt.h>
+#include <asm/tlb.h>
 #include <asm/desc.h>
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
@@ -42,17 +48,15 @@ static void refresh_ldt_segments(void)
 #endif
 }
 
-/* context.lock is held for us, so we don't need any locking. */
+/* context.lock is held by the task which issued the smp function call */
 static void flush_ldt(void *__mm)
 {
        struct mm_struct *mm = __mm;
-       mm_context_t *pc;
 
        if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
                return;
 
-       pc = &mm->context;
-       set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
+       load_mm_ldt(mm);
 
        refresh_ldt_segments();
 }
@@ -89,25 +93,143 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
                return NULL;
        }
 
+       /* The new LDT isn't aliased for PTI yet. */
+       new_ldt->slot = -1;
+
        new_ldt->nr_entries = num_entries;
        return new_ldt;
 }
 
+/*
+ * If PTI is enabled, this maps the LDT into the kernelmode and
+ * usermode tables for the given mm.
+ *
+ * There is no corresponding unmap function.  Even if the LDT is freed, we
+ * leave the PTEs around until the slot is reused or the mm is destroyed.
+ * This is harmless: the LDT is always in ordinary memory, and no one will
+ * access the freed slot.
+ *
+ * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
+ * it useful, and the flush would slow down modify_ldt().
+ */
+static int
+map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       bool is_vmalloc, had_top_level_entry;
+       unsigned long va;
+       spinlock_t *ptl;
+       pgd_t *pgd;
+       int i;
+
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return 0;
+
+       /*
+        * Any given ldt_struct should have map_ldt_struct() called at most
+        * once.
+        */
+       WARN_ON(ldt->slot != -1);
+
+       /*
+        * Did we already have the top level entry allocated?  We can't
+        * use pgd_none() for this because it doesn't do anything on
+        * 4-level page table kernels.
+        */
+       pgd = pgd_offset(mm, LDT_BASE_ADDR);
+       had_top_level_entry = (pgd->pgd != 0);
+
+       is_vmalloc = is_vmalloc_addr(ldt->entries);
+
+       for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
+               unsigned long offset = i << PAGE_SHIFT;
+               const void *src = (char *)ldt->entries + offset;
+               unsigned long pfn;
+               pte_t pte, *ptep;
+
+               va = (unsigned long)ldt_slot_va(slot) + offset;
+               pfn = is_vmalloc ? vmalloc_to_pfn(src) :
+                       page_to_pfn(virt_to_page(src));
+               /*
+                * Treat the PTI LDT range as a *userspace* range.
+                * get_locked_pte() will allocate all needed pagetables
+                * and account for them in this mm.
+                */
+               ptep = get_locked_pte(mm, va, &ptl);
+               if (!ptep)
+                       return -ENOMEM;
+               /*
+                * Map it RO so the easy to find address is not a primary
+                * target via some kernel interface which misses a
+                * permission check.
+                */
+               pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL));
+               set_pte_at(mm, va, ptep, pte);
+               pte_unmap_unlock(ptep, ptl);
+       }
+
+       if (mm->context.ldt) {
+               /*
+                * We already had an LDT.  The top-level entry should already
+                * have been allocated and synchronized with the usermode
+                * tables.
+                */
+               WARN_ON(!had_top_level_entry);
+               if (static_cpu_has(X86_FEATURE_PTI))
+                       WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
+       } else {
+               /*
+                * This is the first time we're mapping an LDT for this process.
+                * Sync the pgd to the usermode tables.
+                */
+               WARN_ON(had_top_level_entry);
+               if (static_cpu_has(X86_FEATURE_PTI)) {
+                       WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
+                       set_pgd(kernel_to_user_pgdp(pgd), *pgd);
+               }
+       }
+
+       va = (unsigned long)ldt_slot_va(slot);
+       flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
+
+       ldt->slot = slot;
+#endif
+       return 0;
+}
+
+static void free_ldt_pgtables(struct mm_struct *mm)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       struct mmu_gather tlb;
+       unsigned long start = LDT_BASE_ADDR;
+       unsigned long end = start + (1UL << PGDIR_SHIFT);
+
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return;
+
+       tlb_gather_mmu(&tlb, mm, start, end);
+       free_pgd_range(&tlb, start, end, start, end);
+       tlb_finish_mmu(&tlb, start, end);
+#endif
+}
+
 /* After calling this, the LDT is immutable. */
 static void finalize_ldt_struct(struct ldt_struct *ldt)
 {
        paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
 }
 
-/* context.lock is held */
-static void install_ldt(struct mm_struct *current_mm,
-                       struct ldt_struct *ldt)
+static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
 {
+       mutex_lock(&mm->context.lock);
+
        /* Synchronizes with READ_ONCE in load_mm_ldt. */
-       smp_store_release(&current_mm->context.ldt, ldt);
+       smp_store_release(&mm->context.ldt, ldt);
 
-       /* Activate the LDT for all CPUs using current_mm. */
-       on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+       /* Activate the LDT for all CPUs using current's mm. */
+       on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
+
+       mutex_unlock(&mm->context.lock);
 }
 
 static void free_ldt_struct(struct ldt_struct *ldt)
@@ -124,27 +246,20 @@ static void free_ldt_struct(struct ldt_struct *ldt)
 }
 
 /*
- * we do not have to muck with descriptors here, that is
- * done in switch_mm() as needed.
+ * Called on fork from arch_dup_mmap(). Just copy the current LDT state;
+ * the new task is not running, so nothing can be installed.
  */
-int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
+int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
 {
        struct ldt_struct *new_ldt;
-       struct mm_struct *old_mm;
        int retval = 0;
 
-       mutex_init(&mm->context.lock);
-       old_mm = current->mm;
-       if (!old_mm) {
-               mm->context.ldt = NULL;
+       if (!old_mm)
                return 0;
-       }
 
        mutex_lock(&old_mm->context.lock);
-       if (!old_mm->context.ldt) {
-               mm->context.ldt = NULL;
+       if (!old_mm->context.ldt)
                goto out_unlock;
-       }
 
        new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
        if (!new_ldt) {
@@ -156,6 +271,12 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
               new_ldt->nr_entries * LDT_ENTRY_SIZE);
        finalize_ldt_struct(new_ldt);
 
+       retval = map_ldt_struct(mm, new_ldt, 0);
+       if (retval) {
+               free_ldt_pgtables(mm);
+               free_ldt_struct(new_ldt);
+               goto out_unlock;
+       }
        mm->context.ldt = new_ldt;
 
 out_unlock:
@@ -174,13 +295,18 @@ void destroy_context_ldt(struct mm_struct *mm)
        mm->context.ldt = NULL;
 }
 
+void ldt_arch_exit_mmap(struct mm_struct *mm)
+{
+       free_ldt_pgtables(mm);
+}
+
 static int read_ldt(void __user *ptr, unsigned long bytecount)
 {
        struct mm_struct *mm = current->mm;
        unsigned long entries_size;
        int retval;
 
-       mutex_lock(&mm->context.lock);
+       down_read(&mm->context.ldt_usr_sem);
 
        if (!mm->context.ldt) {
                retval = 0;
@@ -209,7 +335,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
        retval = bytecount;
 
 out_unlock:
-       mutex_unlock(&mm->context.lock);
+       up_read(&mm->context.ldt_usr_sem);
        return retval;
 }
 
@@ -269,7 +395,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
                        ldt.avl = 0;
        }
 
-       mutex_lock(&mm->context.lock);
+       if (down_write_killable(&mm->context.ldt_usr_sem))
+               return -EINTR;
 
        old_ldt       = mm->context.ldt;
        old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
@@ -286,12 +413,31 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
        new_ldt->entries[ldt_info.entry_number] = ldt;
        finalize_ldt_struct(new_ldt);
 
+       /*
+        * If we are using PTI, map the new LDT into the userspace pagetables.
+        * If there is already an LDT, use the other slot so that other CPUs
+        * will continue to use the old LDT until install_ldt() switches
+        * them over to the new LDT.
+        */
+       error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
+       if (error) {
+               /*
+                * This can only fail for the first LDT setup. If an LDT is
+                * already installed then the PTE page is already
+                * populated. Mop up a half-populated page table.
+                */
+               if (!WARN_ON_ONCE(old_ldt))
+                       free_ldt_pgtables(mm);
+               free_ldt_struct(new_ldt);
+               goto out_unlock;
+       }
+
        install_ldt(mm, new_ldt);
        free_ldt_struct(old_ldt);
        error = 0;
 
 out_unlock:
-       mutex_unlock(&mm->context.lock);
+       up_write(&mm->context.ldt_usr_sem);
 out:
        return error;
 }
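
The read_ldt()/write_ldt() paths reworked above are reached from userspace through the modify_ldt(2) syscall (glibc provides no wrapper, so it goes through syscall(2)). A minimal x86-only sketch that installs one flat 32-bit data entry and reads the table back; error handling is deliberately thin:

#include <asm/ldt.h>            /* struct user_desc, LDT_ENTRIES, LDT_ENTRY_SIZE */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct user_desc desc;
        static unsigned char buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
        long ret;

        memset(&desc, 0, sizeof(desc));
        desc.entry_number   = 0;
        desc.base_addr      = 0;
        desc.limit          = 0xfffff;
        desc.seg_32bit      = 1;
        desc.limit_in_pages = 1;
        desc.useable        = 1;

        /* func 0x11: write_ldt() in "new mode" */
        ret = syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
        if (ret)
                perror("modify_ldt(write)");

        /* func 0: read_ldt(); returns the number of bytes copied out */
        ret = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
        printf("read_ldt returned %ld bytes\n", ret);
        return 0;
}
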
index 00bc751..edfede7 100644 (file)
@@ -48,8 +48,6 @@ static void load_segments(void)
                "\tmovl $"STR(__KERNEL_DS)",%%eax\n"
                "\tmovl %%eax,%%ds\n"
                "\tmovl %%eax,%%es\n"
-               "\tmovl %%eax,%%fs\n"
-               "\tmovl %%eax,%%gs\n"
                "\tmovl %%eax,%%ss\n"
                : : : "eax", "memory");
 #undef STR
@@ -232,8 +230,8 @@ void machine_kexec(struct kimage *image)
         * The gdt & idt are now invalid.
         * If you want to load them you must set up your own idt & gdt.
         */
-       set_gdt(phys_to_virt(0), 0);
        idt_invalidate(phys_to_virt(0));
+       set_gdt(phys_to_virt(0), 0);
 
        /* now call it */
        image->start = relocate_kernel_ptr((unsigned long)image->head,
index ac0be82..9edadab 100644 (file)
@@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
@@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_mmu_ops, read_cr2);
                PATCH_SITE(pv_mmu_ops, read_cr3);
                PATCH_SITE(pv_mmu_ops, write_cr3);
-               PATCH_SITE(pv_mmu_ops, flush_tlb_single);
                PATCH_SITE(pv_cpu_ops, wbinvd);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
                case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
index 97fb3e5..cb368c2 100644 (file)
@@ -47,7 +47,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
        .x86_tss = {
                /*
                 * .sp0 is only used when entering ring 0 from a lower
@@ -56,6 +56,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
                 * Poison it.
                 */
                .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
+
+#ifdef CONFIG_X86_64
+               /*
+                * .sp1 is cpu_current_top_of_stack.  The init task never
+                * runs user code, but cpu_current_top_of_stack should still
+                * be well defined before the first context switch.
+                */
+               .sp1 = TOP_OF_INIT_STACK,
+#endif
+
 #ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
                .ss1 = __KERNEL_CS,
@@ -71,11 +81,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
          */
        .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
-#ifdef CONFIG_X86_32
-       .SYSENTER_stack_canary  = STACK_END_MAGIC,
-#endif
 };
-EXPORT_PER_CPU_SYMBOL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
 
 DEFINE_PER_CPU(bool, __tss_limit_invalid);
 EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
@@ -104,7 +111,7 @@ void exit_thread(struct task_struct *tsk)
        struct fpu *fpu = &t->fpu;
 
        if (bp) {
-               struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
+               struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
 
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
@@ -299,7 +306,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
        }
 
        if ((tifp ^ tifn) & _TIF_NOTSC)
-               cr4_toggle_bits(X86_CR4_TSD);
+               cr4_toggle_bits_irqsoff(X86_CR4_TSD);
 
        if ((tifp ^ tifn) & _TIF_NOCPUID)
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
@@ -373,19 +380,24 @@ void stop_this_cpu(void *dummy)
        disable_local_APIC();
        mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
 
+       /*
+        * Use wbinvd on processors that support SME. This provides support
+        * for performing a successful kexec when going from SME inactive
+        * to SME active (or vice-versa). The cache must be cleared so that
+        * if there are entries with the same physical address, both with and
+        * without the encryption bit, they don't race each other when flushed
+        * and potentially end up with the wrong entry being committed to
+        * memory.
+        */
+       if (boot_cpu_has(X86_FEATURE_SME))
+               native_wbinvd();
        for (;;) {
                /*
-                * Use wbinvd followed by hlt to stop the processor. This
-                * provides support for kexec on a processor that supports
-                * SME. With kexec, going from SME inactive to SME active
-                * requires clearing cache entries so that addresses without
-                * the encryption bit set don't corrupt the same physical
-                * address that has the encryption bit set when caches are
-                * flushed. To achieve this a wbinvd is performed followed by
-                * a hlt. Even if the processor is not in the kexec/SME
-                * scenario this only adds a wbinvd to a halting processor.
+                * Use native_halt() so that memory contents don't change
+                * (stack usage and variables) after possibly issuing the
+                * native_wbinvd() above.
                 */
-               asm volatile("wbinvd; hlt" : : : "memory");
+               native_halt();
        }
 }
 
index 45bf0c5..5224c60 100644 (file)
@@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
index eeeb34f..c754662 100644 (file)
@@ -69,9 +69,8 @@ void __show_regs(struct pt_regs *regs, int all)
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;
 
-       printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs, (void *)regs->ip);
-       printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
-               regs->sp, regs->flags);
+       show_iret_regs(regs);
+
        if (regs->orig_ax != -1)
                pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
        else
@@ -88,6 +87,9 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);
 
+       if (!all)
+               return;
+
        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
@@ -98,9 +100,6 @@ void __show_regs(struct pt_regs *regs, int all)
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
 
-       if (!all)
-               return;
-
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = __read_cr3();
@@ -400,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
                     this_cpu_read(irq_count) != -1);
@@ -462,6 +461,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Switch the PDA and FPU contexts.
         */
        this_cpu_write(current_task, next_p);
+       this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
 
        /* Reload sp0. */
        update_sp0(next_p);
index 8af2e8d..68d7ab8 100644 (file)
@@ -364,16 +364,6 @@ static void __init reserve_initrd(void)
            !ramdisk_image || !ramdisk_size)
                return;         /* No initrd provided by bootloader */
 
-       /*
-        * If SME is active, this memory will be marked encrypted by the
-        * kernel when it is accessed (including relocation). However, the
-        * ramdisk image was loaded decrypted by the bootloader, so make
-        * sure that it is encrypted before accessing it. For SEV the
-        * ramdisk will already be encrypted, so only do this for SME.
-        */
-       if (sme_active())
-               sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
-
        initrd_start = 0;
 
        mapped_size = memblock_mem_size(max_pfn_mapped);
@@ -906,9 +896,6 @@ void __init setup_arch(char **cmdline_p)
                set_bit(EFI_BOOT, &efi.flags);
                set_bit(EFI_64BIT, &efi.flags);
        }
-
-       if (efi_enabled(EFI_BOOT))
-               efi_memblock_x86_reserve_range();
 #endif
 
        x86_init.oem.arch_setup();
@@ -962,6 +949,8 @@ void __init setup_arch(char **cmdline_p)
 
        parse_early_param();
 
+       if (efi_enabled(EFI_BOOT))
+               efi_memblock_x86_reserve_range();
 #ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Memory used by the kernel cannot be hot-removed because Linux
index 3d01df7..ed556d5 100644 (file)
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(__max_logical_packages);
 static unsigned int logical_packages __read_mostly;
 
 /* Maximum number of SMT threads on any online core */
-int __max_smt_threads __read_mostly;
+int __read_mostly __max_smt_threads = 1;
 
 /* Flag to indicate if a complete sched domain rebuild is required */
 bool x86_topology_update;
@@ -126,25 +126,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
        spin_lock_irqsave(&rtc_lock, flags);
        CMOS_WRITE(0xa, 0xf);
        spin_unlock_irqrestore(&rtc_lock, flags);
-       local_flush_tlb();
-       pr_debug("1.\n");
        *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
                                                        start_eip >> 4;
-       pr_debug("2.\n");
        *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
                                                        start_eip & 0xf;
-       pr_debug("3.\n");
 }
 
 static inline void smpboot_restore_warm_reset_vector(void)
 {
        unsigned long flags;
 
-       /*
-        * Install writable page 0 entry to set BIOS data area.
-        */
-       local_flush_tlb();
-
        /*
         * Paranoid:  Set warm reset code and vector here back
         * to default values.
@@ -237,7 +228,7 @@ static void notrace start_secondary(void *unused)
        load_cr3(swapper_pg_dir);
        __flush_tlb_all();
 #endif
-
+       load_current_idt();
        cpu_init();
        x86_cpuinit.early_percpu_clock_init();
        preempt_disable();
@@ -932,12 +923,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
        initial_code = (unsigned long)start_secondary;
        initial_stack  = idle->thread.sp;
 
-       /*
-        * Enable the espfix hack for this CPU
-       */
-#ifdef CONFIG_X86_ESPFIX64
+       /* Enable the espfix hack for this CPU */
        init_espfix_ap(cpu);
-#endif
 
        /* So we see what's up */
        announce_cpu(cpu, apicid);
@@ -1304,7 +1291,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
         * Today neither Intel nor AMD support heterogeneous systems so
         * extrapolate the boot cpu's data to all packages.
         */
-       ncpus = cpu_data(0).booted_cores * smp_num_siblings;
+       ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
        __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
        pr_info("Max logical packages: %u\n", __max_logical_packages);
 
index 77835bc..093f2ea 100644 (file)
@@ -102,7 +102,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
        for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
             unwind_next_frame(&state)) {
 
-               regs = unwind_get_entry_regs(&state);
+               regs = unwind_get_entry_regs(&state, NULL);
                if (regs) {
                        /*
                         * Kernel mode registers on the stack indicate an
@@ -164,8 +164,12 @@ int save_stack_trace_tsk_reliable(struct task_struct *tsk,
 {
        int ret;
 
+       /*
+        * If the task doesn't have a stack (e.g., a zombie), the stack is
+        * "reliably" empty.
+        */
        if (!try_get_task_stack(tsk))
-               return -EINVAL;
+               return 0;
 
        ret = __save_stack_trace_reliable(trace, tsk);
 
index a4eb279..a2486f4 100644 (file)
@@ -138,6 +138,17 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
                return -1;
        set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
        pte_unmap(pte);
+
+       /*
+        * PTI poisons low addresses in the kernel page tables in the
+        * name of making them unusable for userspace.  To execute
+        * code at such a low address, the poison must be cleared.
+        *
+        * Note: 'pgd' actually gets set in p4d_alloc() _or_
+        * pud_alloc() depending on 4/5-level paging.
+        */
+       pgd->pgd &= ~_PAGE_NX;
+
        return 0;
 }
 
index 9a9c9b0..a5b802a 100644 (file)
@@ -93,17 +93,10 @@ static void set_tls_desc(struct task_struct *p, int idx,
        cpu = get_cpu();
 
        while (n-- > 0) {
-               if (LDT_empty(info) || LDT_zero(info)) {
+               if (LDT_empty(info) || LDT_zero(info))
                        memset(desc, 0, sizeof(*desc));
-               } else {
+               else
                        fill_ldt(desc, info);
-
-                       /*
-                        * Always set the accessed bit so that the CPU
-                        * doesn't try to write to the (read-only) GDT.
-                        */
-                       desc->type |= 1;
-               }
                ++info;
                ++desc;
        }
index 989514c..446c9ef 100644 (file)
@@ -51,6 +51,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/fpu/internal.h>
+#include <asm/cpu_entry_area.h>
 #include <asm/mce.h>
 #include <asm/fixmap.h>
 #include <asm/mach_traps.h>
@@ -348,23 +349,42 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 
        /*
         * If IRET takes a non-IST fault on the espfix64 stack, then we
-        * end up promoting it to a doublefault.  In that case, modify
-        * the stack to make it look like we just entered the #GP
-        * handler from user space, similar to bad_iret.
+        * end up promoting it to a doublefault.  In that case, take
+        * advantage of the fact that we're not using the normal (TSS.sp0)
+        * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
+        * and then modify our own IRET frame so that, when we return,
+        * we land directly at the #GP(0) vector with the stack already
+        * set up according to its expectations.
+        *
+        * The net result is that our #GP handler will think that we
+        * entered from usermode with the bad user context.
         *
         * No need for ist_enter here because we don't use RCU.
         */
-       if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
+       if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
                regs->cs == __KERNEL_CS &&
                regs->ip == (unsigned long)native_irq_return_iret)
        {
-               struct pt_regs *normal_regs = task_pt_regs(current);
+               struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
-               /* Fake a #GP(0) from userspace. */
-               memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
-               normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
+               /*
+                * regs->sp points to the failing IRET frame on the
+                * ESPFIX64 stack.  Copy it to the entry stack.  This fills
+                * in gpregs->ss through gpregs->ip.
+                *
+                */
+               memmove(&gpregs->ip, (void *)regs->sp, 5*8);
+               gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */
+
+               /*
+                * Adjust our frame so that we return straight to the #GP
+                * vector with the expected RSP value.  This is safe because
+                * we won't enable interrupts or schedule before we invoke
+                * general_protection, so nothing will clobber the stack
+                * frame we just set up.
+                */
                regs->ip = (unsigned long)general_protection;
-               regs->sp = (unsigned long)&normal_regs->orig_ax;
+               regs->sp = (unsigned long)&gpregs->orig_ax;
 
                return;
        }
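
The 5*8-byte memmove() works because the hardware IRET frame occupies the last five members of struct pt_regs, so a copy starting at &gpregs->ip fills everything up to and including gpregs->ss. The tail of the x86-64 pt_regs layout, shown here only as an illustration:

    /* Last five members of struct pt_regs on x86-64 (illustration). */
    struct iret_frame_tail {
            unsigned long ip;       /* RIP    - copy starts here      */
            unsigned long cs;       /* CS                             */
            unsigned long flags;    /* RFLAGS                         */
            unsigned long sp;       /* RSP                            */
            unsigned long ss;       /* SS     - 5 * 8 bytes in total  */
    };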
@@ -389,7 +409,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
         *
         *   Processors update CR2 whenever a page fault is detected. If a
         *   second page fault occurs while an earlier page fault is being
-        *   delivered, the faulting linear address of the second fault will
+        *   delivered, the faulting linear address of the second fault will
         *   overwrite the contents of CR2 (replacing the previous
         *   address). These updates to CR2 occur even if the page fault
         *   results in a double fault or occurs during the delivery of a
@@ -605,14 +625,15 @@ NOKPROBE_SYMBOL(do_int3);
 
 #ifdef CONFIG_X86_64
 /*
- * Help handler running on IST stack to switch off the IST stack if the
- * interrupted code was in user mode. The actual stack switch is done in
- * entry_64.S
+ * Help handler running on a per-cpu (IST or entry trampoline) stack
+ * to switch to the normal thread stack if the interrupted code was in
+ * user mode. The actual stack switch is done in entry_64.S
  */
 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
-       struct pt_regs *regs = task_pt_regs(current);
-       *regs = *eregs;
+       struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
+       if (regs != eregs)
+               *regs = *eregs;
        return regs;
 }
 NOKPROBE_SYMBOL(sync_regs);
@@ -628,13 +649,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
        /*
         * This is called from entry_64.S early in handling a fault
         * caused by a bad iret to user mode.  To handle the fault
-        * correctly, we want move our stack frame to task_pt_regs
-        * and we want to pretend that the exception came from the
-        * iret target.
+        * correctly, we want to move our stack frame to where it would
+        * be had we entered directly on the entry stack (rather than
+        * just below the IRET frame) and we want to pretend that the
+        * exception came from the IRET target.
         */
        struct bad_iret_stack *new_stack =
-               container_of(task_pt_regs(current),
-                            struct bad_iret_stack, regs);
+               (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
        /* Copy the IRET target to the new stack. */
        memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
@@ -795,14 +816,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        debug_stack_usage_dec();
 
 exit:
-#if defined(CONFIG_X86_32)
-       /*
-        * This is the most likely code path that involves non-trivial use
-        * of the SYSENTER stack.  Check that we haven't overrun it.
-        */
-       WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
-            "Overran or corrupted SYSENTER stack\n");
-#endif
        ist_exit(regs);
 }
 NOKPROBE_SYMBOL(do_debug);
@@ -929,6 +942,9 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 
 void __init trap_init(void)
 {
+       /* Init cpu_entry_area before IST entries are set up */
+       setup_cpu_entry_areas();
+
        idt_setup_traps();
 
        /*
@@ -936,8 +952,9 @@ void __init trap_init(void)
         * "sidt" instruction will not leak the location of the kernel, and
         * to defend the IDT against arbitrary memory write vulnerabilities.
         * It will be reloaded in cpu_init() */
-       __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
-       idt_descr.address = fix_to_virt(FIX_RO_IDT);
+       cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
+                   PAGE_KERNEL_RO);
+       idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
 
        /*
         * Should be a barrier for any external CPU state:
index 8ea117f..e169e85 100644 (file)
@@ -602,7 +602,6 @@ unsigned long native_calibrate_tsc(void)
                case INTEL_FAM6_KABYLAKE_DESKTOP:
                        crystal_khz = 24000;    /* 24.0 MHz */
                        break;
-               case INTEL_FAM6_SKYLAKE_X:
                case INTEL_FAM6_ATOM_DENVERTON:
                        crystal_khz = 25000;    /* 25.0 MHz */
                        break;
@@ -612,6 +611,8 @@ unsigned long native_calibrate_tsc(void)
                }
        }
 
+       if (crystal_khz == 0)
+               return 0;
        /*
         * TSC frequency determined by CPUID is a "hardware reported"
         * frequency and is the most accurate one so far we have. This
@@ -1315,6 +1316,12 @@ void __init tsc_init(void)
                (unsigned long)cpu_khz / 1000,
                (unsigned long)cpu_khz % 1000);
 
+       if (cpu_khz != tsc_khz) {
+               pr_info("Detected %lu.%03lu MHz TSC\n",
+                       (unsigned long)tsc_khz / 1000,
+                       (unsigned long)tsc_khz % 1000);
+       }
+
        /* Sanitize TSC ADJUST before cyc2ns gets initialized */
        tsc_store_and_check_tsc_adjust(true);
 
index a3f973b..1f9188f 100644 (file)
@@ -74,8 +74,50 @@ static struct orc_entry *orc_module_find(unsigned long ip)
 }
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+static struct orc_entry *orc_find(unsigned long ip);
+
+/*
+ * Ftrace dynamic trampolines do not have orc entries of their own.
+ * But they are copies of the ftrace entries that are static and
+ * defined in ftrace_*.S, which do have orc entries.
+ *
+ * If the unwinder comes across an ftrace trampoline, then find the
+ * ftrace function that was used to create it, and use that ftrace
+ * function's orc entry, as the placement of the return code in
+ * the stack will be identical.
+ */
+static struct orc_entry *orc_ftrace_find(unsigned long ip)
+{
+       struct ftrace_ops *ops;
+       unsigned long caller;
+
+       ops = ftrace_ops_trampoline(ip);
+       if (!ops)
+               return NULL;
+
+       if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+               caller = (unsigned long)ftrace_regs_call;
+       else
+               caller = (unsigned long)ftrace_call;
+
+       /* Prevent unlikely recursion */
+       if (ip == caller)
+               return NULL;
+
+       return orc_find(caller);
+}
+#else
+static struct orc_entry *orc_ftrace_find(unsigned long ip)
+{
+       return NULL;
+}
+#endif
+
 static struct orc_entry *orc_find(unsigned long ip)
 {
+       static struct orc_entry *orc;
+
        if (!orc_init)
                return NULL;
 
@@ -111,7 +153,11 @@ static struct orc_entry *orc_find(unsigned long ip)
                                  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
 
        /* Module lookup: */
-       return orc_module_find(ip);
+       orc = orc_module_find(ip);
+       if (orc)
+               return orc;
+
+       return orc_ftrace_find(ip);
 }
 
 static void orc_sort_swap(void *_a, void *_b, int size)
@@ -253,22 +299,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
        return NULL;
 }
 
-static bool stack_access_ok(struct unwind_state *state, unsigned long addr,
+static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
                            size_t len)
 {
        struct stack_info *info = &state->stack_info;
+       void *addr = (void *)_addr;
 
-       /*
-        * If the address isn't on the current stack, switch to the next one.
-        *
-        * We may have to traverse multiple stacks to deal with the possibility
-        * that info->next_sp could point to an empty stack and the address
-        * could be on a subsequent stack.
-        */
-       while (!on_stack(info, (void *)addr, len))
-               if (get_stack_info(info->next_sp, state->task, info,
-                                  &state->stack_mask))
-                       return false;
+       if (!on_stack(info, addr, len) &&
+           (get_stack_info(addr, state->task, info, &state->stack_mask)))
+               return false;
 
        return true;
 }
@@ -283,42 +322,32 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
        return true;
 }
 
-#define REGS_SIZE (sizeof(struct pt_regs))
-#define SP_OFFSET (offsetof(struct pt_regs, sp))
-#define IRET_REGS_SIZE (REGS_SIZE - offsetof(struct pt_regs, ip))
-#define IRET_SP_OFFSET (SP_OFFSET - offsetof(struct pt_regs, ip))
-
 static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
-                            unsigned long *ip, unsigned long *sp, bool full)
+                            unsigned long *ip, unsigned long *sp)
 {
-       size_t regs_size = full ? REGS_SIZE : IRET_REGS_SIZE;
-       size_t sp_offset = full ? SP_OFFSET : IRET_SP_OFFSET;
-       struct pt_regs *regs = (struct pt_regs *)(addr + regs_size - REGS_SIZE);
-
-       if (IS_ENABLED(CONFIG_X86_64)) {
-               if (!stack_access_ok(state, addr, regs_size))
-                       return false;
+       struct pt_regs *regs = (struct pt_regs *)addr;
 
-               *ip = regs->ip;
-               *sp = regs->sp;
+       /* x86-32 support will be more complicated due to the &regs->sp hack */
+       BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));
 
-               return true;
-       }
-
-       if (!stack_access_ok(state, addr, sp_offset))
+       if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
                return false;
 
        *ip = regs->ip;
+       *sp = regs->sp;
+       return true;
+}
 
-       if (user_mode(regs)) {
-               if (!stack_access_ok(state, addr + sp_offset,
-                                    REGS_SIZE - SP_OFFSET))
-                       return false;
+static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
+                                 unsigned long *ip, unsigned long *sp)
+{
+       struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;
 
-               *sp = regs->sp;
-       } else
-               *sp = (unsigned long)&regs->sp;
+       if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
+               return false;
 
+       *ip = regs->ip;
+       *sp = regs->sp;
        return true;
 }
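
deref_stack_iret_regs() relies on IRET_FRAME_OFFSET and IRET_FRAME_SIZE describing where that partial frame sits inside struct pt_regs. A sketch of definitions consistent with this usage (the authoritative ones live in the x86 ptrace header):

    /* The IRET frame is the ip/cs/flags/sp/ss tail of struct pt_regs. */
    #define IRET_FRAME_OFFSET       (offsetof(struct pt_regs, ip))
    #define IRET_FRAME_SIZE         (sizeof(struct pt_regs) - IRET_FRAME_OFFSET)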
 
@@ -327,7 +356,6 @@ bool unwind_next_frame(struct unwind_state *state)
        unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
        enum stack_type prev_type = state->stack_info.type;
        struct orc_entry *orc;
-       struct pt_regs *ptregs;
        bool indirect = false;
 
        if (unwind_done(state))
@@ -435,7 +463,7 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        case ORC_TYPE_REGS:
-               if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
+               if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn("can't dereference registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
                        goto done;
@@ -447,20 +475,14 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        case ORC_TYPE_REGS_IRET:
-               if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
+               if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn("can't dereference iret registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
                        goto done;
                }
 
-               ptregs = container_of((void *)sp, struct pt_regs, ip);
-               if ((unsigned long)ptregs >= prev_sp &&
-                   on_stack(&state->stack_info, ptregs, REGS_SIZE)) {
-                       state->regs = ptregs;
-                       state->full_regs = false;
-               } else
-                       state->regs = NULL;
-
+               state->regs = (void *)sp - IRET_FRAME_OFFSET;
+               state->full_regs = false;
                state->signal = true;
                break;
 
@@ -553,8 +575,18 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        }
 
        if (get_stack_info((unsigned long *)state->sp, state->task,
-                          &state->stack_info, &state->stack_mask))
-               return;
+                          &state->stack_info, &state->stack_mask)) {
+               /*
+                * We weren't on a valid stack.  It's possible that
+                * we overflowed a valid stack into a guard page.
+                * See if the next page up is valid so that we can
+                * generate some kind of backtrace if this happens.
+                */
+               void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
+               if (get_stack_info(next_page, state->task, &state->stack_info,
+                                  &state->stack_mask))
+                       return;
+       }
 
        /*
         * The caller can provide the address of the first frame directly
index a4009fb..9b138a0 100644 (file)
@@ -61,11 +61,17 @@ jiffies_64 = jiffies;
                . = ALIGN(HPAGE_SIZE);                          \
                __end_rodata_hpage_align = .;
 
+#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
+#define ALIGN_ENTRY_TEXT_END   . = ALIGN(PMD_SIZE);
+
 #else
 
 #define X64_ALIGN_RODATA_BEGIN
 #define X64_ALIGN_RODATA_END
 
+#define ALIGN_ENTRY_TEXT_BEGIN
+#define ALIGN_ENTRY_TEXT_END
+
 #endif
 
 PHDRS {
@@ -102,11 +108,28 @@ SECTIONS
                CPUIDLE_TEXT
                LOCK_TEXT
                KPROBES_TEXT
+               ALIGN_ENTRY_TEXT_BEGIN
                ENTRY_TEXT
                IRQENTRY_TEXT
+               ALIGN_ENTRY_TEXT_END
                SOFTIRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
+
+#ifdef CONFIG_X86_64
+               . = ALIGN(PAGE_SIZE);
+               _entry_trampoline = .;
+               *(.entry_trampoline)
+               . = ALIGN(PAGE_SIZE);
+               ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
+#endif
+
+#ifdef CONFIG_RETPOLINE
+               __indirect_thunk_start = .;
+               *(.text.__x86.indirect_thunk)
+               __indirect_thunk_end = .;
+#endif
+
                /* End of text section */
                _etext = .;
        } :text = 0x9090
index cdc70a3..c2cea66 100644 (file)
@@ -44,7 +44,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
        [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
        [CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
        [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
-       [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX},
+       [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
        [CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
        [CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
        [CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
index 8079d14..b514b2b 100644 (file)
@@ -1046,7 +1046,6 @@ static void fetch_register_operand(struct operand *op)
 
 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
 {
-       ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
        case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
@@ -1068,13 +1067,11 @@ static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
 #endif
        default: BUG();
        }
-       ctxt->ops->put_fpu(ctxt);
 }
 
 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
                          int reg)
 {
-       ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
        case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
@@ -1096,12 +1093,10 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
 #endif
        default: BUG();
        }
-       ctxt->ops->put_fpu(ctxt);
 }
 
 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 {
-       ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
        case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
@@ -1113,12 +1108,10 @@ static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
        case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
        default: BUG();
        }
-       ctxt->ops->put_fpu(ctxt);
 }
 
 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 {
-       ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
        case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
@@ -1130,7 +1123,6 @@ static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
        case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
        default: BUG();
        }
-       ctxt->ops->put_fpu(ctxt);
 }
 
 static int em_fninit(struct x86_emulate_ctxt *ctxt)
@@ -1138,9 +1130,7 @@ static int em_fninit(struct x86_emulate_ctxt *ctxt)
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);
 
-       ctxt->ops->get_fpu(ctxt);
        asm volatile("fninit");
-       ctxt->ops->put_fpu(ctxt);
        return X86EMUL_CONTINUE;
 }
 
@@ -1151,9 +1141,7 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);
 
-       ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstcw %0": "+m"(fcw));
-       ctxt->ops->put_fpu(ctxt);
 
        ctxt->dst.val = fcw;
 
@@ -1167,9 +1155,7 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);
 
-       ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstsw %0": "+m"(fsw));
-       ctxt->ops->put_fpu(ctxt);
 
        ctxt->dst.val = fsw;
 
@@ -2404,9 +2390,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 }
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
-                                    u64 cr0, u64 cr4)
+                                   u64 cr0, u64 cr3, u64 cr4)
 {
        int bad;
+       u64 pcid;
+
+       /* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
+       pcid = 0;
+       if (cr4 & X86_CR4_PCIDE) {
+               pcid = cr3 & 0xfff;
+               cr3 &= ~0xfff;
+       }
+
+       bad = ctxt->ops->set_cr(ctxt, 3, cr3);
+       if (bad)
+               return X86EMUL_UNHANDLEABLE;
 
        /*
         * First enable PAE, long mode needs it before CR0.PG = 1 is set.
@@ -2425,6 +2423,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                bad = ctxt->ops->set_cr(ctxt, 4, cr4);
                if (bad)
                        return X86EMUL_UNHANDLEABLE;
+               if (pcid) {
+                       bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
+                       if (bad)
+                               return X86EMUL_UNHANDLEABLE;
+               }
+
        }
 
        return X86EMUL_CONTINUE;
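
The ordering matters because CR4.PCIDE may only be set while CR3[11:0] is zero: the PCID is stripped from the saved CR3, PCIDE is enabled along with the other CR4 bits, and the PCID is written back last. A worked example with illustrative values:

    u64 cr3  = 0x123456005ULL;      /* saved CR3 with PCID 0x005 (illustrative) */
    u64 pcid = cr3 & 0xfff;         /* 0x005                                    */
    cr3     &= ~0xfffULL;           /* 0x123456000 - legal while setting PCIDE  */
    /* ...set CR3, enable PAE/PG/PCIDE, then restore CR3 as (cr3 | pcid)... */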
@@ -2435,11 +2439,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
        struct desc_struct desc;
        struct desc_ptr dt;
        u16 selector;
-       u32 val, cr0, cr4;
+       u32 val, cr0, cr3, cr4;
        int i;
 
        cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-       ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
+       cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
        ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
        ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
 
@@ -2481,14 +2485,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
        ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
 
-       return rsm_enter_protected_mode(ctxt, cr0, cr4);
+       return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
-       u64 val, cr0, cr4;
+       u64 val, cr0, cr3, cr4;
        u32 base3;
        u16 selector;
        int i, r;
@@ -2505,7 +2509,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
        cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-       ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
+       cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
        cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
        ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
        val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
@@ -2533,7 +2537,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
        ctxt->ops->set_gdt(ctxt, &dt);
 
-       r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+       r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
        if (r != X86EMUL_CONTINUE)
                return r;
 
@@ -4001,12 +4005,8 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       ctxt->ops->get_fpu(ctxt);
-
        rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
 
-       ctxt->ops->put_fpu(ctxt);
-
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
@@ -4014,6 +4014,26 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
                                   fxstate_size(ctxt));
 }
 
+/*
+ * FXRSTOR might restore XMM registers not provided by the guest. Fill
+ * in the host registers (via FXSAVE) instead, so they won't be modified.
+ * (preemption has to stay disabled until FXRSTOR).
+ *
+ * Use noinline so the large on-stack fxregs_state buffer does not bloat the callers' stack frames.
+ */
+static noinline int fxregs_fixup(struct fxregs_state *fx_state,
+                                const size_t used_size)
+{
+       struct fxregs_state fx_tmp;
+       int rc;
+
+       rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
+       memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
+              __fxstate_size(16) - used_size);
+
+       return rc;
+}
+
 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 {
        struct fxregs_state fx_state;
@@ -4024,19 +4044,17 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       ctxt->ops->get_fpu(ctxt);
-
        size = fxstate_size(ctxt);
+       rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+
        if (size < __fxstate_size(16)) {
-               rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+               rc = fxregs_fixup(&fx_state, size);
                if (rc != X86EMUL_CONTINUE)
                        goto out;
        }
 
-       rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
-       if (rc != X86EMUL_CONTINUE)
-               goto out;
-
        if (fx_state.mxcsr >> 16) {
                rc = emulate_gp(ctxt, 0);
                goto out;
@@ -4046,8 +4064,6 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
                rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
 
 out:
-       ctxt->ops->put_fpu(ctxt);
-
        return rc;
 }
 
@@ -5000,6 +5016,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        bool op_prefix = false;
        bool has_seg_override = false;
        struct opcode opcode;
+       u16 dummy;
+       struct desc_struct desc;
 
        ctxt->memop.type = OP_NONE;
        ctxt->memopp = NULL;
@@ -5018,6 +5036,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        switch (mode) {
        case X86EMUL_MODE_REAL:
        case X86EMUL_MODE_VM86:
+               def_op_bytes = def_ad_bytes = 2;
+               ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
+               if (desc.d)
+                       def_op_bytes = def_ad_bytes = 4;
+               break;
        case X86EMUL_MODE_PROT16:
                def_op_bytes = def_ad_bytes = 2;
                break;
@@ -5290,9 +5313,7 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
 {
        int rc;
 
-       ctxt->ops->get_fpu(ctxt);
        rc = asm_safe("fwait");
-       ctxt->ops->put_fpu(ctxt);
 
        if (unlikely(rc != X86EMUL_CONTINUE))
                return emulate_exception(ctxt, MF_VECTOR, 0, false);
index bdff437..4e822ad 100644 (file)
@@ -209,12 +209,12 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
 
        old_irr = ioapic->irr;
        ioapic->irr |= mask;
-       if (edge)
+       if (edge) {
                ioapic->irr_delivered &= ~mask;
-       if ((edge && old_irr == ioapic->irr) ||
-           (!edge && entry.fields.remote_irr)) {
-               ret = 0;
-               goto out;
+               if (old_irr == ioapic->irr) {
+                       ret = 0;
+                       goto out;
+               }
        }
 
        ret = ioapic_service(ioapic, irq, line_status);
@@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
                    index == RTC_GSI) {
                        if (kvm_apic_match_dest(vcpu, NULL, 0,
                                     e->fields.dest_id, e->fields.dest_mode) ||
-                           (e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
-                            kvm_apic_pending_eoi(vcpu, e->fields.vector)))
+                           kvm_apic_pending_eoi(vcpu, e->fields.vector))
                                __set_bit(e->fields.vector,
                                          ioapic_handled_vectors);
                }
@@ -277,6 +276,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
        unsigned index;
        bool mask_before, mask_after;
+       int old_remote_irr, old_delivery_status;
        union kvm_ioapic_redirect_entry *e;
 
        switch (ioapic->ioregsel) {
@@ -299,14 +299,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                        return;
                e = &ioapic->redirtbl[index];
                mask_before = e->fields.mask;
+               /* Preserve read-only fields */
+               old_remote_irr = e->fields.remote_irr;
+               old_delivery_status = e->fields.delivery_status;
                if (ioapic->ioregsel & 1) {
                        e->bits &= 0xffffffff;
                        e->bits |= (u64) val << 32;
                } else {
                        e->bits &= ~0xffffffffULL;
                        e->bits |= (u32) val;
-                       e->fields.remote_irr = 0;
                }
+               e->fields.remote_irr = old_remote_irr;
+               e->fields.delivery_status = old_delivery_status;
+
+               /*
+                * Some OSes (Linux, Xen) assume that the Remote IRR bit will
+                * be cleared by IOAPIC hardware when the entry is configured
+                * as edge-triggered. This behavior is used to simulate an
+                * explicit EOI on IOAPICs that don't have the EOI register.
+                */
+               if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
+                       e->fields.remote_irr = 0;
+
                mask_after = e->fields.mask;
                if (mask_before != mask_after)
                        kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
@@ -324,7 +338,9 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
        struct kvm_lapic_irq irqe;
        int ret;
 
-       if (entry->fields.mask)
+       if (entry->fields.mask ||
+           (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
+           entry->fields.remote_irr))
                return -1;
 
        ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
index 943acbf..e2c1fb8 100644 (file)
@@ -266,9 +266,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
        recalculate_apic_map(apic->vcpu->kvm);
 }
 
+static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
+{
+       return ((id >> 4) << 16) | (1 << (id & 0xf));
+}
+
 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 {
-       u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
+       u32 ldr = kvm_apic_calc_x2apic_ldr(id);
 
        WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
 
@@ -2245,6 +2250,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 {
        if (apic_x2apic_mode(vcpu->arch.apic)) {
                u32 *id = (u32 *)(s->regs + APIC_ID);
+               u32 *ldr = (u32 *)(s->regs + APIC_LDR);
 
                if (vcpu->kvm->arch.x2apic_format) {
                        if (*id != vcpu->vcpu_id)
@@ -2255,6 +2261,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
                        else
                                *id <<= 24;
                }
+
+               /* In x2APIC mode, the LDR is fixed and based on the id */
+               if (set)
+                       *ldr = kvm_apic_calc_x2apic_ldr(*id);
        }
 
        return 0;
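
kvm_apic_calc_x2apic_ldr() implements the architectural x2APIC logical destination encoding: the upper 16 bits hold the cluster (id >> 4) and the lower 16 bits a one-hot position within that cluster (1 << (id & 0xf)). A worked example:

    u32 id  = 0x23;                                  /* APIC ID 35              */
    u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf)); /* cluster 2, position 3   */
                                                     /* -> ldr == 0x00020008    */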
index e5e66e5..2b8eb4d 100644 (file)
@@ -3395,7 +3395,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if(make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, 0, 0,
                                vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
@@ -3410,7 +3410,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                        spin_lock(&vcpu->kvm->mmu_lock);
                        if (make_mmu_pages_available(vcpu) < 0) {
                                spin_unlock(&vcpu->kvm->mmu_lock);
-                               return 1;
+                               return -ENOSPC;
                        }
                        sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
                                        i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
@@ -3450,7 +3450,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if (make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
@@ -3487,7 +3487,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if (make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
                                      0, ACC_ALL);
@@ -3781,7 +3781,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
        if (unlikely(!lapic_in_kernel(vcpu) ||
-                    kvm_event_needs_reinjection(vcpu)))
+                    kvm_event_needs_reinjection(vcpu) ||
+                    vcpu->arch.exception.pending))
                return false;
 
        if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
@@ -5465,30 +5466,34 @@ static void mmu_destroy_caches(void)
 
 int kvm_mmu_module_init(void)
 {
+       int ret = -ENOMEM;
+
        kvm_mmu_clear_all_pte_masks();
 
        pte_list_desc_cache = kmem_cache_create("pte_list_desc",
                                            sizeof(struct pte_list_desc),
                                            0, SLAB_ACCOUNT, NULL);
        if (!pte_list_desc_cache)
-               goto nomem;
+               goto out;
 
        mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
                                                  sizeof(struct kvm_mmu_page),
                                                  0, SLAB_ACCOUNT, NULL);
        if (!mmu_page_header_cache)
-               goto nomem;
+               goto out;
 
        if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
-               goto nomem;
+               goto out;
 
-       register_shrinker(&mmu_shrinker);
+       ret = register_shrinker(&mmu_shrinker);
+       if (ret)
+               goto out;
 
        return 0;
 
-nomem:
+out:
        mmu_destroy_caches();
-       return -ENOMEM;
+       return ret;
 }
 
 /*
index 59e13a7..f40d0da 100644 (file)
@@ -45,6 +45,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -2197,6 +2198,8 @@ static int ud_interception(struct vcpu_svm *svm)
        int er;
 
        er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
+       if (er == EMULATE_USER_EXIT)
+               return 0;
        if (er != EMULATE_DONE)
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
@@ -4976,6 +4979,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
                "mov %%r13, %c[r13](%[svm]) \n\t"
                "mov %%r14, %c[r14](%[svm]) \n\t"
                "mov %%r15, %c[r15](%[svm]) \n\t"
+#endif
+               /*
+                * Clear host registers marked as clobbered to prevent
+                * speculative use.
+                */
+               "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
+               "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
+               "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
+               "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
+               "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
+#ifdef CONFIG_X86_64
+               "xor %%r8, %%r8 \n\t"
+               "xor %%r9, %%r9 \n\t"
+               "xor %%r10, %%r10 \n\t"
+               "xor %%r11, %%r11 \n\t"
+               "xor %%r12, %%r12 \n\t"
+               "xor %%r13, %%r13 \n\t"
+               "xor %%r14, %%r14 \n\t"
+               "xor %%r15, %%r15 \n\t"
 #endif
                "pop %%" _ASM_BP
                :
@@ -5006,6 +5028,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
                );
 
+       /* Eliminate branch target predictions from guest mode */
+       vmexit_fill_RSB();
+
 #ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
index 714a067..c829d89 100644 (file)
@@ -50,6 +50,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/nospec-branch.h>
 
 #include "trace.h"
 #include "pmu.h"
@@ -899,8 +900,16 @@ static inline short vmcs_field_to_offset(unsigned long field)
 {
        BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
 
-       if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
-           vmcs_field_to_offset_table[field] == 0)
+       if (field >= ARRAY_SIZE(vmcs_field_to_offset_table))
+               return -ENOENT;
+
+       /*
+        * FIXME: Mitigation for CVE-2017-5753.  To be replaced with a
+        * generic mechanism.
+        */
+       asm("lfence");
+
+       if (vmcs_field_to_offset_table[field] == 0)
                return -ENOENT;
 
        return vmcs_field_to_offset_table[field];
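
The FIXME above refers to the generic index-clamping helper that was being introduced around the same time; a sketch of what the replacement might look like, assuming array_index_nospec() from <linux/nospec.h> (not part of this patch):

    /* Clamp 'field' under speculation before using it as a table index. */
    field = array_index_nospec(field,
                               ARRAY_SIZE(vmcs_field_to_offset_table));
    if (vmcs_field_to_offset_table[field] == 0)
            return -ENOENT;
    return vmcs_field_to_offset_table[field];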
@@ -2300,7 +2309,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 * processors.  See 22.2.4.
                 */
                vmcs_writel(HOST_TR_BASE,
-                           (unsigned long)this_cpu_ptr(&cpu_tss));
+                           (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
                vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
 
                /*
@@ -5600,7 +5609,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
                vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
        }
 
-       vmcs_writel(GUEST_RFLAGS, 0x02);
+       kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
        kvm_rip_write(vcpu, 0xfff0);
 
        vmcs_writel(GUEST_GDTR_BASE, 0);
@@ -5915,11 +5924,9 @@ static int handle_exception(struct kvm_vcpu *vcpu)
                return 1;  /* already handled by vmx_vcpu_run() */
 
        if (is_invalid_opcode(intr_info)) {
-               if (is_guest_mode(vcpu)) {
-                       kvm_queue_exception(vcpu, UD_VECTOR);
-                       return 1;
-               }
                er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
+               if (er == EMULATE_USER_EXIT)
+                       return 0;
                if (er != EMULATE_DONE)
                        kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
@@ -6602,7 +6609,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                if (kvm_test_request(KVM_REQ_EVENT, vcpu))
                        return 1;
 
-               err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
+               err = emulate_instruction(vcpu, 0);
 
                if (err == EMULATE_USER_EXIT) {
                        ++vcpu->stat.mmio_exits;
@@ -6750,16 +6757,10 @@ static __init int hardware_setup(void)
                        goto out;
        }
 
-       vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
        memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
        memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
 
-       /*
-        * Allow direct access to the PC debug port (it is often used for I/O
-        * delays, but the vmexits simply slow things down).
-        */
        memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
-       clear_bit(0x80, vmx_io_bitmap_a);
 
        memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
 
@@ -7414,10 +7415,11 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
  */
 static void free_nested(struct vcpu_vmx *vmx)
 {
-       if (!vmx->nested.vmxon)
+       if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;
 
        vmx->nested.vmxon = false;
+       vmx->nested.smm.vmxon = false;
        free_vpid(vmx->nested.vpid02);
        vmx->nested.posted_intr_nv = -1;
        vmx->nested.current_vmptr = -1ull;
@@ -9419,6 +9421,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                /* Save guest registers, load host registers, keep flags */
                "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
                "pop %0 \n\t"
+               "setbe %c[fail](%0)\n\t"
                "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
                "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
                __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
@@ -9435,12 +9438,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                "mov %%r13, %c[r13](%0) \n\t"
                "mov %%r14, %c[r14](%0) \n\t"
                "mov %%r15, %c[r15](%0) \n\t"
+               "xor %%r8d,  %%r8d \n\t"
+               "xor %%r9d,  %%r9d \n\t"
+               "xor %%r10d, %%r10d \n\t"
+               "xor %%r11d, %%r11d \n\t"
+               "xor %%r12d, %%r12d \n\t"
+               "xor %%r13d, %%r13d \n\t"
+               "xor %%r14d, %%r14d \n\t"
+               "xor %%r15d, %%r15d \n\t"
 #endif
                "mov %%cr2, %%" _ASM_AX "   \n\t"
                "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
 
+               "xor %%eax, %%eax \n\t"
+               "xor %%ebx, %%ebx \n\t"
+               "xor %%esi, %%esi \n\t"
+               "xor %%edi, %%edi \n\t"
                "pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
-               "setbe %c[fail](%0) \n\t"
                ".pushsection .rodata \n\t"
                ".global vmx_return \n\t"
                "vmx_return: " _ASM_PTR " 2b \n\t"
@@ -9477,6 +9491,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
              );
 
+       /* Eliminate branch target predictions from guest mode */
+       vmexit_fill_RSB();
+
        /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
        if (debugctlmsr)
                update_debugctlmsr(debugctlmsr);
@@ -9800,8 +9817,7 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
        cr4_fixed1_update(X86_CR4_SMEP,       ebx, bit(X86_FEATURE_SMEP));
        cr4_fixed1_update(X86_CR4_SMAP,       ebx, bit(X86_FEATURE_SMAP));
        cr4_fixed1_update(X86_CR4_PKE,        ecx, bit(X86_FEATURE_PKU));
-       /* TODO: Use X86_CR4_UMIP and X86_FEATURE_UMIP macros */
-       cr4_fixed1_update(bit(11),            ecx, bit(2));
+       cr4_fixed1_update(X86_CR4_UMIP,       ecx, bit(X86_FEATURE_UMIP));
 
 #undef cr4_fixed1_update
 }
@@ -10875,6 +10891,11 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                        return 1;
        }
 
+       if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
+               (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
+               (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
+                       return 1;
+
        return 0;
 }
 
@@ -11099,13 +11120,12 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qual;
-
-       if (kvm_event_needs_reinjection(vcpu))
-               return -EBUSY;
+       bool block_nested_events =
+           vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
 
        if (vcpu->arch.exception.pending &&
                nested_vmx_check_exception(vcpu, &exit_qual)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
                vcpu->arch.exception.pending = false;
@@ -11114,14 +11134,14 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 
        if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
            vmx->nested.preemption_timer_expired) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
                return 0;
        }
 
        if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
                                  NMI_VECTOR | INTR_TYPE_NMI_INTR |
@@ -11137,7 +11157,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 
        if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
            nested_exit_on_intr(vcpu)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
                return 0;
@@ -11324,6 +11344,24 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        kvm_clear_interrupt_queue(vcpu);
 }
 
+static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
+                       struct vmcs12 *vmcs12)
+{
+       u32 entry_failure_code;
+
+       nested_ept_uninit_mmu_context(vcpu);
+
+       /*
+        * Only PDPTE load can fail as the value of cr3 was checked on entry and
+        * couldn't have changed.
+        */
+       if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+               nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
+
+       if (!enable_ept)
+               vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+}
+
 /*
  * A part of what we need to do when the nested L2 guest exits and we want to
  * run its L1 parent, is to reset L1's guest state to the host state specified
@@ -11337,7 +11375,6 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
                                   struct vmcs12 *vmcs12)
 {
        struct kvm_segment seg;
-       u32 entry_failure_code;
 
        if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
                vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -11364,17 +11401,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
        vmx_set_cr4(vcpu, vmcs12->host_cr4);
 
-       nested_ept_uninit_mmu_context(vcpu);
-
-       /*
-        * Only PDPTE load can fail as the value of cr3 was checked on entry and
-        * couldn't have changed.
-        */
-       if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
-               nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
-
-       if (!enable_ept)
-               vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+       load_vmcs12_mmu_host_state(vcpu, vmcs12);
 
        if (enable_vpid) {
                /*
@@ -11604,6 +11631,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
         * accordingly.
         */
        nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
+       load_vmcs12_mmu_host_state(vcpu, vmcs12);
+
        /*
         * The emulated instruction was already skipped in
         * nested_vmx_run, but the updated RIP was never
index 34c85aa..c53298d 100644 (file)
@@ -107,6 +107,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
 static bool __read_mostly ignore_msrs = 0;
 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
+static bool __read_mostly report_ignored_msrs = true;
+module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
+
 unsigned int min_timer_period_us = 500;
 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
 
@@ -1795,10 +1798,13 @@ u64 get_kvmclock_ns(struct kvm *kvm)
        /* both __this_cpu_read() and rdtsc() should be on the same cpu */
        get_cpu();
 
-       kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
-                          &hv_clock.tsc_shift,
-                          &hv_clock.tsc_to_system_mul);
-       ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+       if (__this_cpu_read(cpu_tsc_khz)) {
+               kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+                                  &hv_clock.tsc_shift,
+                                  &hv_clock.tsc_to_system_mul);
+               ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+       } else
+               ret = ktime_get_boot_ns() + ka->kvmclock_offset;
 
        put_cpu();
 
@@ -1830,6 +1836,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
         */
        BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
+       if (guest_hv_clock.version & 1)
+               ++guest_hv_clock.version;  /* first time write, random junk */
+
        vcpu->hv_clock.version = guest_hv_clock.version + 1;
        kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
                                &vcpu->hv_clock,
@@ -2322,7 +2331,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                /* Drop writes to this legacy MSR -- see rdmsr
                 * counterpart for further detail.
                 */
-               vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
+               if (report_ignored_msrs)
+                       vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
+                               msr, data);
                break;
        case MSR_AMD64_OSVW_ID_LENGTH:
                if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
@@ -2359,8 +2370,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                    msr, data);
                        return 1;
                } else {
-                       vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
-                                   msr, data);
+                       if (report_ignored_msrs)
+                               vcpu_unimpl(vcpu,
+                                       "ignored wrmsr: 0x%x data 0x%llx\n",
+                                       msr, data);
                        break;
                }
        }
@@ -2578,7 +2591,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                               msr_info->index);
                        return 1;
                } else {
-                       vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
+                       if (report_ignored_msrs)
+                               vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
+                                       msr_info->index);
                        msr_info->data = 0;
                }
                break;
@@ -2922,7 +2937,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        pagefault_enable();
        kvm_x86_ops->vcpu_put(vcpu);
-       kvm_put_guest_fpu(vcpu);
        vcpu->arch.last_host_tsc = rdtsc();
 }
 
@@ -4370,7 +4384,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
                                         addr, n, v))
                    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
                        break;
-               trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
+               trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
                handled += n;
                addr += n;
                len -= n;
@@ -4629,7 +4643,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 {
        if (vcpu->mmio_read_completed) {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
-                              vcpu->mmio_fragments[0].gpa, *(u64 *)val);
+                              vcpu->mmio_fragments[0].gpa, val);
                vcpu->mmio_read_completed = 0;
                return 1;
        }
@@ -4651,14 +4665,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
 {
-       trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
+       trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
        return vcpu_mmio_write(vcpu, gpa, bytes, val);
 }
 
 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
                          void *val, int bytes)
 {
-       trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
+       trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
        return X86EMUL_IO_NEEDED;
 }
 
@@ -5237,17 +5251,6 @@ static void emulator_halt(struct x86_emulate_ctxt *ctxt)
        emul_to_vcpu(ctxt)->arch.halt_request = 1;
 }
 
-static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
-{
-       preempt_disable();
-       kvm_load_guest_fpu(emul_to_vcpu(ctxt));
-}
-
-static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
-{
-       preempt_enable();
-}
-
 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
                              struct x86_instruction_info *info,
                              enum x86_intercept_stage stage)
@@ -5325,8 +5328,6 @@ static const struct x86_emulate_ops emulate_ops = {
        .halt                = emulator_halt,
        .wbinvd              = emulator_wbinvd,
        .fix_hypercall       = emulator_fix_hypercall,
-       .get_fpu             = emulator_get_fpu,
-       .put_fpu             = emulator_put_fpu,
        .intercept           = emulator_intercept,
        .get_cpuid           = emulator_get_cpuid,
        .set_nmi_mask        = emulator_set_nmi_mask,
@@ -5430,7 +5431,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
-               r = EMULATE_FAIL;
+               r = EMULATE_USER_EXIT;
        }
        kvm_queue_exception(vcpu, UD_VECTOR);
 
@@ -5722,6 +5723,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                        if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
                                                emulation_type))
                                return EMULATE_DONE;
+                       if (ctxt->have_exception && inject_emulated_exception(vcpu))
+                               return EMULATE_DONE;
                        if (emulation_type & EMULTYPE_SKIP)
                                return EMULATE_FAIL;
                        return handle_emulation_failure(vcpu);
@@ -6761,6 +6764,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
        kvm_x86_ops->tlb_flush(vcpu);
 }
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+               unsigned long start, unsigned long end)
+{
+       unsigned long apic_address;
+
+       /*
+        * The physical address of the APIC access page is stored in the VMCS.
+        * Update it when it becomes invalid.
+        */
+       apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+       if (start <= apic_address && apic_address < end)
+               kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+}
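
The notifier only needs to know whether the single APIC access page lies inside the invalidated host-virtual range; if it does, every vCPU is asked to reload the page. A trivial sketch of that half-open interval test with placeholder addresses:

#include <stdbool.h>
#include <stdio.h>

/* Matches the "start <= apic_address && apic_address < end" test above. */
static bool addr_in_range(unsigned long addr, unsigned long start, unsigned long end)
{
        return start <= addr && addr < end;
}

int main(void)
{
        printf("%d\n", addr_in_range(0x3000, 0x1000, 0x4000)); /* 1 */
        printf("%d\n", addr_in_range(0x4000, 0x1000, 0x4000)); /* 0: end is exclusive */
        return 0;
}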
+
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
        struct page *page = NULL;
@@ -6935,7 +6952,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
-       kvm_load_guest_fpu(vcpu);
 
        /*
         * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
@@ -7248,14 +7264,11 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-       struct fpu *fpu = &current->thread.fpu;
        int r;
-       sigset_t sigsaved;
 
-       fpu__initialize(fpu);
+       kvm_sigset_activate(vcpu);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_load_guest_fpu(vcpu);
 
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                if (kvm_run->immediate_exit) {
@@ -7297,9 +7310,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                r = vcpu_run(vcpu);
 
 out:
+       kvm_put_guest_fpu(vcpu);
        post_kvm_run_save(vcpu);
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
@@ -7367,7 +7380,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 #endif
 
        kvm_rip_write(vcpu, regs->rip);
-       kvm_set_rflags(vcpu, regs->rflags);
+       kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
 
        vcpu->arch.exception.pending = false;
 
@@ -7481,6 +7494,29 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
 
+int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
+               /*
+                * When EFER.LME and CR0.PG are set, the processor is in
+                * 64-bit mode (though maybe in a 32-bit code segment).
+                * CR4.PAE and EFER.LMA must be set.
+                */
+               if (!(sregs->cr4 & X86_CR4_PAE)
+                   || !(sregs->efer & EFER_LMA))
+                       return -EINVAL;
+       } else {
+               /*
+                * Not in 64-bit mode: EFER.LMA is clear and the code
+                * segment cannot be 64-bit.
+                */
+               if (sregs->efer & EFER_LMA || sregs->cs.l)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
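
A minimal standalone sketch of the same long-mode consistency rules the new kvm_valid_sregs() helper enforces, using plain integers and invented masks rather than the kvm_sregs layout (illustration only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical masks for illustration; the bit positions mirror x86. */
#define EFER_LME  (1u << 8)
#define EFER_LMA  (1u << 10)
#define CR0_PG    (1u << 31)
#define CR4_PAE   (1u << 5)

static bool sregs_valid(unsigned int efer, unsigned int cr0,
                        unsigned int cr4, bool cs_l)
{
        if ((efer & EFER_LME) && (cr0 & CR0_PG)) {
                /* 64-bit mode: PAE paging and EFER.LMA are mandatory. */
                return (cr4 & CR4_PAE) && (efer & EFER_LMA);
        }
        /* Not in 64-bit mode: LMA must be clear, CS cannot be 64-bit. */
        return !(efer & EFER_LMA) && !cs_l;
}

int main(void)
{
        printf("%d\n", sregs_valid(EFER_LME | EFER_LMA, CR0_PG, CR4_PAE, true)); /* 1 */
        printf("%d\n", sregs_valid(EFER_LME, CR0_PG, 0, false));                 /* 0 */
        return 0;
}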
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
@@ -7493,6 +7529,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                        (sregs->cr4 & X86_CR4_OSXSAVE))
                return -EINVAL;
 
+       if (kvm_valid_sregs(vcpu, sregs))
+               return -EINVAL;
+
        apic_base_msr.data = sregs->apic_base;
        apic_base_msr.host_initiated = true;
        if (kvm_set_apic_base(vcpu, &apic_base_msr))
@@ -7690,32 +7729,25 @@ static void fx_init(struct kvm_vcpu *vcpu)
        vcpu->arch.cr0 |= X86_CR0_ET;
 }
 
+/* Swap (qemu) user FPU context for the guest FPU context. */
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->guest_fpu_loaded)
-               return;
-
-       /*
-        * Restore all possible states in the guest,
-        * and assume host would use all available bits.
-        * Guest xcr0 would be loaded later.
-        */
-       vcpu->guest_fpu_loaded = 1;
-       __kernel_fpu_begin();
+       preempt_disable();
+       copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
        /* PKRU is separately restored in kvm_x86_ops->run.  */
        __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
                                ~XFEATURE_MASK_PKRU);
+       preempt_enable();
        trace_kvm_fpu(1);
 }
 
+/* When vcpu_run ends, restore user space FPU context. */
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->guest_fpu_loaded)
-               return;
-
-       vcpu->guest_fpu_loaded = 0;
+       preempt_disable();
        copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
-       __kernel_fpu_end();
+       copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+       preempt_enable();
        ++vcpu->stat.fpu_reload;
        trace_kvm_fpu(0);
 }
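
The rework above replaces the guest_fpu_loaded flag with an unconditional swap between two in-kernel save areas (user_fpu and guest_fpu), done with preemption disabled so the register contents cannot migrate mid-swap. A rough userspace analogy of the save/load ordering, with byte buffers standing in for the XSAVE areas (a sketch, not the kernel API):

#include <stdio.h>
#include <string.h>

#define FPSTATE_SIZE 512                       /* stand-in for the XSAVE area size */

static unsigned char hw_regs[FPSTATE_SIZE];    /* stand-in for the FPU registers */

struct fake_vcpu {
        unsigned char user_fpu[FPSTATE_SIZE];  /* userspace (e.g. QEMU) state */
        unsigned char guest_fpu[FPSTATE_SIZE]; /* guest state */
};

/* Mirrors kvm_load_guest_fpu(): save the user state, then load the guest's. */
static void load_guest_fpu(struct fake_vcpu *v)
{
        memcpy(v->user_fpu, hw_regs, sizeof(hw_regs));
        memcpy(hw_regs, v->guest_fpu, sizeof(hw_regs));
}

/* Mirrors kvm_put_guest_fpu(): save the guest state, then restore the user's. */
static void put_guest_fpu(struct fake_vcpu *v)
{
        memcpy(v->guest_fpu, hw_regs, sizeof(hw_regs));
        memcpy(hw_regs, v->user_fpu, sizeof(hw_regs));
}

int main(void)
{
        struct fake_vcpu vcpu = { { 0 }, { 0 } };

        load_guest_fpu(&vcpu);   /* on KVM_RUN entry */
        put_guest_fpu(&vcpu);    /* when vcpu_run ends */
        printf("swapped\n");
        return 0;
}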
@@ -7832,7 +7864,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
                 * To avoid having the INIT path from kvm_apic_has_events() be
                 * called with the FPU loaded, which would not let userspace fix the state.
                 */
-               kvm_put_guest_fpu(vcpu);
+               if (init_event)
+                       kvm_put_guest_fpu(vcpu);
                mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
                                        XFEATURE_MASK_BNDREGS);
                if (mpx_state_buffer)
@@ -7841,6 +7874,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
                                        XFEATURE_MASK_BNDCSR);
                if (mpx_state_buffer)
                        memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
+               if (init_event)
+                       kvm_load_guest_fpu(vcpu);
        }
 
        if (!init_event) {
index 7b181b6..f23934b 100644 (file)
@@ -26,6 +26,7 @@ lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+lib-$(CONFIG_RETPOLINE) += retpoline.o
 
 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
 
index 4d34bb5..46e71a7 100644 (file)
@@ -29,7 +29,8 @@
 #include <asm/errno.h>
 #include <asm/asm.h>
 #include <asm/export.h>
-                               
+#include <asm/nospec-branch.h>
+
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
  */
@@ -156,7 +157,7 @@ ENTRY(csum_partial)
        negl %ebx
        lea 45f(%ebx,%ebx,2), %ebx
        testl %esi, %esi
-       jmp *%ebx
+       JMP_NOSPEC %ebx
 
        # Handle 2-byte-aligned regions
 20:    addw (%esi), %ax
@@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic)
        andl $-32,%edx
        lea 3f(%ebx,%ebx), %ebx
        testl %esi, %esi 
-       jmp *%ebx
+       JMP_NOSPEC %ebx
 1:     addl $64,%esi
        addl $64,%edi 
        SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
index 553f8fd..4846eff 100644 (file)
@@ -107,10 +107,10 @@ static void delay_mwaitx(unsigned long __loops)
                delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
 
                /*
-                * Use cpu_tss as a cacheline-aligned, seldomly
+                * Use cpu_tss_rw as a cacheline-aligned, seldom
                 * accessed per-cpu variable as the monitor target.
                 */
-               __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
+               __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
                /*
                 * AMD, like Intel, supports the EAX hint and EAX=0xf
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
new file mode 100644 (file)
index 0000000..c909961
--- /dev/null
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/stringify.h>
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative-asm.h>
+#include <asm/export.h>
+#include <asm/nospec-branch.h>
+
+.macro THUNK reg
+       .section .text.__x86.indirect_thunk
+
+ENTRY(__x86_indirect_thunk_\reg)
+       CFI_STARTPROC
+       JMP_NOSPEC %\reg
+       CFI_ENDPROC
+ENDPROC(__x86_indirect_thunk_\reg)
+.endm
+
+/*
+ * Despite being an assembler file we can't just use .irp here
+ * because __KSYM_DEPS__ only uses the C preprocessor and would
+ * only see one instance of "__x86_indirect_thunk_\reg" rather
+ * than one per register with the correct names. So we do it
+ * the simple and nasty way...
+ */
+#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
+#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
+#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
+
+GENERATE_THUNK(_ASM_AX)
+GENERATE_THUNK(_ASM_BX)
+GENERATE_THUNK(_ASM_CX)
+GENERATE_THUNK(_ASM_DX)
+GENERATE_THUNK(_ASM_SI)
+GENERATE_THUNK(_ASM_DI)
+GENERATE_THUNK(_ASM_BP)
+#ifdef CONFIG_64BIT
+GENERATE_THUNK(r8)
+GENERATE_THUNK(r9)
+GENERATE_THUNK(r10)
+GENERATE_THUNK(r11)
+GENERATE_THUNK(r12)
+GENERATE_THUNK(r13)
+GENERATE_THUNK(r14)
+GENERATE_THUNK(r15)
+#endif
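
These thunks exist because a retpoline-enabled compiler rewrites every indirect branch to go through __x86_indirect_thunk_<reg> instead of a bare jmp/call through the register; JMP_NOSPEC then provides either a plain jump or the retpoline sequence depending on X86_FEATURE_RETPOLINE. A hedged C illustration of the kind of source code that ends up routed through them (the thunk bodies themselves are generated by the macro above, never written by hand):

#include <stdio.h>

static int add_one(int x) { return x + 1; }

int main(void)
{
        /* With a retpoline-aware compiler (e.g. -mindirect-branch=thunk-extern),
         * the indirect call below is emitted as a call to
         * __x86_indirect_thunk_<reg> rather than "call *%reg". */
        int (*op)(int) = add_one;   /* in real code the target varies at run time */

        printf("%d\n", op(41));
        return 0;
}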
index c4d5591..e0b8593 100644 (file)
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
 fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
 fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
 fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
+ff: UD0
 EndTable
 
 Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
 7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
 80: INVEPT Gy,Mdq (66)
-81: INVPID Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
 82: INVPCID Gy,Mdq (66)
 83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
 88: vexpandps/d Vpd,Wpd (66),(ev)
@@ -970,6 +970,15 @@ GrpTable: Grp9
 EndTable
 
 GrpTable: Grp10
+# all are UD1
+0: UD1
+1: UD1
+2: UD1
+3: UD1
+4: UD1
+5: UD1
+6: UD1
+7: UD1
 EndTable
 
 # Grp11A and Grp11B are expressed as Grp11 in Intel SDM
index 8e13b8c..27e9e90 100644 (file)
@@ -10,7 +10,7 @@ CFLAGS_REMOVE_mem_encrypt.o   = -pg
 endif
 
 obj-y  :=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-           pat.o pgtable.o physaddr.o setup_nx.o tlb.o
+           pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o
 
 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
@@ -41,9 +41,10 @@ obj-$(CONFIG_AMD_NUMA)               += amdtopology.o
 obj-$(CONFIG_ACPI_NUMA)                += srat.o
 obj-$(CONFIG_NUMA_EMU)         += numa_emulation.o
 
-obj-$(CONFIG_X86_INTEL_MPX)    += mpx.o
-obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
-obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
+obj-$(CONFIG_X86_INTEL_MPX)                    += mpx.o
+obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
+obj-$(CONFIG_RANDOMIZE_MEMORY)                 += kaslr.o
+obj-$(CONFIG_PAGE_TABLE_ISOLATION)             += pti.o
 
 obj-$(CONFIG_AMD_MEM_ENCRYPT)  += mem_encrypt.o
 obj-$(CONFIG_AMD_MEM_ENCRYPT)  += mem_encrypt_boot.o
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
new file mode 100644 (file)
index 0000000..b9283cc
--- /dev/null
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+
+#include <asm/cpu_entry_area.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+#include <asm/desc.h>
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+#endif
+
+struct cpu_entry_area *get_cpu_entry_area(int cpu)
+{
+       unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+       BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
+
+       return (struct cpu_entry_area *) va;
+}
+EXPORT_SYMBOL(get_cpu_entry_area);
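
get_cpu_entry_area() is pure address arithmetic: each CPU owns a fixed-size, page-granular slot at CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE, which the BUILD_BUG_ON enforces. A small sketch of the same fixed-stride layout with made-up constants (the real base and size come from cpu_entry_area.h):

#include <stdio.h>

#define AREA_BASE 0xffff000000000000UL   /* placeholder, not the real base */
#define AREA_SIZE 0x40000UL              /* placeholder, must be page-aligned */

static unsigned long area_for_cpu(unsigned int cpu)
{
        return AREA_BASE + (unsigned long)cpu * AREA_SIZE;
}

int main(void)
{
        for (unsigned int cpu = 0; cpu < 4; cpu++)
                printf("cpu %u -> %#lx\n", cpu, area_for_cpu(cpu));
        return 0;
}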
+
+void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
+{
+       unsigned long va = (unsigned long) cea_vaddr;
+
+       set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
+}
+
+static void __init
+cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
+{
+       for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
+               cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
+}
+
+static void percpu_setup_debug_store(int cpu)
+{
+#ifdef CONFIG_CPU_SUP_INTEL
+       int npages;
+       void *cea;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return;
+
+       cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
+       npages = sizeof(struct debug_store) / PAGE_SIZE;
+       BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
+       cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
+                            PAGE_KERNEL);
+
+       cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
+       /*
+        * Force the population of PMDs for not yet allocated per cpu
+        * memory like debug store buffers.
+        */
+       npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
+       for (; npages; npages--, cea += PAGE_SIZE)
+               cea_set_pte(cea, 0, PAGE_NONE);
+#endif
+}
+
+/* Setup the fixmap mappings only once per-processor */
+static void __init setup_cpu_entry_area(int cpu)
+{
+#ifdef CONFIG_X86_64
+       extern char _entry_trampoline[];
+
+       /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
+       pgprot_t gdt_prot = PAGE_KERNEL_RO;
+       pgprot_t tss_prot = PAGE_KERNEL_RO;
+#else
+       /*
+        * On native 32-bit systems, the GDT cannot be read-only because
+        * our double fault handler uses a task gate, and entering through
+        * a task gate needs to change an available TSS to busy.  If the
+        * GDT is read-only, that will triple fault.  The TSS cannot be
+        * read-only because the CPU writes to it on task switches.
+        *
+        * On Xen PV, the GDT must be read-only because the hypervisor
+        * requires it.
+        */
+       pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
+               PAGE_KERNEL_RO : PAGE_KERNEL;
+       pgprot_t tss_prot = PAGE_KERNEL;
+#endif
+
+       cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
+                   gdt_prot);
+
+       cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
+                            per_cpu_ptr(&entry_stack_storage, cpu), 1,
+                            PAGE_KERNEL);
+
+       /*
+        * The Intel SDM says (Volume 3, 7.2.1):
+        *
+        *  Avoid placing a page boundary in the part of the TSS that the
+        *  processor reads during a task switch (the first 104 bytes). The
+        *  processor may not correctly perform address translations if a
+        *  boundary occurs in this area. During a task switch, the processor
+        *  reads and writes into the first 104 bytes of each TSS (using
+        *  contiguous physical addresses beginning with the physical address
+        *  of the first byte of the TSS). So, after TSS access begins, if
+        *  part of the 104 bytes is not physically contiguous, the processor
+        *  will access incorrect information without generating a page-fault
+        *  exception.
+        *
+        * There are also a lot of errata involving the TSS spanning a page
+        * boundary.  Assert that we're not doing that.
+        */
+       BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
+                     offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
+       BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
+       cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
+                            &per_cpu(cpu_tss_rw, cpu),
+                            sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
+
+#ifdef CONFIG_X86_32
+       per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
+#endif
+
+#ifdef CONFIG_X86_64
+       BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+       BUILD_BUG_ON(sizeof(exception_stacks) !=
+                    sizeof(((struct cpu_entry_area *)0)->exception_stacks));
+       cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
+                            &per_cpu(exception_stacks, cpu),
+                            sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
+
+       cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
+                    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
+#endif
+       percpu_setup_debug_store(cpu);
+}
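
The XOR-with-PAGE_MASK assertion above is a compact compile-time way to check that the hardware-accessed part of the TSS starts and ends on the same page, as the quoted SDM passage requires. The same trick works on any structure; a standalone sketch with an invented layout:

#include <assert.h>
#include <stddef.h>

#define EXAMPLE_PAGE_SIZE 4096UL
#define EXAMPLE_PAGE_MASK (~(EXAMPLE_PAGE_SIZE - 1))

struct example {
        char pad[100];
        struct { char bytes[104]; } hw_part;   /* region the CPU must see contiguously */
        char more[200];
};

/* If the first and last byte of hw_part lie on the same page, XORing their
 * offsets leaves nothing set outside the page-offset bits. */
static_assert(((offsetof(struct example, hw_part) ^
                (offsetof(struct example, hw_part) +
                 sizeof(((struct example *)0)->hw_part) - 1)) &
               EXAMPLE_PAGE_MASK) == 0,
              "hw_part must not cross a page boundary");

int main(void) { return 0; }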
+
+static __init void setup_cpu_entry_area_ptes(void)
+{
+#ifdef CONFIG_X86_32
+       unsigned long start, end;
+
+       BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
+       BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
+
+       start = CPU_ENTRY_AREA_BASE;
+       end = start + CPU_ENTRY_AREA_MAP_SIZE;
+
+       /* Careful here: start + PMD_SIZE might wrap around */
+       for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
+               populate_extra_pte(start);
+#endif
+}
+
+void __init setup_cpu_entry_areas(void)
+{
+       unsigned int cpu;
+
+       setup_cpu_entry_area_ptes();
+
+       for_each_possible_cpu(cpu)
+               setup_cpu_entry_area(cpu);
+}
index bfcffdf..421f266 100644 (file)
@@ -5,7 +5,7 @@
 
 static int ptdump_show(struct seq_file *m, void *v)
 {
-       ptdump_walk_pgd_level(m, NULL);
+       ptdump_walk_pgd_level_debugfs(m, NULL, false);
        return 0;
 }
 
@@ -22,21 +22,89 @@ static const struct file_operations ptdump_fops = {
        .release        = single_release,
 };
 
-static struct dentry *pe;
+static int ptdump_show_curknl(struct seq_file *m, void *v)
+{
+       if (current->mm->pgd) {
+               down_read(&current->mm->mmap_sem);
+               ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false);
+               up_read(&current->mm->mmap_sem);
+       }
+       return 0;
+}
+
+static int ptdump_open_curknl(struct inode *inode, struct file *filp)
+{
+       return single_open(filp, ptdump_show_curknl, NULL);
+}
+
+static const struct file_operations ptdump_curknl_fops = {
+       .owner          = THIS_MODULE,
+       .open           = ptdump_open_curknl,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+static struct dentry *pe_curusr;
+
+static int ptdump_show_curusr(struct seq_file *m, void *v)
+{
+       if (current->mm->pgd) {
+               down_read(&current->mm->mmap_sem);
+               ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true);
+               up_read(&current->mm->mmap_sem);
+       }
+       return 0;
+}
+
+static int ptdump_open_curusr(struct inode *inode, struct file *filp)
+{
+       return single_open(filp, ptdump_show_curusr, NULL);
+}
+
+static const struct file_operations ptdump_curusr_fops = {
+       .owner          = THIS_MODULE,
+       .open           = ptdump_open_curusr,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+#endif
+
+static struct dentry *dir, *pe_knl, *pe_curknl;
 
 static int __init pt_dump_debug_init(void)
 {
-       pe = debugfs_create_file("kernel_page_tables", S_IRUSR, NULL, NULL,
-                                &ptdump_fops);
-       if (!pe)
+       dir = debugfs_create_dir("page_tables", NULL);
+       if (!dir)
                return -ENOMEM;
 
+       pe_knl = debugfs_create_file("kernel", 0400, dir, NULL,
+                                    &ptdump_fops);
+       if (!pe_knl)
+               goto err;
+
+       pe_curknl = debugfs_create_file("current_kernel", 0400,
+                                       dir, NULL, &ptdump_curknl_fops);
+       if (!pe_curknl)
+               goto err;
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       pe_curusr = debugfs_create_file("current_user", 0400,
+                                       dir, NULL, &ptdump_curusr_fops);
+       if (!pe_curusr)
+               goto err;
+#endif
        return 0;
+err:
+       debugfs_remove_recursive(dir);
+       return -ENOMEM;
 }
 
 static void __exit pt_dump_debug_exit(void)
 {
-       debugfs_remove_recursive(pe);
+       debugfs_remove_recursive(dir);
 }
 
 module_init(pt_dump_debug_init);
index 5e3ac6f..2a4849e 100644 (file)
@@ -44,68 +44,97 @@ struct addr_marker {
        unsigned long max_lines;
 };
 
-/* indices for address_markers; keep sync'd w/ address_markers below */
+/* Address space marker hints */
+
+#ifdef CONFIG_X86_64
+
 enum address_markers_idx {
        USER_SPACE_NR = 0,
-#ifdef CONFIG_X86_64
        KERNEL_SPACE_NR,
        LOW_KERNEL_NR,
+#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
+       LDT_NR,
+#endif
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
 #ifdef CONFIG_KASAN
        KASAN_SHADOW_START_NR,
        KASAN_SHADOW_END_NR,
 #endif
-# ifdef CONFIG_X86_ESPFIX64
+       CPU_ENTRY_AREA_NR,
+#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
+       LDT_NR,
+#endif
+#ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
-# endif
+#endif
+#ifdef CONFIG_EFI
+       EFI_END_NR,
+#endif
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
-#else
+       FIXADDR_START_NR,
+       END_OF_SPACE_NR,
+};
+
+static struct addr_marker address_markers[] = {
+       [USER_SPACE_NR]         = { 0,                  "User Space" },
+       [KERNEL_SPACE_NR]       = { (1UL << 63),        "Kernel Space" },
+       [LOW_KERNEL_NR]         = { 0UL,                "Low Kernel Mapping" },
+       [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
+       [VMEMMAP_START_NR]      = { 0UL,                "Vmemmap" },
+#ifdef CONFIG_KASAN
+       [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" },
+       [KASAN_SHADOW_END_NR]   = { KASAN_SHADOW_END,   "KASAN shadow end" },
+#endif
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+       [LDT_NR]                = { LDT_BASE_ADDR,      "LDT remap" },
+#endif
+       [CPU_ENTRY_AREA_NR]     = { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
+#ifdef CONFIG_X86_ESPFIX64
+       [ESPFIX_START_NR]       = { ESPFIX_BASE_ADDR,   "ESPfix Area", 16 },
+#endif
+#ifdef CONFIG_EFI
+       [EFI_END_NR]            = { EFI_VA_END,         "EFI Runtime Services" },
+#endif
+       [HIGH_KERNEL_NR]        = { __START_KERNEL_map, "High Kernel Mapping" },
+       [MODULES_VADDR_NR]      = { MODULES_VADDR,      "Modules" },
+       [MODULES_END_NR]        = { MODULES_END,        "End Modules" },
+       [FIXADDR_START_NR]      = { FIXADDR_START,      "Fixmap Area" },
+       [END_OF_SPACE_NR]       = { -1,                 NULL }
+};
+
+#else /* CONFIG_X86_64 */
+
+enum address_markers_idx {
+       USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        VMALLOC_START_NR,
        VMALLOC_END_NR,
-# ifdef CONFIG_HIGHMEM
+#ifdef CONFIG_HIGHMEM
        PKMAP_BASE_NR,
-# endif
-       FIXADDR_START_NR,
 #endif
+       CPU_ENTRY_AREA_NR,
+       FIXADDR_START_NR,
+       END_OF_SPACE_NR,
 };
 
-/* Address space markers hints */
 static struct addr_marker address_markers[] = {
-       { 0, "User Space" },
-#ifdef CONFIG_X86_64
-       { 0x8000000000000000UL, "Kernel Space" },
-       { 0/* PAGE_OFFSET */,   "Low Kernel Mapping" },
-       { 0/* VMALLOC_START */, "vmalloc() Area" },
-       { 0/* VMEMMAP_START */, "Vmemmap" },
-#ifdef CONFIG_KASAN
-       { KASAN_SHADOW_START,   "KASAN shadow" },
-       { KASAN_SHADOW_END,     "KASAN shadow end" },
+       [USER_SPACE_NR]         = { 0,                  "User Space" },
+       [KERNEL_SPACE_NR]       = { PAGE_OFFSET,        "Kernel Mapping" },
+       [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
+       [VMALLOC_END_NR]        = { 0UL,                "vmalloc() End" },
+#ifdef CONFIG_HIGHMEM
+       [PKMAP_BASE_NR]         = { 0UL,                "Persistent kmap() Area" },
 #endif
-# ifdef CONFIG_X86_ESPFIX64
-       { ESPFIX_BASE_ADDR,     "ESPfix Area", 16 },
-# endif
-# ifdef CONFIG_EFI
-       { EFI_VA_END,           "EFI Runtime Services" },
-# endif
-       { __START_KERNEL_map,   "High Kernel Mapping" },
-       { MODULES_VADDR,        "Modules" },
-       { MODULES_END,          "End Modules" },
-#else
-       { PAGE_OFFSET,          "Kernel Mapping" },
-       { 0/* VMALLOC_START */, "vmalloc() Area" },
-       { 0/*VMALLOC_END*/,     "vmalloc() End" },
-# ifdef CONFIG_HIGHMEM
-       { 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
-# endif
-       { 0/*FIXADDR_START*/,   "Fixmap Area" },
-#endif
-       { -1, NULL }            /* End of list */
+       [CPU_ENTRY_AREA_NR]     = { 0UL,                "CPU entry area" },
+       [FIXADDR_START_NR]      = { 0UL,                "Fixmap area" },
+       [END_OF_SPACE_NR]       = { -1,                 NULL }
 };
 
+#endif /* !CONFIG_X86_64 */
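
Both tables now use C99 designated array initializers keyed by the enum above them, so markers that only exist under certain Kconfig options (KASAN, LDT, ESPFIX, EFI, HIGHMEM) can come and go without silently shifting every later entry, and pt_dump_init() can patch entries by name at run time. A reduced sketch of the pattern with invented markers:

#include <stdio.h>

#define HAVE_OPTIONAL_REGION 1   /* imagine a Kconfig option */

enum marker_idx {
        FIRST_NR = 0,
#if HAVE_OPTIONAL_REGION
        OPTIONAL_NR,
#endif
        LAST_NR,
        NR_MARKERS,
};

struct marker {
        unsigned long start;
        const char *name;
};

/* Keyed by enum: removing OPTIONAL_NR does not shift the later entries the
 * way positional initialization would. */
static struct marker markers[NR_MARKERS] = {
        [FIRST_NR]    = { 0x1000, "first region" },
#if HAVE_OPTIONAL_REGION
        [OPTIONAL_NR] = { 0x2000, "optional region" },
#endif
        [LAST_NR]     = { 0x3000, "last region" },
};

int main(void)
{
        /* Late fixup by name, like address_markers[FIXADDR_START_NR].start_address. */
        markers[LAST_NR].start = 0x4000;

        for (int i = 0; i < NR_MARKERS; i++)
                printf("%#lx %s\n", markers[i].start, markers[i].name);
        return 0;
}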
+
 /* Multipliers for offsets within the PTEs */
 #define PTE_LEVEL_MULT (PAGE_SIZE)
 #define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
@@ -140,7 +169,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
        static const char * const level_name[] =
                { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };
 
-       if (!pgprot_val(prot)) {
+       if (!(pr & _PAGE_PRESENT)) {
                /* Not present */
                pt_dump_cont_printf(m, dmsg, "                              ");
        } else {
@@ -447,7 +476,7 @@ static inline bool is_hypervisor_range(int idx)
 }
 
 static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
-                                      bool checkwx)
+                                      bool checkwx, bool dmesg)
 {
 #ifdef CONFIG_X86_64
        pgd_t *start = (pgd_t *) &init_top_pgt;
@@ -460,7 +489,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 
        if (pgd) {
                start = pgd;
-               st.to_dmesg = true;
+               st.to_dmesg = dmesg;
        }
 
        st.check_wx = checkwx;
@@ -498,13 +527,37 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 
 void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
 {
-       ptdump_walk_pgd_level_core(m, pgd, false);
+       ptdump_walk_pgd_level_core(m, pgd, false, true);
+}
+
+void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       if (user && static_cpu_has(X86_FEATURE_PTI))
+               pgd = kernel_to_user_pgdp(pgd);
+#endif
+       ptdump_walk_pgd_level_core(m, pgd, false, false);
+}
+EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
+
+static void ptdump_walk_user_pgd_level_checkwx(void)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+       pgd_t *pgd = (pgd_t *) &init_top_pgt;
+
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return;
+
+       pr_info("x86/mm: Checking user space page tables\n");
+       pgd = kernel_to_user_pgdp(pgd);
+       ptdump_walk_pgd_level_core(NULL, pgd, true, false);
+#endif
 }
-EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level);
 
 void ptdump_walk_pgd_level_checkwx(void)
 {
-       ptdump_walk_pgd_level_core(NULL, NULL, true);
+       ptdump_walk_pgd_level_core(NULL, NULL, true, false);
+       ptdump_walk_user_pgd_level_checkwx();
 }
 
 static int __init pt_dump_init(void)
@@ -525,8 +578,8 @@ static int __init pt_dump_init(void)
        address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
 # endif
        address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
+       address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
 #endif
-
        return 0;
 }
 __initcall(pt_dump_init);
index 3321b44..9fe656c 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/extable.h>
 #include <linux/uaccess.h>
 #include <linux/sched/debug.h>
+#include <xen/xen.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/traps.h>
@@ -82,7 +83,7 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
 
        return true;
 }
-EXPORT_SYMBOL_GPL(ex_handler_refcount);
+EXPORT_SYMBOL(ex_handler_refcount);
 
 /*
  * Handler for when we fail to restore a task's FPU state.  We should never get
@@ -212,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
         * Old CPUs leave the high bits of CS on the stack
         * undefined.  I'm not sure which CPUs do this, but at least
         * the 486 DX works this way.
+        * Xen pv domains are not using the default __KERNEL_CS.
         */
-       if (regs->cs != __KERNEL_CS)
+       if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
                goto fail;
 
        /*
index 78ca9a8..800de81 100644 (file)
@@ -172,14 +172,15 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
  * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
  *          faulted on a pte with its pkey=4.
  */
-static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
+static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
+               u32 *pkey)
 {
        /* This is effectively an #ifdef */
        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return;
 
        /* Fault not from Protection Keys: nothing to do */
-       if (si_code != SEGV_PKUERR)
+       if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
                return;
        /*
         * force_sig_info_fault() is called from a number of
@@ -218,7 +219,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
                lsb = PAGE_SHIFT;
        info.si_addr_lsb = lsb;
 
-       fill_sig_info_pkey(si_code, &info, pkey);
+       fill_sig_info_pkey(si_signo, si_code, &info, pkey);
 
        force_sig_info(si_signo, &info, tsk);
 }
@@ -438,18 +439,13 @@ static noinline int vmalloc_fault(unsigned long address)
        if (pgd_none(*pgd_ref))
                return -1;
 
-       if (pgd_none(*pgd)) {
-               set_pgd(pgd, *pgd_ref);
-               arch_flush_lazy_mmu_mode();
-       } else if (CONFIG_PGTABLE_LEVELS > 4) {
-               /*
-                * With folded p4d, pgd_none() is always false, so the pgd may
-                * point to an empty page table entry and pgd_page_vaddr()
-                * will return garbage.
-                *
-                * We will do the correct sanity check on the p4d level.
-                */
-               BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+       if (CONFIG_PGTABLE_LEVELS > 4) {
+               if (pgd_none(*pgd)) {
+                       set_pgd(pgd, *pgd_ref);
+                       arch_flush_lazy_mmu_mode();
+               } else {
+                       BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+               }
        }
 
        /* With 4-level paging, copying happens on the p4d level. */
@@ -458,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address)
        if (p4d_none(*p4d_ref))
                return -1;
 
-       if (p4d_none(*p4d)) {
+       if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
                set_p4d(p4d, *p4d_ref);
                arch_flush_lazy_mmu_mode();
        } else {
@@ -469,6 +465,7 @@ static noinline int vmalloc_fault(unsigned long address)
         * Below here mismatches are bugs because these lower tables
         * are shared:
         */
+       BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
 
        pud = pud_offset(p4d, address);
        pud_ref = pud_offset(p4d_ref, address);
@@ -701,7 +698,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
        else
                printk(KERN_CONT "paging request");
 
-       printk(KERN_CONT " at %p\n", (void *) address);
+       printk(KERN_CONT " at %px\n", (void *) address);
        printk(KERN_ALERT "IP: %pS\n", (void *)regs->ip);
 
        dump_pagetable(address);
@@ -860,7 +857,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+       printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), address,
                (void *)regs->ip, (void *)regs->sp, error_code);
index 6fdf91e..82f5252 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/kaslr.h>
 #include <asm/hypervisor.h>
 #include <asm/cpufeature.h>
+#include <asm/pti.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -160,6 +161,12 @@ struct map_range {
 
 static int page_size_mask;
 
+static void enable_global_pages(void)
+{
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               __supported_pte_mask |= _PAGE_GLOBAL;
+}
+
 static void __init probe_page_size_mask(void)
 {
        /*
@@ -177,11 +184,11 @@ static void __init probe_page_size_mask(void)
                cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
        /* Enable PGE if available */
+       __supported_pte_mask &= ~_PAGE_GLOBAL;
        if (boot_cpu_has(X86_FEATURE_PGE)) {
                cr4_set_bits_and_update_boot(X86_CR4_PGE);
-               __supported_pte_mask |= _PAGE_GLOBAL;
-       } else
-               __supported_pte_mask &= ~_PAGE_GLOBAL;
+               enable_global_pages();
+       }
 
        /* Enable 1 GB linear kernel mappings if available: */
        if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
@@ -194,34 +201,44 @@ static void __init probe_page_size_mask(void)
 
 static void setup_pcid(void)
 {
-#ifdef CONFIG_X86_64
-       if (boot_cpu_has(X86_FEATURE_PCID)) {
-               if (boot_cpu_has(X86_FEATURE_PGE)) {
-                       /*
-                        * This can't be cr4_set_bits_and_update_boot() --
-                        * the trampoline code can't handle CR4.PCIDE and
-                        * it wouldn't do any good anyway.  Despite the name,
-                        * cr4_set_bits_and_update_boot() doesn't actually
-                        * cause the bits in question to remain set all the
-                        * way through the secondary boot asm.
-                        *
-                        * Instead, we brute-force it and set CR4.PCIDE
-                        * manually in start_secondary().
-                        */
-                       cr4_set_bits(X86_CR4_PCIDE);
-               } else {
-                       /*
-                        * flush_tlb_all(), as currently implemented, won't
-                        * work if PCID is on but PGE is not.  Since that
-                        * combination doesn't exist on real hardware, there's
-                        * no reason to try to fully support it, but it's
-                        * polite to avoid corrupting data if we're on
-                        * an improperly configured VM.
-                        */
-                       setup_clear_cpu_cap(X86_FEATURE_PCID);
-               }
+       if (!IS_ENABLED(CONFIG_X86_64))
+               return;
+
+       if (!boot_cpu_has(X86_FEATURE_PCID))
+               return;
+
+       if (boot_cpu_has(X86_FEATURE_PGE)) {
+               /*
+                * This can't be cr4_set_bits_and_update_boot() -- the
+                * trampoline code can't handle CR4.PCIDE and it wouldn't
+                * do any good anyway.  Despite the name,
+                * cr4_set_bits_and_update_boot() doesn't actually cause
+                * the bits in question to remain set all the way through
+                * the secondary boot asm.
+                *
+                * Instead, we brute-force it and set CR4.PCIDE manually in
+                * start_secondary().
+                */
+               cr4_set_bits(X86_CR4_PCIDE);
+
+               /*
+                * INVPCID's single-context modes (2/3) only work if we set
+                * X86_CR4_PCIDE, *and* we have INVPCID support.  It's unusable
+                * on systems that have X86_CR4_PCIDE clear, or that have
+                * no INVPCID support at all.
+                */
+               if (boot_cpu_has(X86_FEATURE_INVPCID))
+                       setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
+       } else {
+               /*
+                * flush_tlb_all(), as currently implemented, won't work if
+                * PCID is on but PGE is not.  Since that combination
+                * doesn't exist on real hardware, there's no reason to try
+                * to fully support it, but it's polite to avoid corrupting
+                * data if we're on an improperly configured VM.
+                */
+               setup_clear_cpu_cap(X86_FEATURE_PCID);
        }
-#endif
 }
 
 #ifdef CONFIG_X86_32
@@ -622,6 +639,7 @@ void __init init_mem_mapping(void)
 {
        unsigned long end;
 
+       pti_check_boottime_disable();
        probe_page_size_mask();
        setup_pcid();
 
@@ -845,12 +863,12 @@ void __init zone_sizes_init(void)
        free_area_init_nodes(max_zone_pfns);
 }
 
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
        .loaded_mm = &init_mm,
        .next_asid = 1,
        .cr4 = ~0UL,    /* fail hard if we screw up cr4 shadow initialization */
 };
-EXPORT_SYMBOL_GPL(cpu_tlbstate);
+EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
 
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
 {
index 8a64a6f..135c9a7 100644 (file)
@@ -50,6 +50,7 @@
 #include <asm/setup.h>
 #include <asm/set_memory.h>
 #include <asm/page_types.h>
+#include <asm/cpu_entry_area.h>
 #include <asm/init.h>
 
 #include "mm_internal.h"
@@ -766,6 +767,7 @@ void __init mem_init(void)
        mem_init_print_info(NULL);
        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+               "  cpu_entry : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #endif
@@ -777,6 +779,10 @@ void __init mem_init(void)
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,
 
+               CPU_ENTRY_AREA_BASE,
+               CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
+               CPU_ENTRY_AREA_MAP_SIZE >> 10,
+
 #ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
index 6e4573b..c45b6ec 100644 (file)
@@ -404,11 +404,11 @@ void iounmap(volatile void __iomem *addr)
                return;
        }
 
+       mmiotrace_iounmap(addr);
+
        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);
 
-       mmiotrace_iounmap(addr);
-
        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
index 99dfed6..af6f2f9 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
+#include <asm/cpu_entry_area.h>
 
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static __init void *early_alloc(size_t size, int nid)
+static __init void *early_alloc(size_t size, int nid, bool panic)
 {
-       return memblock_virt_alloc_try_nid_nopanic(size, size,
-               __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+       if (panic)
+               return memblock_virt_alloc_try_nid(size, size,
+                       __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+       else
+               return memblock_virt_alloc_try_nid_nopanic(size, size,
+                       __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
 }
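
The new bool picks between the panicking and non-panicking memblock allocators: the huge-page attempts below are opportunistic and fall back to 4k pages when the allocation fails, whereas the page-table allocations have no fallback and should panic right away rather than dereference NULL later. A generic sketch of this may-fail vs. must-succeed split, with a hypothetical xalloc() standing in for the memblock API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* must_succeed=false callers handle NULL (and fall back to something smaller);
 * must_succeed=true callers never see NULL. */
static void *xalloc(size_t size, bool must_succeed)
{
        void *p = malloc(size);

        if (!p && must_succeed) {
                fprintf(stderr, "allocation of %zu bytes failed\n", size);
                abort();
        }
        return p;
}

int main(void)
{
        void *huge = xalloc(2UL << 20, false);   /* opportunistic, may fail */

        if (!huge)
                huge = xalloc(4096, true);       /* fallback, must succeed */
        free(huge);
        return 0;
}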
 
 static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
@@ -37,14 +42,14 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
                if (boot_cpu_has(X86_FEATURE_PSE) &&
                    ((end - addr) == PMD_SIZE) &&
                    IS_ALIGNED(addr, PMD_SIZE)) {
-                       p = early_alloc(PMD_SIZE, nid);
+                       p = early_alloc(PMD_SIZE, nid, false);
                        if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
                                return;
                        else if (p)
                                memblock_free(__pa(p), PMD_SIZE);
                }
 
-               p = early_alloc(PAGE_SIZE, nid);
+               p = early_alloc(PAGE_SIZE, nid, true);
                pmd_populate_kernel(&init_mm, pmd, p);
        }
 
@@ -56,7 +61,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
                if (!pte_none(*pte))
                        continue;
 
-               p = early_alloc(PAGE_SIZE, nid);
+               p = early_alloc(PAGE_SIZE, nid, true);
                entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
                set_pte_at(&init_mm, addr, pte, entry);
        } while (pte++, addr += PAGE_SIZE, addr != end);
@@ -74,14 +79,14 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
                if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
                    ((end - addr) == PUD_SIZE) &&
                    IS_ALIGNED(addr, PUD_SIZE)) {
-                       p = early_alloc(PUD_SIZE, nid);
+                       p = early_alloc(PUD_SIZE, nid, false);
                        if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
                                return;
                        else if (p)
                                memblock_free(__pa(p), PUD_SIZE);
                }
 
-               p = early_alloc(PAGE_SIZE, nid);
+               p = early_alloc(PAGE_SIZE, nid, true);
                pud_populate(&init_mm, pud, p);
        }
 
@@ -100,7 +105,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
        unsigned long next;
 
        if (p4d_none(*p4d)) {
-               void *p = early_alloc(PAGE_SIZE, nid);
+               void *p = early_alloc(PAGE_SIZE, nid, true);
 
                p4d_populate(&init_mm, p4d, p);
        }
@@ -121,7 +126,7 @@ static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
        unsigned long next;
 
        if (pgd_none(*pgd)) {
-               p = early_alloc(PAGE_SIZE, nid);
+               p = early_alloc(PAGE_SIZE, nid, true);
                pgd_populate(&init_mm, pgd, p);
        }
 
@@ -277,6 +282,7 @@ void __init kasan_early_init(void)
 void __init kasan_init(void)
 {
        int i;
+       void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;
 
 #ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
@@ -321,16 +327,33 @@ void __init kasan_init(void)
                map_range(&pfn_mapped[i]);
        }
 
+       shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
+       shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
+       shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
+                                               PAGE_SIZE);
+
+       shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
+                                       CPU_ENTRY_AREA_MAP_SIZE);
+       shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
+       shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
+                                       PAGE_SIZE);
+
        kasan_populate_zero_shadow(
                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
-               kasan_mem_to_shadow((void *)__START_KERNEL_map));
+               shadow_cpu_entry_begin);
+
+       kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
+                             (unsigned long)shadow_cpu_entry_end, 0);
+
+       kasan_populate_zero_shadow(shadow_cpu_entry_end,
+                               kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
        kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
                              (unsigned long)kasan_mem_to_shadow(_end),
                              early_pfn_to_nid(__pa(_stext)));
 
        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-                       (void *)KASAN_SHADOW_END);
+                               (void *)KASAN_SHADOW_END);
 
        load_cr3(init_top_pgt);
        __flush_tlb_all();
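
The hunk above carves the CPU entry area's shadow out of the region that was previously covered by a single zero-shadow mapping, rounding the shadow start down and the shadow end up to page boundaries before populating it. A standalone sketch of that arithmetic, assuming the usual one-shadow-byte-per-8-bytes scaling; the offset and area placement below are placeholder values, not the kernel's constants:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE           4096ULL
    #define KASAN_SHADOW_SCALE  3                       /* one shadow byte per 8 bytes */
    #define KASAN_SHADOW_OFFSET 0xdffffc0000000000ULL   /* placeholder offset */

    static uint64_t mem_to_shadow(uint64_t addr)
    {
            return (addr >> KASAN_SHADOW_SCALE) + KASAN_SHADOW_OFFSET;
    }

    int main(void)
    {
            /* Hypothetical CPU entry area placement, for illustration only. */
            uint64_t area_base = 0xfffffe0000000000ULL;
            uint64_t area_size = 0x0000000040000000ULL;

            uint64_t shadow_begin = mem_to_shadow(area_base) & ~(PAGE_SIZE - 1);
            uint64_t shadow_end   = (mem_to_shadow(area_base + area_size)
                                     + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

            printf("shadow range: %#llx - %#llx\n",
                   (unsigned long long)shadow_begin,
                   (unsigned long long)shadow_end);
            return 0;
    }
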
index 879ef93..aedebd2 100644 (file)
 #define TB_SHIFT 40
 
 /*
- * Virtual address start and end range for randomization. The end changes base
- * on configuration to have the highest amount of space for randomization.
- * It increases the possible random position for each randomized region.
+ * Virtual address start and end range for randomization.
  *
- * You need to add an if/def entry if you introduce a new memory region
- * compatible with KASLR. Your entry must be in logical order with memory
- * layout. For example, ESPFIX is before EFI because its virtual address is
- * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
- * ensure that this order is correct and won't be changed.
+ * The end address could depend on more configuration options to make the
+ * highest amount of space for randomization available, but that's too hard
+ * to keep straight and caused issues already.
  */
 static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
-
-#if defined(CONFIG_X86_ESPFIX64)
-static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
-#elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end = EFI_VA_END;
-#else
-static const unsigned long vaddr_end = __START_KERNEL_map;
-#endif
+static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
 
 /* Default values */
 unsigned long page_offset_base = __PAGE_OFFSET_BASE;
@@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void)
        unsigned long remain_entropy;
 
        /*
-        * All these BUILD_BUG_ON checks ensures the memory layout is
-        * consistent with the vaddr_start/vaddr_end variables.
+        * These BUILD_BUG_ON checks ensure the memory layout is consistent
+        * with the vaddr_start/vaddr_end variables. These checks are very
+        * limited....
         */
        BUILD_BUG_ON(vaddr_start >= vaddr_end);
-       BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
-                    vaddr_end >= EFI_VA_END);
-       BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
-                     IS_ENABLED(CONFIG_EFI)) &&
-                    vaddr_end >= __START_KERNEL_map);
+       BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
        BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
 
        if (!kaslr_memory_enabled())
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
deleted file mode 100644 (file)
index cec5940..0000000
+++ /dev/null
@@ -1 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c
deleted file mode 100644 (file)
index cec5940..0000000
+++ /dev/null
@@ -1 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c
deleted file mode 100644 (file)
index cec5940..0000000
+++ /dev/null
@@ -1 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c
deleted file mode 100644 (file)
index cec5940..0000000
+++ /dev/null
@@ -1 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
index c21c2ed..58477ec 100644 (file)
@@ -435,17 +435,18 @@ int register_kmmio_probe(struct kmmio_probe *p)
        unsigned long flags;
        int ret = 0;
        unsigned long size = 0;
+       unsigned long addr = p->addr & PAGE_MASK;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
        unsigned int l;
        pte_t *pte;
 
        spin_lock_irqsave(&kmmio_lock, flags);
-       if (get_kmmio_probe(p->addr)) {
+       if (get_kmmio_probe(addr)) {
                ret = -EEXIST;
                goto out;
        }
 
-       pte = lookup_address(p->addr, &l);
+       pte = lookup_address(addr, &l);
        if (!pte) {
                ret = -EINVAL;
                goto out;
@@ -454,7 +455,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
        kmmio_count++;
        list_add_rcu(&p->list, &kmmio_probes);
        while (size < size_lim) {
-               if (add_kmmio_fault_page(p->addr + size))
+               if (add_kmmio_fault_page(addr + size))
                        pr_err("Unable to set page fault.\n");
                size += page_level_size(l);
        }
@@ -528,19 +529,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
 {
        unsigned long flags;
        unsigned long size = 0;
+       unsigned long addr = p->addr & PAGE_MASK;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
        struct kmmio_fault_page *release_list = NULL;
        struct kmmio_delayed_release *drelease;
        unsigned int l;
        pte_t *pte;
 
-       pte = lookup_address(p->addr, &l);
+       pte = lookup_address(addr, &l);
        if (!pte)
                return;
 
        spin_lock_irqsave(&kmmio_lock, flags);
        while (size < size_lim) {
-               release_kmmio_fault_page(p->addr + size, &release_list);
+               release_kmmio_fault_page(addr + size, &release_list);
                size += page_level_size(l);
        }
        list_del_rcu(&p->list);
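
The kmmio change above masks the probe address down to a page boundary while size_lim keeps the in-page offset, so registration and unregistration walk exactly the pages an unaligned probe touches. A small standalone illustration of that bookkeeping (hypothetical probe values, 4K pages assumed; the real code steps by page_level_size()):

    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            /* Hypothetical probe: 8 KiB long, starting 0x100 bytes into a page. */
            unsigned long long probe_addr = 0x100000100ULL;
            unsigned long long probe_len  = 8192;

            unsigned long long addr     = probe_addr & PAGE_MASK;
            unsigned long long size_lim = probe_len + (probe_addr & ~PAGE_MASK);

            unsigned long long size, pages = 0;
            for (size = 0; size < size_lim; size += PAGE_SIZE)
                    pages++;        /* one fault page per page covered */

            printf("probe %#llx+%llu covers %llu page(s) starting at %#llx\n",
                   probe_addr, probe_len, pages, addr);
            return 0;
    }
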
index d9a9e9f..e1d61e8 100644 (file)
@@ -405,13 +405,13 @@ bool sme_active(void)
 {
        return sme_me_mask && !sev_enabled;
 }
-EXPORT_SYMBOL_GPL(sme_active);
+EXPORT_SYMBOL(sme_active);
 
 bool sev_active(void)
 {
        return sme_me_mask && sev_enabled;
 }
-EXPORT_SYMBOL_GPL(sev_active);
+EXPORT_SYMBOL(sev_active);
 
 static const struct dma_map_ops sev_dma_ops = {
        .alloc                  = sev_alloc,
@@ -464,37 +464,62 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
        set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
 }
 
-static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
-                                unsigned long end)
+struct sme_populate_pgd_data {
+       void    *pgtable_area;
+       pgd_t   *pgd;
+
+       pmdval_t pmd_flags;
+       pteval_t pte_flags;
+       unsigned long paddr;
+
+       unsigned long vaddr;
+       unsigned long vaddr_end;
+};
+
+static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 {
        unsigned long pgd_start, pgd_end, pgd_size;
        pgd_t *pgd_p;
 
-       pgd_start = start & PGDIR_MASK;
-       pgd_end = end & PGDIR_MASK;
+       pgd_start = ppd->vaddr & PGDIR_MASK;
+       pgd_end = ppd->vaddr_end & PGDIR_MASK;
 
-       pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1);
-       pgd_size *= sizeof(pgd_t);
+       pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
 
-       pgd_p = pgd_base + pgd_index(start);
+       pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
 
        memset(pgd_p, 0, pgd_size);
 }
 
-#define PGD_FLAGS      _KERNPG_TABLE_NOENC
-#define P4D_FLAGS      _KERNPG_TABLE_NOENC
-#define PUD_FLAGS      _KERNPG_TABLE_NOENC
-#define PMD_FLAGS      (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+#define PGD_FLAGS              _KERNPG_TABLE_NOENC
+#define P4D_FLAGS              _KERNPG_TABLE_NOENC
+#define PUD_FLAGS              _KERNPG_TABLE_NOENC
+#define PMD_FLAGS              _KERNPG_TABLE_NOENC
+
+#define PMD_FLAGS_LARGE                (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+
+#define PMD_FLAGS_DEC          PMD_FLAGS_LARGE
+#define PMD_FLAGS_DEC_WP       ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+                                (_PAGE_PAT | _PAGE_PWT))
+
+#define PMD_FLAGS_ENC          (PMD_FLAGS_LARGE | _PAGE_ENC)
+
+#define PTE_FLAGS              (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
+
+#define PTE_FLAGS_DEC          PTE_FLAGS
+#define PTE_FLAGS_DEC_WP       ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+                                (_PAGE_PAT | _PAGE_PWT))
+
+#define PTE_FLAGS_ENC          (PTE_FLAGS | _PAGE_ENC)
 
-static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
-                                    unsigned long vaddr, pmdval_t pmd_val)
+static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
 {
        pgd_t *pgd_p;
        p4d_t *p4d_p;
        pud_t *pud_p;
        pmd_t *pmd_p;
 
-       pgd_p = pgd_base + pgd_index(vaddr);
+       pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
        if (native_pgd_val(*pgd_p)) {
                if (IS_ENABLED(CONFIG_X86_5LEVEL))
                        p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
@@ -504,15 +529,15 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
                pgd_t pgd;
 
                if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-                       p4d_p = pgtable_area;
+                       p4d_p = ppd->pgtable_area;
                        memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
-                       pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
+                       ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
 
                        pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
                } else {
-                       pud_p = pgtable_area;
+                       pud_p = ppd->pgtable_area;
                        memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-                       pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+                       ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
 
                        pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
                }
@@ -520,58 +545,160 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
        }
 
        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-               p4d_p += p4d_index(vaddr);
+               p4d_p += p4d_index(ppd->vaddr);
                if (native_p4d_val(*p4d_p)) {
                        pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
                } else {
                        p4d_t p4d;
 
-                       pud_p = pgtable_area;
+                       pud_p = ppd->pgtable_area;
                        memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-                       pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+                       ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
 
                        p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
                        native_set_p4d(p4d_p, p4d);
                }
        }
 
-       pud_p += pud_index(vaddr);
+       pud_p += pud_index(ppd->vaddr);
        if (native_pud_val(*pud_p)) {
                if (native_pud_val(*pud_p) & _PAGE_PSE)
-                       goto out;
+                       return NULL;
 
                pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
        } else {
                pud_t pud;
 
-               pmd_p = pgtable_area;
+               pmd_p = ppd->pgtable_area;
                memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
-               pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
+               ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
 
                pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
                native_set_pud(pud_p, pud);
        }
 
-       pmd_p += pmd_index(vaddr);
+       return pmd_p;
+}
+
+static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+{
+       pmd_t *pmd_p;
+
+       pmd_p = sme_prepare_pgd(ppd);
+       if (!pmd_p)
+               return;
+
+       pmd_p += pmd_index(ppd->vaddr);
        if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
-               native_set_pmd(pmd_p, native_make_pmd(pmd_val));
+               native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
+}
 
-out:
-       return pgtable_area;
+static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+{
+       pmd_t *pmd_p;
+       pte_t *pte_p;
+
+       pmd_p = sme_prepare_pgd(ppd);
+       if (!pmd_p)
+               return;
+
+       pmd_p += pmd_index(ppd->vaddr);
+       if (native_pmd_val(*pmd_p)) {
+               if (native_pmd_val(*pmd_p) & _PAGE_PSE)
+                       return;
+
+               pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
+       } else {
+               pmd_t pmd;
+
+               pte_p = ppd->pgtable_area;
+               memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
+               ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
+
+               pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
+               native_set_pmd(pmd_p, pmd);
+       }
+
+       pte_p += pte_index(ppd->vaddr);
+       if (!native_pte_val(*pte_p))
+               native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
+}
+
+static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+{
+       while (ppd->vaddr < ppd->vaddr_end) {
+               sme_populate_pgd_large(ppd);
+
+               ppd->vaddr += PMD_PAGE_SIZE;
+               ppd->paddr += PMD_PAGE_SIZE;
+       }
+}
+
+static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+{
+       while (ppd->vaddr < ppd->vaddr_end) {
+               sme_populate_pgd(ppd);
+
+               ppd->vaddr += PAGE_SIZE;
+               ppd->paddr += PAGE_SIZE;
+       }
+}
+
+static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+                                  pmdval_t pmd_flags, pteval_t pte_flags)
+{
+       unsigned long vaddr_end;
+
+       ppd->pmd_flags = pmd_flags;
+       ppd->pte_flags = pte_flags;
+
+       /* Save original end value since we modify the struct value */
+       vaddr_end = ppd->vaddr_end;
+
+       /* If start is not 2MB aligned, create PTE entries */
+       ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
+       __sme_map_range_pte(ppd);
+
+       /* Create PMD entries */
+       ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
+       __sme_map_range_pmd(ppd);
+
+       /* If end is not 2MB aligned, create PTE entries */
+       ppd->vaddr_end = vaddr_end;
+       __sme_map_range_pte(ppd);
+}
+
+static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
+{
+       __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
+}
+
+static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
+{
+       __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
+}
+
+static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
+{
+       __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
 }
 
 static unsigned long __init sme_pgtable_calc(unsigned long len)
 {
-       unsigned long p4d_size, pud_size, pmd_size;
+       unsigned long p4d_size, pud_size, pmd_size, pte_size;
        unsigned long total;
 
        /*
         * Perform a relatively simplistic calculation of the pagetable
-        * entries that are needed. That mappings will be covered by 2MB
-        * PMD entries so we can conservatively calculate the required
+        * entries that are needed. Those mappings will be covered mostly
+        * by 2MB PMD entries so we can conservatively calculate the required
         * number of P4D, PUD and PMD structures needed to perform the
-        * mappings. Incrementing the count for each covers the case where
-        * the addresses cross entries.
+        * mappings.  For mappings that are not 2MB aligned, PTE mappings
+        * would be needed for the start and end portions of the address range
+        * that fall outside of the 2MB alignment.  This results in, at most,
+        * two extra pages to hold PTE entries for each range that is mapped.
+        * Incrementing the count for each covers the case where the addresses
+        * cross entries.
         */
        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
@@ -585,8 +712,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
        }
        pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
        pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
+       pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
 
-       total = p4d_size + pud_size + pmd_size;
+       total = p4d_size + pud_size + pmd_size + pte_size;
 
        /*
         * Now calculate the added pagetable structures needed to populate
@@ -610,29 +738,29 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
        return total;
 }
 
-void __init sme_encrypt_kernel(void)
+void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp)
 {
        unsigned long workarea_start, workarea_end, workarea_len;
        unsigned long execute_start, execute_end, execute_len;
        unsigned long kernel_start, kernel_end, kernel_len;
+       unsigned long initrd_start, initrd_end, initrd_len;
+       struct sme_populate_pgd_data ppd;
        unsigned long pgtable_area_len;
-       unsigned long paddr, pmd_flags;
        unsigned long decrypted_base;
-       void *pgtable_area;
-       pgd_t *pgd;
 
        if (!sme_active())
                return;
 
        /*
-        * Prepare for encrypting the kernel by building new pagetables with
-        * the necessary attributes needed to encrypt the kernel in place.
+        * Prepare for encrypting the kernel and initrd by building new
+        * pagetables with the necessary attributes needed to encrypt the
+        * kernel in place.
         *
         *   One range of virtual addresses will map the memory occupied
-        *   by the kernel as encrypted.
+        *   by the kernel and initrd as encrypted.
         *
         *   Another range of virtual addresses will map the memory occupied
-        *   by the kernel as decrypted and write-protected.
+        *   by the kernel and initrd as decrypted and write-protected.
         *
         *     The use of write-protect attribute will prevent any of the
         *     memory from being cached.
@@ -643,6 +771,20 @@ void __init sme_encrypt_kernel(void)
        kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
        kernel_len = kernel_end - kernel_start;
 
+       initrd_start = 0;
+       initrd_end = 0;
+       initrd_len = 0;
+#ifdef CONFIG_BLK_DEV_INITRD
+       initrd_len = (unsigned long)bp->hdr.ramdisk_size |
+                    ((unsigned long)bp->ext_ramdisk_size << 32);
+       if (initrd_len) {
+               initrd_start = (unsigned long)bp->hdr.ramdisk_image |
+                              ((unsigned long)bp->ext_ramdisk_image << 32);
+               initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
+               initrd_len = initrd_end - initrd_start;
+       }
+#endif
+
        /* Set the encryption workarea to be immediately after the kernel */
        workarea_start = kernel_end;
 
@@ -665,16 +807,21 @@ void __init sme_encrypt_kernel(void)
         */
        pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
        pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
+       if (initrd_len)
+               pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
 
        /* PUDs and PMDs needed in the current pagetables for the workarea */
        pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
 
        /*
         * The total workarea includes the executable encryption area and
-        * the pagetable area.
+        * the pagetable area. The start of the workarea is already 2MB
+        * aligned; align the end of the workarea on a 2MB boundary so that
+        * we don't try to create/allocate PTE entries from the workarea
+        * before it is mapped.
         */
        workarea_len = execute_len + pgtable_area_len;
-       workarea_end = workarea_start + workarea_len;
+       workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
 
        /*
         * Set the address to the start of where newly created pagetable
@@ -683,45 +830,30 @@ void __init sme_encrypt_kernel(void)
         * pagetables and when the new encrypted and decrypted kernel
         * mappings are populated.
         */
-       pgtable_area = (void *)execute_end;
+       ppd.pgtable_area = (void *)execute_end;
 
        /*
         * Make sure the current pagetable structure has entries for
         * addressing the workarea.
         */
-       pgd = (pgd_t *)native_read_cr3_pa();
-       paddr = workarea_start;
-       while (paddr < workarea_end) {
-               pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-                                               paddr,
-                                               paddr + PMD_FLAGS);
-
-               paddr += PMD_PAGE_SIZE;
-       }
+       ppd.pgd = (pgd_t *)native_read_cr3_pa();
+       ppd.paddr = workarea_start;
+       ppd.vaddr = workarea_start;
+       ppd.vaddr_end = workarea_end;
+       sme_map_range_decrypted(&ppd);
 
        /* Flush the TLB - no globals so cr3 is enough */
        native_write_cr3(__native_read_cr3());
 
        /*
         * A new pagetable structure is being built to allow for the kernel
-        * to be encrypted. It starts with an empty PGD that will then be
-        * populated with new PUDs and PMDs as the encrypted and decrypted
-        * kernel mappings are created.
+        * and initrd to be encrypted. It starts with an empty PGD that will
+        * then be populated with new PUDs and PMDs as the encrypted and
+        * decrypted kernel mappings are created.
         */
-       pgd = pgtable_area;
-       memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
-       pgtable_area += sizeof(*pgd) * PTRS_PER_PGD;
-
-       /* Add encrypted kernel (identity) mappings */
-       pmd_flags = PMD_FLAGS | _PAGE_ENC;
-       paddr = kernel_start;
-       while (paddr < kernel_end) {
-               pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-                                               paddr,
-                                               paddr + pmd_flags);
-
-               paddr += PMD_PAGE_SIZE;
-       }
+       ppd.pgd = ppd.pgtable_area;
+       memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
+       ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
 
        /*
         * A different PGD index/entry must be used to get different
@@ -730,47 +862,79 @@ void __init sme_encrypt_kernel(void)
         * the base of the mapping.
         */
        decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
+       if (initrd_len) {
+               unsigned long check_base;
+
+               check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
+               decrypted_base = max(decrypted_base, check_base);
+       }
        decrypted_base <<= PGDIR_SHIFT;
 
+       /* Add encrypted kernel (identity) mappings */
+       ppd.paddr = kernel_start;
+       ppd.vaddr = kernel_start;
+       ppd.vaddr_end = kernel_end;
+       sme_map_range_encrypted(&ppd);
+
        /* Add decrypted, write-protected kernel (non-identity) mappings */
-       pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
-       paddr = kernel_start;
-       while (paddr < kernel_end) {
-               pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-                                               paddr + decrypted_base,
-                                               paddr + pmd_flags);
-
-               paddr += PMD_PAGE_SIZE;
+       ppd.paddr = kernel_start;
+       ppd.vaddr = kernel_start + decrypted_base;
+       ppd.vaddr_end = kernel_end + decrypted_base;
+       sme_map_range_decrypted_wp(&ppd);
+
+       if (initrd_len) {
+               /* Add encrypted initrd (identity) mappings */
+               ppd.paddr = initrd_start;
+               ppd.vaddr = initrd_start;
+               ppd.vaddr_end = initrd_end;
+               sme_map_range_encrypted(&ppd);
+               /*
+                * Add decrypted, write-protected initrd (non-identity) mappings
+                */
+               ppd.paddr = initrd_start;
+               ppd.vaddr = initrd_start + decrypted_base;
+               ppd.vaddr_end = initrd_end + decrypted_base;
+               sme_map_range_decrypted_wp(&ppd);
        }
 
        /* Add decrypted workarea mappings to both kernel mappings */
-       paddr = workarea_start;
-       while (paddr < workarea_end) {
-               pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-                                               paddr,
-                                               paddr + PMD_FLAGS);
+       ppd.paddr = workarea_start;
+       ppd.vaddr = workarea_start;
+       ppd.vaddr_end = workarea_end;
+       sme_map_range_decrypted(&ppd);
 
-               pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-                                               paddr + decrypted_base,
-                                               paddr + PMD_FLAGS);
-
-               paddr += PMD_PAGE_SIZE;
-       }
+       ppd.paddr = workarea_start;
+       ppd.vaddr = workarea_start + decrypted_base;
+       ppd.vaddr_end = workarea_end + decrypted_base;
+       sme_map_range_decrypted(&ppd);
 
        /* Perform the encryption */
        sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
-                           kernel_len, workarea_start, (unsigned long)pgd);
+                           kernel_len, workarea_start, (unsigned long)ppd.pgd);
+
+       if (initrd_len)
+               sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
+                                   initrd_len, workarea_start,
+                                   (unsigned long)ppd.pgd);
 
        /*
         * At this point we are running encrypted.  Remove the mappings for
         * the decrypted areas - all that is needed for this is to remove
         * the PGD entry/entries.
         */
-       sme_clear_pgd(pgd, kernel_start + decrypted_base,
-                     kernel_end + decrypted_base);
+       ppd.vaddr = kernel_start + decrypted_base;
+       ppd.vaddr_end = kernel_end + decrypted_base;
+       sme_clear_pgd(&ppd);
+
+       if (initrd_len) {
+               ppd.vaddr = initrd_start + decrypted_base;
+               ppd.vaddr_end = initrd_end + decrypted_base;
+               sme_clear_pgd(&ppd);
+       }
 
-       sme_clear_pgd(pgd, workarea_start + decrypted_base,
-                     workarea_end + decrypted_base);
+       ppd.vaddr = workarea_start + decrypted_base;
+       ppd.vaddr_end = workarea_end + decrypted_base;
+       sme_clear_pgd(&ppd);
 
        /* Flush the TLB - no globals so cr3 is enough */
        native_write_cr3(__native_read_cr3());
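
__sme_map_range() above now handles ranges that are not 2MB aligned: the unaligned head is mapped with 4K PTE entries up to the first 2MB boundary, the aligned middle with 2MB PMD entries, and the unaligned tail with PTE entries again. A standalone sketch of that split using made-up addresses:

    #include <stdio.h>

    #define PAGE_SIZE     0x1000ULL
    #define PMD_PAGE_SIZE 0x200000ULL                   /* 2 MiB */
    #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE - 1))
    #define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            /* Hypothetical range, not 2 MiB aligned at either end. */
            unsigned long long vaddr     = 0x1000000ULL + 3 * PAGE_SIZE;
            unsigned long long vaddr_end = 0x1c00000ULL + 5 * PAGE_SIZE;

            unsigned long long head_end = ALIGN(vaddr, PMD_PAGE_SIZE);  /* PTE-mapped head */
            unsigned long long body_end = vaddr_end & PMD_PAGE_MASK;    /* PMD-mapped body */

            printf("PTE head: %#llx - %#llx\n", vaddr, head_end);
            printf("PMD body: %#llx - %#llx (%llu x 2 MiB)\n",
                   head_end, body_end, (body_end - head_end) / PMD_PAGE_SIZE);
            printf("PTE tail: %#llx - %#llx\n", body_end, vaddr_end);
            return 0;
    }
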
index 730e6d5..01f682c 100644 (file)
@@ -22,9 +22,9 @@ ENTRY(sme_encrypt_execute)
 
        /*
         * Entry parameters:
-        *   RDI - virtual address for the encrypted kernel mapping
-        *   RSI - virtual address for the decrypted kernel mapping
-        *   RDX - length of kernel
+        *   RDI - virtual address for the encrypted mapping
+        *   RSI - virtual address for the decrypted mapping
+        *   RDX - length to encrypt
         *   RCX - virtual address of the encryption workarea, including:
         *     - stack page (PAGE_SIZE)
         *     - encryption routine page (PAGE_SIZE)
@@ -41,9 +41,9 @@ ENTRY(sme_encrypt_execute)
        addq    $PAGE_SIZE, %rax        /* Workarea encryption routine */
 
        push    %r12
-       movq    %rdi, %r10              /* Encrypted kernel */
-       movq    %rsi, %r11              /* Decrypted kernel */
-       movq    %rdx, %r12              /* Kernel length */
+       movq    %rdi, %r10              /* Encrypted area */
+       movq    %rsi, %r11              /* Decrypted area */
+       movq    %rdx, %r12              /* Area length */
 
        /* Copy encryption routine into the workarea */
        movq    %rax, %rdi                              /* Workarea encryption routine */
@@ -52,10 +52,10 @@ ENTRY(sme_encrypt_execute)
        rep     movsb
 
        /* Setup registers for call */
-       movq    %r10, %rdi              /* Encrypted kernel */
-       movq    %r11, %rsi              /* Decrypted kernel */
+       movq    %r10, %rdi              /* Encrypted area */
+       movq    %r11, %rsi              /* Decrypted area */
        movq    %r8, %rdx               /* Pagetables used for encryption */
-       movq    %r12, %rcx              /* Kernel length */
+       movq    %r12, %rcx              /* Area length */
        movq    %rax, %r8               /* Workarea encryption routine */
        addq    $PAGE_SIZE, %r8         /* Workarea intermediate copy buffer */
 
@@ -71,7 +71,7 @@ ENDPROC(sme_encrypt_execute)
 
 ENTRY(__enc_copy)
 /*
- * Routine used to encrypt kernel.
+ * Routine used to encrypt memory in place.
  *   This routine must be run outside of the kernel proper since
  *   the kernel will be encrypted during the process. So this
  *   routine is defined here and then copied to an area outside
@@ -79,19 +79,19 @@ ENTRY(__enc_copy)
  *   during execution.
  *
  *   On entry the registers must be:
- *     RDI - virtual address for the encrypted kernel mapping
- *     RSI - virtual address for the decrypted kernel mapping
+ *     RDI - virtual address for the encrypted mapping
+ *     RSI - virtual address for the decrypted mapping
  *     RDX - address of the pagetables to use for encryption
- *     RCX - length of kernel
+ *     RCX - length of area
  *      R8 - intermediate copy buffer
  *
  *     RAX - points to this routine
  *
- * The kernel will be encrypted by copying from the non-encrypted
- * kernel space to an intermediate buffer and then copying from the
- * intermediate buffer back to the encrypted kernel space. The physical
- * addresses of the two kernel space mappings are the same which
- * results in the kernel being encrypted "in place".
+ * The area will be encrypted by copying from the non-encrypted
+ * memory space to an intermediate buffer and then copying from the
+ * intermediate buffer back to the encrypted memory space. The physical
+ * addresses of the two mappings are the same which results in the area
+ * being encrypted "in place".
  */
        /* Enable the new page tables */
        mov     %rdx, %cr3
@@ -103,47 +103,55 @@ ENTRY(__enc_copy)
        orq     $X86_CR4_PGE, %rdx
        mov     %rdx, %cr4
 
+       push    %r15
+       push    %r12
+
+       movq    %rcx, %r9               /* Save area length */
+       movq    %rdi, %r10              /* Save encrypted area address */
+       movq    %rsi, %r11              /* Save decrypted area address */
+
        /* Set the PAT register PA5 entry to write-protect */
-       push    %rcx
        movl    $MSR_IA32_CR_PAT, %ecx
        rdmsr
-       push    %rdx                    /* Save original PAT value */
+       mov     %rdx, %r15              /* Save original PAT value */
        andl    $0xffff00ff, %edx       /* Clear PA5 */
        orl     $0x00000500, %edx       /* Set PA5 to WP */
        wrmsr
-       pop     %rdx                    /* RDX contains original PAT value */
-       pop     %rcx
-
-       movq    %rcx, %r9               /* Save kernel length */
-       movq    %rdi, %r10              /* Save encrypted kernel address */
-       movq    %rsi, %r11              /* Save decrypted kernel address */
 
        wbinvd                          /* Invalidate any cache entries */
 
-       /* Copy/encrypt 2MB at a time */
+       /* Copy/encrypt up to 2MB at a time */
+       movq    $PMD_PAGE_SIZE, %r12
 1:
-       movq    %r11, %rsi              /* Source - decrypted kernel */
+       cmpq    %r12, %r9
+       jnb     2f
+       movq    %r9, %r12
+
+2:
+       movq    %r11, %rsi              /* Source - decrypted area */
        movq    %r8, %rdi               /* Dest   - intermediate copy buffer */
-       movq    $PMD_PAGE_SIZE, %rcx    /* 2MB length */
+       movq    %r12, %rcx
        rep     movsb
 
        movq    %r8, %rsi               /* Source - intermediate copy buffer */
-       movq    %r10, %rdi              /* Dest   - encrypted kernel */
-       movq    $PMD_PAGE_SIZE, %rcx    /* 2MB length */
+       movq    %r10, %rdi              /* Dest   - encrypted area */
+       movq    %r12, %rcx
        rep     movsb
 
-       addq    $PMD_PAGE_SIZE, %r11
-       addq    $PMD_PAGE_SIZE, %r10
-       subq    $PMD_PAGE_SIZE, %r9     /* Kernel length decrement */
+       addq    %r12, %r11
+       addq    %r12, %r10
+       subq    %r12, %r9               /* Kernel length decrement */
        jnz     1b                      /* Kernel length not zero? */
 
        /* Restore PAT register */
-       push    %rdx                    /* Save original PAT value */
        movl    $MSR_IA32_CR_PAT, %ecx
        rdmsr
-       pop     %rdx                    /* Restore original PAT value */
+       mov     %r15, %rdx              /* Restore original PAT value */
        wrmsr
 
+       pop     %r12
+       pop     %r15
+
        ret
 .L__enc_copy_end:
 ENDPROC(__enc_copy)
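
The reworked copy loop above encrypts up to 2MB per iteration: %r12 holds the smaller of the remaining length and 2MB, so a trailing partial chunk no longer has to be a full 2MB multiple. A rough C rendering of the loop structure (sketch only; in the real routine the two mappings differ in their encryption/PAT attributes and the loop runs on dedicated page tables):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PMD_PAGE_SIZE 0x200000UL        /* 2 MiB */

    /*
     * Copy pattern used above: decrypted mapping -> intermediate buffer ->
     * encrypted mapping, in chunks of at most 2 MiB.  'buf' stands in for
     * the workarea's intermediate copy buffer.
     */
    static void enc_copy_sketch(char *enc, const char *dec,
                                unsigned long len, char *buf)
    {
            while (len) {
                    unsigned long chunk = len < PMD_PAGE_SIZE ? len : PMD_PAGE_SIZE;

                    memcpy(buf, dec, chunk);        /* source: decrypted area */
                    memcpy(enc, buf, chunk);        /* dest:   encrypted area */

                    dec += chunk;
                    enc += chunk;
                    len -= chunk;
            }
    }

    int main(void)
    {
            unsigned long len = 3 * PMD_PAGE_SIZE + 12345;  /* forces a partial last chunk */
            char *src = calloc(1, len), *dst = calloc(1, len), *buf = malloc(PMD_PAGE_SIZE);

            enc_copy_sketch(dst, src, len, buf);
            printf("copied %lu bytes (last chunk partial)\n", len);
            free(src); free(dst); free(buf);
            return 0;
    }
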
index 96d456a..004abf9 100644 (file)
@@ -355,14 +355,15 @@ static inline void _pgd_free(pgd_t *pgd)
                kmem_cache_free(pgd_cache, pgd);
 }
 #else
+
 static inline pgd_t *_pgd_alloc(void)
 {
-       return (pgd_t *)__get_free_page(PGALLOC_GFP);
+       return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
 }
 
 static inline void _pgd_free(pgd_t *pgd)
 {
-       free_page((unsigned long)pgd);
+       free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
 }
 #endif /* CONFIG_X86_PAE */
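
The PGD allocation in this path now uses __get_free_pages() with PGD_ALLOCATION_ORDER instead of a single page. The definition is not part of this hunk; the sketch below states the assumption behind it, namely that page table isolation keeps a user copy of the PGD in the page adjacent to the kernel one:

    /*
     * Assumed definition (not shown in this hunk): with page table isolation
     * the PGD needs two adjacent pages, one for the kernel half and one for
     * the user copy, hence an order-1 allocation.
     */
    #ifdef CONFIG_PAGE_TABLE_ISOLATION
    #define PGD_ALLOCATION_ORDER 1  /* kernel PGD page + user PGD page */
    #else
    #define PGD_ALLOCATION_ORDER 0  /* single page, as before */
    #endif
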
 
index 6b9bf02..c3c5274 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
 
+#include <asm/cpu_entry_area.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
new file mode 100644 (file)
index 0000000..ce38f16
--- /dev/null
@@ -0,0 +1,368 @@
+/*
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * This code is based in part on work published here:
+ *
+ *     https://github.com/IAIK/KAISER
+ *
+ * The original work was written and signed off for the Linux
+ * kernel by:
+ *
+ *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
+ *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
+ *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
+ *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
+ *
+ * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
+ * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
+ *                    Andy Lutomirsky <luto@amacapital.net>
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+
+#include <asm/cpufeature.h>
+#include <asm/hypervisor.h>
+#include <asm/vsyscall.h>
+#include <asm/cmdline.h>
+#include <asm/pti.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/desc.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt
+
+/* Backporting helper */
+#ifndef __GFP_NOTRACK
+#define __GFP_NOTRACK  0
+#endif
+
+static void __init pti_print_if_insecure(const char *reason)
+{
+       if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+               pr_info("%s\n", reason);
+}
+
+static void __init pti_print_if_secure(const char *reason)
+{
+       if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+               pr_info("%s\n", reason);
+}
+
+void __init pti_check_boottime_disable(void)
+{
+       char arg[5];
+       int ret;
+
+       if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
+               pti_print_if_insecure("disabled on XEN PV.");
+               return;
+       }
+
+       ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
+       if (ret > 0)  {
+               if (ret == 3 && !strncmp(arg, "off", 3)) {
+                       pti_print_if_insecure("disabled on command line.");
+                       return;
+               }
+               if (ret == 2 && !strncmp(arg, "on", 2)) {
+                       pti_print_if_secure("force enabled on command line.");
+                       goto enable;
+               }
+               if (ret == 4 && !strncmp(arg, "auto", 4))
+                       goto autosel;
+       }
+
+       if (cmdline_find_option_bool(boot_command_line, "nopti")) {
+               pti_print_if_insecure("disabled on command line.");
+               return;
+       }
+
+autosel:
+       if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+               return;
+enable:
+       setup_force_cpu_cap(X86_FEATURE_PTI);
+}
+
+pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+       /*
+        * Changes to the high (kernel) portion of the kernelmode page
+        * tables are not automatically propagated to the usermode tables.
+        *
+        * Users should keep in mind that, unlike the kernelmode tables,
+        * there is no vmalloc_fault equivalent for the usermode tables.
+        * Top-level entries added to init_mm's usermode pgd after boot
+        * will not be automatically propagated to other mms.
+        */
+       if (!pgdp_maps_userspace(pgdp))
+               return pgd;
+
+       /*
+        * The user page tables get the full PGD, accessible from
+        * userspace:
+        */
+       kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;
+
+       /*
+        * If this is normal user memory, make it NX in the kernel
+        * pagetables so that, if we somehow screw up and return to
+        * usermode with the kernel CR3 loaded, we'll get a page fault
+        * instead of allowing user code to execute with the wrong CR3.
+        *
+        * As exceptions, we don't set NX if:
+        *  - _PAGE_USER is not set.  This could be an executable
+        *     EFI runtime mapping or something similar, and the kernel
+        *     may execute from it
+        *  - we don't have NX support
+        *  - we're clearing the PGD (i.e. the new pgd is not present).
+        */
+       if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
+           (__supported_pte_mask & _PAGE_NX))
+               pgd.pgd |= _PAGE_NX;
+
+       /* return the copy of the PGD we want the kernel to use: */
+       return pgd;
+}
+
+/*
+ * Walk the user copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down.
+ *
+ * Returns a pointer to a P4D on success, or NULL on failure.
+ */
+static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
+{
+       pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
+       gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+
+       if (address < PAGE_OFFSET) {
+               WARN_ONCE(1, "attempt to walk user address\n");
+               return NULL;
+       }
+
+       if (pgd_none(*pgd)) {
+               unsigned long new_p4d_page = __get_free_page(gfp);
+               if (!new_p4d_page)
+                       return NULL;
+
+               set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
+       }
+       BUILD_BUG_ON(pgd_large(*pgd) != 0);
+
+       return p4d_offset(pgd, address);
+}
+
+/*
+ * Walk the user copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down.
+ *
+ * Returns a pointer to a PMD on success, or NULL on failure.
+ */
+static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+{
+       gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+       p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
+       pud_t *pud;
+
+       BUILD_BUG_ON(p4d_large(*p4d) != 0);
+       if (p4d_none(*p4d)) {
+               unsigned long new_pud_page = __get_free_page(gfp);
+               if (!new_pud_page)
+                       return NULL;
+
+               set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
+       }
+
+       pud = pud_offset(p4d, address);
+       /* The user page tables do not use large mappings: */
+       if (pud_large(*pud)) {
+               WARN_ON(1);
+               return NULL;
+       }
+       if (pud_none(*pud)) {
+               unsigned long new_pmd_page = __get_free_page(gfp);
+               if (!new_pmd_page)
+                       return NULL;
+
+               set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
+       }
+
+       return pmd_offset(pud, address);
+}
+
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+/*
+ * Walk the shadow copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down.  Does not support large pages.
+ *
+ * Note: this is only used when mapping *new* kernel data into the
+ * user/shadow page tables.  It is never used for userspace data.
+ *
+ * Returns a pointer to a PTE on success, or NULL on failure.
+ */
+static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+{
+       gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+       pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
+       pte_t *pte;
+
+       /* We can't do anything sensible if we hit a large mapping. */
+       if (pmd_large(*pmd)) {
+               WARN_ON(1);
+               return NULL;
+       }
+
+       if (pmd_none(*pmd)) {
+               unsigned long new_pte_page = __get_free_page(gfp);
+               if (!new_pte_page)
+                       return NULL;
+
+               set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+       }
+
+       pte = pte_offset_kernel(pmd, address);
+       if (pte_flags(*pte) & _PAGE_USER) {
+               WARN_ONCE(1, "attempt to walk to user pte\n");
+               return NULL;
+       }
+       return pte;
+}
+
+static void __init pti_setup_vsyscall(void)
+{
+       pte_t *pte, *target_pte;
+       unsigned int level;
+
+       pte = lookup_address(VSYSCALL_ADDR, &level);
+       if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+               return;
+
+       target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+       if (WARN_ON(!target_pte))
+               return;
+
+       *target_pte = *pte;
+       set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
+}
+#else
+static void __init pti_setup_vsyscall(void) { }
+#endif
+
+static void __init
+pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
+{
+       unsigned long addr;
+
+       /*
+        * Clone the populated PMDs which cover start to end. These PMD areas
+        * can have holes.
+        */
+       for (addr = start; addr < end; addr += PMD_SIZE) {
+               pmd_t *pmd, *target_pmd;
+               pgd_t *pgd;
+               p4d_t *p4d;
+               pud_t *pud;
+
+               pgd = pgd_offset_k(addr);
+               if (WARN_ON(pgd_none(*pgd)))
+                       return;
+               p4d = p4d_offset(pgd, addr);
+               if (WARN_ON(p4d_none(*p4d)))
+                       return;
+               pud = pud_offset(p4d, addr);
+               if (pud_none(*pud))
+                       continue;
+               pmd = pmd_offset(pud, addr);
+               if (pmd_none(*pmd))
+                       continue;
+
+               target_pmd = pti_user_pagetable_walk_pmd(addr);
+               if (WARN_ON(!target_pmd))
+                       return;
+
+               /*
+                * Copy the PMD.  That is, the kernelmode and usermode
+                * tables will share the last-level page tables of this
+                * address range
+                * address range.
+               *target_pmd = pmd_clear_flags(*pmd, clear);
+       }
+}
+
+/*
+ * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
+ * next-level entry on 5-level systems).
+ */
+static void __init pti_clone_p4d(unsigned long addr)
+{
+       p4d_t *kernel_p4d, *user_p4d;
+       pgd_t *kernel_pgd;
+
+       user_p4d = pti_user_pagetable_walk_p4d(addr);
+       kernel_pgd = pgd_offset_k(addr);
+       kernel_p4d = p4d_offset(kernel_pgd, addr);
+       *user_p4d = *kernel_p4d;
+}
+
+/*
+ * Clone the CPU_ENTRY_AREA into the user space visible page table.
+ */
+static void __init pti_clone_user_shared(void)
+{
+       pti_clone_p4d(CPU_ENTRY_AREA_BASE);
+}
+
+/*
+ * Clone the ESPFIX P4D into the user space visible page table.
+ */
+static void __init pti_setup_espfix64(void)
+{
+#ifdef CONFIG_X86_ESPFIX64
+       pti_clone_p4d(ESPFIX_BASE_ADDR);
+#endif
+}
+
+/*
+ * Clone the populated PMDs of the entry and irqentry text and force it RO.
+ */
+static void __init pti_clone_entry_text(void)
+{
+       pti_clone_pmds((unsigned long) __entry_text_start,
+                       (unsigned long) __irqentry_text_end,
+                      _PAGE_RW | _PAGE_GLOBAL);
+}
+
+/*
+ * Initialize kernel page table isolation
+ */
+void __init pti_init(void)
+{
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return;
+
+       pr_info("enabled\n");
+
+       pti_clone_user_shared();
+       pti_clone_entry_text();
+       pti_setup_espfix64();
+       pti_setup_vsyscall();
+}
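
pti_check_boottime_disable() above resolves the "pti=" and "nopti" options before falling back to the CPU's Meltdown status. A compressed, standalone sketch of that decision order (the string handling is simplified and the helper below is hypothetical, not a kernel API):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    enum pti_choice { PTI_AUTO, PTI_FORCE_ON, PTI_FORCE_OFF };

    /*
     * Decision order sketched from the code above: Xen PV disables PTI,
     * "pti=off"/"nopti" disable it, "pti=on" force-enables it, and
     * "pti=auto" (or no option at all) enables it only on CPUs affected
     * by Meltdown.
     */
    static bool pti_enabled(const char *opt, bool cpu_has_meltdown, bool xen_pv)
    {
            enum pti_choice choice = PTI_AUTO;

            if (xen_pv)
                    return false;

            if (opt) {
                    if (!strcmp(opt, "pti=off") || !strcmp(opt, "nopti"))
                            choice = PTI_FORCE_OFF;
                    else if (!strcmp(opt, "pti=on"))
                            choice = PTI_FORCE_ON;
            }

            if (choice == PTI_FORCE_OFF)
                    return false;
            if (choice == PTI_FORCE_ON)
                    return true;
            return cpu_has_meltdown;        /* auto selection */
    }

    int main(void)
    {
            printf("pti=on, not vulnerable : %d\n", pti_enabled("pti=on", false, false));
            printf("no option, vulnerable  : %d\n", pti_enabled(NULL, true, false));
            printf("nopti, vulnerable      : %d\n", pti_enabled("nopti", true, false));
            return 0;
    }
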
index 3118392..5bfe61a 100644 (file)
  *     Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+/*
+ * We get here when we do something requiring a TLB invalidation
+ * but could not go invalidate all of the contexts.  We do the
+ * necessary invalidation by clearing out the 'ctx_id' which
+ * forces a TLB flush when the context is loaded.
+ */
+void clear_asid_other(void)
+{
+       u16 asid;
+
+       /*
+        * This is only expected to be set if we have disabled
+        * kernel _PAGE_GLOBAL pages.
+        */
+       if (!static_cpu_has(X86_FEATURE_PTI)) {
+               WARN_ON_ONCE(1);
+               return;
+       }
+
+       for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
+               /* Do not need to flush the current asid */
+               if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
+                       continue;
+               /*
+                * Make sure the next time we go to switch to
+                * this asid, we do a flush:
+                */
+               this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
+       }
+       this_cpu_write(cpu_tlbstate.invalidate_other, false);
+}
+
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
 
@@ -42,6 +74,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
                return;
        }
 
+       if (this_cpu_read(cpu_tlbstate.invalidate_other))
+               clear_asid_other();
+
        for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
                if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
                    next->context.ctx_id)
@@ -65,6 +100,25 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
        *need_flush = true;
 }
 
+static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
+{
+       unsigned long new_mm_cr3;
+
+       if (need_flush) {
+               invalidate_user_asid(new_asid);
+               new_mm_cr3 = build_cr3(pgdir, new_asid);
+       } else {
+               new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
+       }
+
+       /*
+        * Caution: many callers of this function expect
+        * that load_cr3() is serializing and orders TLB
+        * fills with respect to the mm_cpumask writes.
+        */
+       write_cr3(new_mm_cr3);
+}
+
 void leave_mm(int cpu)
 {
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
@@ -97,6 +151,34 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        local_irq_restore(flags);
 }
 
+static void sync_current_stack_to_mm(struct mm_struct *mm)
+{
+       unsigned long sp = current_stack_pointer;
+       pgd_t *pgd = pgd_offset(mm, sp);
+
+       if (CONFIG_PGTABLE_LEVELS > 4) {
+               if (unlikely(pgd_none(*pgd))) {
+                       pgd_t *pgd_ref = pgd_offset_k(sp);
+
+                       set_pgd(pgd, *pgd_ref);
+               }
+       } else {
+               /*
+                * "pgd" is faked.  The top level entries are "p4d"s, so sync
+                * the p4d.  This compiles to approximately the same code as
+                * the 5-level case.
+                */
+               p4d_t *p4d = p4d_offset(pgd, sp);
+
+               if (unlikely(p4d_none(*p4d))) {
+                       pgd_t *pgd_ref = pgd_offset_k(sp);
+                       p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);
+
+                       set_p4d(p4d, *p4d_ref);
+               }
+       }
+}
+
 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
 {
@@ -128,7 +210,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         * isn't free.
         */
 #ifdef CONFIG_DEBUG_VM
-       if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
+       if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
                /*
                 * If we were to BUG here, we'd be very likely to kill
                 * the system so hard that we don't see the call trace.
@@ -172,11 +254,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                         * mapped in the new pgd, we'll double-fault.  Forcibly
                         * map it.
                         */
-                       unsigned int index = pgd_index(current_stack_pointer);
-                       pgd_t *pgd = next->pgd + index;
-
-                       if (unlikely(pgd_none(*pgd)))
-                               set_pgd(pgd, init_mm.pgd[index]);
+                       sync_current_stack_to_mm(next);
                }
 
                /* Stop remote flushes for the previous mm */
@@ -195,7 +273,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                if (need_flush) {
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-                       write_cr3(build_cr3(next, new_asid));
+                       load_new_mm_cr3(next->pgd, new_asid, true);
 
                        /*
                         * NB: This gets called via leave_mm() in the idle path
@@ -208,7 +286,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                } else {
                        /* The new ASID is already up to date. */
-                       write_cr3(build_cr3_noflush(next, new_asid));
+                       load_new_mm_cr3(next->pgd, new_asid, false);
 
                        /* See above wrt _rcuidle. */
                        trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
@@ -288,7 +366,7 @@ void initialize_tlbstate_and_flush(void)
                !(cr4_read_shadow() & X86_CR4_PCIDE));
 
        /* Force ASID 0 and force a TLB flush. */
-       write_cr3(build_cr3(mm, 0));
+       write_cr3(build_cr3(mm->pgd, 0));
 
        /* Reinitialize tlbstate. */
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
@@ -551,7 +629,7 @@ static void do_kernel_range_flush(void *info)
 
        /* flush range by one by one 'invlpg' */
        for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
-               __flush_tlb_single(addr);
+               __flush_tlb_one(addr);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
index bb461cf..526536c 100644 (file)
@@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
         * We should get host bridge information from ACPI unless the BIOS
         * doesn't support it.
         */
-       if (acpi_os_get_root_pointer())
+       if (!acpi_disabled && acpi_os_get_root_pointer())
                return 0;
 #endif
 
index 7a5350d..563049c 100644 (file)
@@ -594,6 +594,11 @@ char *__init pcibios_setup(char *str)
        } else if (!strcmp(str, "nocrs")) {
                pci_probe |= PCI_ROOT_NO_CRS;
                return NULL;
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+       } else if (!strcmp(str, "big_root_window")) {
+               pci_probe |= PCI_BIG_ROOT_WINDOW;
+               return NULL;
+#endif
        } else if (!strcmp(str, "earlydump")) {
                pci_early_dump_regs = 1;
                return NULL;
index 1e996df..54ef19e 100644 (file)
@@ -662,9 +662,23 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
  */
 static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
 {
-       unsigned i;
-       u32 base, limit, high;
+       static const char *name = "PCI Bus 0000:00";
        struct resource *res, *conflict;
+       u32 base, limit, high;
+       struct pci_dev *other;
+       unsigned i;
+
+       if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
+               return;
+
+       /* Check that we are the only device of that type */
+       other = pci_get_device(dev->vendor, dev->device, NULL);
+       if (other != dev ||
+           (other = pci_get_device(dev->vendor, dev->device, other))) {
+               /* This is a multi-socket system, don't touch it for now */
+               pci_dev_put(other);
+               return;
+       }
 
        for (i = 0; i < 8; i++) {
                pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
@@ -689,17 +703,30 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
        if (!res)
                return;
 
-       res->name = "PCI Bus 0000:00";
+       /*
+        * Allocate a 256GB window directly below the 0xfd00000000 hardware
+        * limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
+        */
+       res->name = name;
        res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
                IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
-       res->start = 0x100000000ull;
+       res->start = 0xbd00000000ull;
        res->end = 0xfd00000000ull - 1;
 
-       /* Just grab the free area behind system memory for this */
-       while ((conflict = request_resource_conflict(&iomem_resource, res)))
-               res->start = conflict->end + 1;
+       conflict = request_resource_conflict(&iomem_resource, res);
+       if (conflict) {
+               kfree(res);
+               if (conflict->name != name)
+                       return;
 
-       dev_info(&dev->dev, "adding root bus resource %pR\n", res);
+               /* We are resuming from suspend; just reenable the window */
+               res = conflict;
+       } else {
+               dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
+                        res);
+               add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+               pci_bus_add_resource(dev->bus, res, 0);
+       }
 
        base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
                AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
@@ -711,13 +738,16 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
        pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
        pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
        pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
-
-       pci_bus_add_resource(dev->bus, res, 0);
 }
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
 
 #endif
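The rework above leans on two idioms: registering the same quirk for both the FINAL and RESUME fixup phases so it runs again after suspend, and tagging the inserted resource with a static name pointer so the resume pass can recognise its own window in the conflict returned by request_resource_conflict(). A condensed sketch of that shape follows; my_window_fixup, MY_VENDOR and MY_DEVICE are placeholder names, and the real quirk obtains 'res' differently and also scans the AMD_141b MMIO registers, which is omitted here.

/* sketch only; needs <linux/pci.h>, <linux/ioport.h>, <linux/slab.h> */
static void my_window_fixup(struct pci_dev *dev)
{
	static const char *name = "my quirk window";	/* identity tag */
	struct resource *res, *conflict;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return;

	res->name  = name;
	res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
		     IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
	res->start = 0xbd00000000ull;
	res->end   = 0xfd00000000ull - 1;

	conflict = request_resource_conflict(&iomem_resource, res);
	if (conflict) {
		kfree(res);
		if (conflict->name != name)
			return;			/* range owned by someone else */
		res = conflict;			/* resume: reuse our own window */
	} else {
		pci_bus_add_resource(dev->bus, res, 0);
	}

	/* ...program the host bridge base/limit registers from res... */
}
DECLARE_PCI_FIXUP_FINAL(MY_VENDOR, MY_DEVICE, my_window_fixup);
DECLARE_PCI_FIXUP_RESUME(MY_VENDOR, MY_DEVICE, my_window_fixup);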
index 6a151ce..2dd15e9 100644 (file)
@@ -135,7 +135,9 @@ pgd_t * __init efi_call_phys_prolog(void)
                                pud[j] = *pud_offset(p4d_k, vaddr);
                        }
                }
+               pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
        }
+
 out:
        __flush_tlb_all();
 
@@ -196,6 +198,9 @@ static pgd_t *efi_pgd;
  * because we want to avoid inserting EFI region mappings (EFI_VA_END
  * to EFI_VA_START) into the standard kernel page tables. Everything
  * else can be shared, see efi_sync_low_kernel_mappings().
+ *
+ * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
+ * allocation.
  */
 int __init efi_alloc_page_tables(void)
 {
@@ -208,7 +213,7 @@ int __init efi_alloc_page_tables(void)
                return 0;
 
        gfp_mask = GFP_KERNEL | __GFP_ZERO;
-       efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
+       efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
        if (!efi_pgd)
                return -ENOMEM;
 
index 8a99a2e..5b513cc 100644 (file)
@@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
        /*
         * Update the first page pointer to skip over the CSH header.
         */
-       cap_info->pages[0] += csh->headersize;
+       cap_info->phys[0] += csh->headersize;
+
+       /*
+        * cap_info->capsule should point at a virtual mapping of the entire
+        * capsule, starting at the capsule header. Our image has the Quark
+        * security header prepended, so we cannot rely on the default vmap()
+        * mapping created by the generic capsule code.
+        * Given that the Quark firmware does not appear to care about the
+        * virtual mapping, let's just point cap_info->capsule at our copy
+        * of the capsule header.
+        */
+       cap_info->capsule = &cap_info->header;
 
        return 1;
 }
index dc036e5..5a0483e 100644 (file)
@@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata)
        return 0;
 }
 
-static const struct bt_sfi_data tng_bt_sfi_data __initdata = {
+static struct bt_sfi_data tng_bt_sfi_data __initdata = {
        .setup  = tng_bt_sfi_setup,
 };
 
index f44c0bc..8538a67 100644 (file)
@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
                local_flush_tlb();
                stat->d_alltlb++;
        } else {
-               __flush_tlb_one(msg->address);
+               __flush_tlb_single(msg->address);
                stat->d_onetlb++;
        }
        stat->d_requestee++;
index 5f6fd86..e4cb9f4 100644 (file)
@@ -128,7 +128,7 @@ static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
  * on the specified blade to allow the sending of MSIs to the specified CPU.
  */
 static int uv_domain_activate(struct irq_domain *domain,
-                             struct irq_data *irq_data, bool early)
+                             struct irq_data *irq_data, bool reserve)
 {
        uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
        return 0;
index c34bd82..5f64f30 100644 (file)
@@ -905,7 +905,7 @@ static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 /*
  * UV NMI handler
  */
-int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
+static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 {
        struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
        int cpu = smp_processor_id();
@@ -1013,7 +1013,7 @@ void uv_nmi_init(void)
 }
 
 /* Setup HUB NMI info */
-void __init uv_nmi_setup_common(bool hubbed)
+static void __init uv_nmi_setup_common(bool hubbed)
 {
        int size = sizeof(void *) * (1 << NODES_SHIFT);
        int cpu;
index 84fcfde..a7d9669 100644 (file)
@@ -82,12 +82,8 @@ static void __save_processor_state(struct saved_context *ctxt)
        /*
         * descriptor tables
         */
-#ifdef CONFIG_X86_32
        store_idt(&ctxt->idt);
-#else
-/* CONFIG_X86_64 */
-       store_idt((struct desc_ptr *)&ctxt->idt_limit);
-#endif
+
        /*
         * We save it here, but restore it only in the hibernate case.
         * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
@@ -103,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
        /*
         * segment registers
         */
-#ifdef CONFIG_X86_32
-       savesegment(es, ctxt->es);
-       savesegment(fs, ctxt->fs);
+#ifdef CONFIG_X86_32_LAZY_GS
        savesegment(gs, ctxt->gs);
-       savesegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
-       asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
-       asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
-       asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
-       asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
-       asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+#endif
+#ifdef CONFIG_X86_64
+       savesegment(gs, ctxt->gs);
+       savesegment(fs, ctxt->fs);
+       savesegment(ds, ctxt->ds);
+       savesegment(es, ctxt->es);
 
        rdmsrl(MSR_FS_BASE, ctxt->fs_base);
-       rdmsrl(MSR_GS_BASE, ctxt->gs_base);
-       rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+       rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+       rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
        mtrr_save_fixed_ranges(NULL);
 
        rdmsrl(MSR_EFER, ctxt->efer);
@@ -160,17 +152,19 @@ static void do_fpu_end(void)
 static void fix_processor_context(void)
 {
        int cpu = smp_processor_id();
-       struct tss_struct *t = &per_cpu(cpu_tss, cpu);
 #ifdef CONFIG_X86_64
        struct desc_struct *desc = get_cpu_gdt_rw(cpu);
        tss_desc tss;
 #endif
-       set_tss_desc(cpu, t);   /*
-                                * This just modifies memory; should not be
-                                * necessary. But... This is necessary, because
-                                * 386 hardware has concept of busy TSS or some
-                                * similar stupidity.
-                                */
+
+       /*
+        * We need to reload TR, which requires that we change the
+        * GDT entry to indicate "available" first.
+        *
+        * XXX: This could probably all be replaced by a call to
+        * force_reload_TR().
+        */
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 
 #ifdef CONFIG_X86_64
        memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
@@ -178,6 +172,9 @@ static void fix_processor_context(void)
        write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
 
        syscall_init();                         /* This sets MSR_*STAR and related */
+#else
+       if (boot_cpu_has(X86_FEATURE_SEP))
+               enable_sep_cpu();
 #endif
        load_TR_desc();                         /* This does ltr */
        load_mm_ldt(current->active_mm);        /* This does lldt */
@@ -190,9 +187,12 @@ static void fix_processor_context(void)
 }
 
 /**
- *     __restore_processor_state - restore the contents of CPU registers saved
- *             by __save_processor_state()
- *     @ctxt - structure to load the registers contents from
+ * __restore_processor_state - restore the contents of CPU registers saved
+ *                             by __save_processor_state()
+ * @ctxt - structure to load the registers contents from
+ *
+ * The asm code that gets us here will have restored a usable GDT, although
+ * it will be pointing to the wrong alias.
  */
 static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
@@ -215,46 +215,52 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        write_cr2(ctxt->cr2);
        write_cr0(ctxt->cr0);
 
+       /* Restore the IDT. */
+       load_idt(&ctxt->idt);
+
        /*
-        * now restore the descriptor tables to their proper values
-        * ltr is done i fix_processor_context().
+        * Just in case the asm code got us here with the SS, DS, or ES
+        * out of sync with the GDT, update them.
         */
-#ifdef CONFIG_X86_32
-       load_idt(&ctxt->idt);
+       loadsegment(ss, __KERNEL_DS);
+       loadsegment(ds, __USER_DS);
+       loadsegment(es, __USER_DS);
+
+       /*
+        * Restore percpu access.  Percpu access can happen in exception
+        * handlers or in complicated helpers like load_gs_index().
+        */
+#ifdef CONFIG_X86_64
+       wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
 #else
-/* CONFIG_X86_64 */
-       load_idt((const struct desc_ptr *)&ctxt->idt_limit);
+       loadsegment(fs, __KERNEL_PERCPU);
+       loadsegment(gs, __KERNEL_STACK_CANARY);
 #endif
 
+       /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
+       fix_processor_context();
+
        /*
-        * segment registers
+        * Now that we have descriptor tables fully restored and working
+        * exception handling, restore the usermode segments.
         */
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_64
+       loadsegment(ds, ctxt->es);
        loadsegment(es, ctxt->es);
        loadsegment(fs, ctxt->fs);
-       loadsegment(gs, ctxt->gs);
-       loadsegment(ss, ctxt->ss);
+       load_gs_index(ctxt->gs);
 
        /*
-        * sysenter MSRs
+        * Restore FSBASE and GSBASE after restoring the selectors, since
+        * restoring the selectors clobbers the bases.  Keep in mind
+        * that MSR_KERNEL_GS_BASE is horribly misnamed.
         */
-       if (boot_cpu_has(X86_FEATURE_SEP))
-               enable_sep_cpu();
-#else
-/* CONFIG_X86_64 */
-       asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
-       asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
-       asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
-       load_gs_index(ctxt->gs);
-       asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
-
        wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-       wrmsrl(MSR_GS_BASE, ctxt->gs_base);
-       wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+       wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+#elif defined(CONFIG_X86_32_LAZY_GS)
+       loadsegment(gs, ctxt->gs);
 #endif
 
-       fix_processor_context();
-
        do_fpu_end();
        tsc_verify_tsc_adjust(true);
        x86_platform.restore_sched_clock_state();
index 6b830d4..de58533 100644 (file)
@@ -57,7 +57,7 @@ static u32 xen_apic_read(u32 reg)
                return 0;
 
        if (reg == APIC_LVR)
-               return 0x10;
+               return 0x14;
 #ifdef CONFIG_X86_32
        if (reg == APIC_LDR)
                return SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
index d669e9d..c9081c6 100644 (file)
@@ -1,8 +1,12 @@
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+#include <linux/bootmem.h>
+#endif
 #include <linux/cpu.h>
 #include <linux/kexec.h>
 
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/interface/memory.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -331,3 +335,80 @@ void xen_arch_unregister_cpu(int num)
 }
 EXPORT_SYMBOL(xen_arch_unregister_cpu);
 #endif
+
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+void __init arch_xen_balloon_init(struct resource *hostmem_resource)
+{
+       struct xen_memory_map memmap;
+       int rc;
+       unsigned int i, last_guest_ram;
+       phys_addr_t max_addr = PFN_PHYS(max_pfn);
+       struct e820_table *xen_e820_table;
+       const struct e820_entry *entry;
+       struct resource *res;
+
+       if (!xen_initial_domain())
+               return;
+
+       xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
+       if (!xen_e820_table)
+               return;
+
+       memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
+       set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
+       rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
+       if (rc) {
+               pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
+               goto out;
+       }
+
+       last_guest_ram = 0;
+       for (i = 0; i < memmap.nr_entries; i++) {
+               if (xen_e820_table->entries[i].addr >= max_addr)
+                       break;
+               if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
+                       last_guest_ram = i;
+       }
+
+       entry = &xen_e820_table->entries[last_guest_ram];
+       if (max_addr >= entry->addr + entry->size)
+               goto out; /* No unallocated host RAM. */
+
+       hostmem_resource->start = max_addr;
+       hostmem_resource->end = entry->addr + entry->size;
+
+       /*
+        * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
+        * as unavailable. The rest of that region can be used for hotplug-based
+        * ballooning.
+        */
+       for (; i < memmap.nr_entries; i++) {
+               entry = &xen_e820_table->entries[i];
+
+               if (entry->type == E820_TYPE_RAM)
+                       continue;
+
+               if (entry->addr >= hostmem_resource->end)
+                       break;
+
+               res = kzalloc(sizeof(*res), GFP_KERNEL);
+               if (!res)
+                       goto out;
+
+               res->name = "Unavailable host RAM";
+               res->start = entry->addr;
+               res->end = (entry->addr + entry->size < hostmem_resource->end) ?
+                           entry->addr + entry->size : hostmem_resource->end;
+               rc = insert_resource(hostmem_resource, res);
+               if (rc) {
+                       pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
+                               __func__, res->start, res->end, rc);
+                       kfree(res);
+                       goto out;
+               }
+       }
+
+ out:
+       kfree(xen_e820_table);
+}
+#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
index 5b2b3f3..c047f42 100644 (file)
@@ -88,6 +88,8 @@
 #include "multicalls.h"
 #include "pmu.h"
 
+#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */
+
 void *xen_initial_gdt;
 
 static int xen_cpu_up_prepare_pv(unsigned int cpu);
@@ -622,7 +624,7 @@ static struct trap_array_entry trap_array[] = {
        { simd_coprocessor_error,      xen_simd_coprocessor_error,      false },
 };
 
-static bool get_trap_addr(void **addr, unsigned int ist)
+static bool __ref get_trap_addr(void **addr, unsigned int ist)
 {
        unsigned int nr;
        bool ist_okay = false;
@@ -644,6 +646,14 @@ static bool get_trap_addr(void **addr, unsigned int ist)
                }
        }
 
+       if (nr == ARRAY_SIZE(trap_array) &&
+           *addr >= (void *)early_idt_handler_array[0] &&
+           *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
+               nr = (*addr - (void *)early_idt_handler_array[0]) /
+                    EARLY_IDT_HANDLER_SIZE;
+               *addr = (void *)xen_early_idt_handler_array[nr];
+       }
+
        if (WARN_ON(ist != 0 && !ist_okay))
                return false;
 
@@ -818,7 +828,7 @@ static void xen_load_sp0(unsigned long sp0)
        mcs = xen_mc_entry(0);
        MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
        xen_mc_issue(PARAVIRT_LAZY_CPU);
-       this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+       this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 void xen_set_iopl_mask(unsigned mask)
@@ -1250,6 +1260,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
        __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
 
        /* Work out if we support NX */
+       get_cpu_cap(&boot_cpu_data);
        x86_configure_nx();
 
        /* Get mfn list */
@@ -1262,6 +1273,21 @@ asmlinkage __visible void __init xen_start_kernel(void)
        xen_setup_gdt(0);
 
        xen_init_irq_ops();
+
+       /* Let's presume PV guests always boot on vCPU with id 0. */
+       per_cpu(xen_vcpu_id, 0) = 0;
+
+       /*
+        * Setup xen_vcpu early because idt_setup_early_handler needs it for
+        * local_irq_disable(), irqs_disabled().
+        *
+        * Don't do the full vcpu_info placement stuff until we have
+        * the cpu_possible_mask and a non-dummy shared_info.
+        */
+       xen_vcpu_info_reset(0);
+
+       idt_setup_early_handler();
+
        xen_init_capabilities();
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -1295,18 +1321,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
         */
        acpi_numa = -1;
 #endif
-       /* Let's presume PV guests always boot on vCPU with id 0. */
-       per_cpu(xen_vcpu_id, 0) = 0;
-
-       /*
-        * Setup xen_vcpu early because start_kernel needs it for
-        * local_irq_disable(), irqs_disabled().
-        *
-        * Don't do the full vcpu_info placement stuff until we have
-        * the cpu_possible_mask and a non-dummy shared_info.
-        */
-       xen_vcpu_info_reset(0);
-
        WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
 
        local_irq_disable();
index fc048ec..d850762 100644 (file)
@@ -1325,20 +1325,18 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
        struct {
                struct mmuext_op op;
-#ifdef CONFIG_SMP
-               DECLARE_BITMAP(mask, num_processors);
-#else
                DECLARE_BITMAP(mask, NR_CPUS);
-#endif
        } *args;
        struct multicall_space mcs;
+       const size_t mc_entry_size = sizeof(args->op) +
+               sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
 
        trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
 
        if (cpumask_empty(cpus))
                return;         /* nothing to do */
 
-       mcs = xen_mc_entry(sizeof(*args));
+       mcs = xen_mc_entry(mc_entry_size);
        args = mcs.args;
        args->op.arg2.vcpumask = to_cpumask(args->mask);
 
@@ -1902,6 +1900,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        /* Graft it onto L4[511][510] */
        copy_page(level2_kernel_pgt, l2);
 
+       /*
+        * Zap execute permission from the ident map. Due to the sharing of
+        * L1 entries we need to do this in the L2.
+        */
+       if (__supported_pte_mask & _PAGE_NX) {
+               for (i = 0; i < PTRS_PER_PMD; ++i) {
+                       if (pmd_none(level2_ident_pgt[i]))
+                               continue;
+                       level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
+               }
+       }
+
        /* Copy the initial P->M table mappings if necessary. */
        i = pgd_index(xen_start_info->mfn_list);
        if (i && i < pgd_index(__START_KERNEL_map))
@@ -2261,7 +2271,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 
        switch (idx) {
        case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
-       case FIX_RO_IDT:
 #ifdef CONFIG_X86_32
        case FIX_WP_TEST:
 # ifdef CONFIG_HIGHMEM
@@ -2272,7 +2281,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
        case FIX_TEXT_POKE0:
        case FIX_TEXT_POKE1:
-       case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
                /* All local page mappings */
                pte = pfn_pte(phys, prot);
                break;
index c114ca7..6e0d208 100644 (file)
@@ -808,7 +808,6 @@ char * __init xen_memory_setup(void)
        addr = xen_e820_table.entries[0].addr;
        size = xen_e820_table.entries[0].size;
        while (i < xen_e820_table.nr_entries) {
-               bool discard = false;
 
                chunk_size = size;
                type = xen_e820_table.entries[i].type;
@@ -824,11 +823,10 @@ char * __init xen_memory_setup(void)
                                xen_add_extra_mem(pfn_s, n_pfns);
                                xen_max_p2m_pfn = pfn_s + n_pfns;
                        } else
-                               discard = true;
+                               type = E820_TYPE_UNUSABLE;
                }
 
-               if (!discard)
-                       xen_align_and_add_e820_region(addr, chunk_size, type);
+               xen_align_and_add_e820_region(addr, chunk_size, type);
 
                addr += chunk_size;
                size -= chunk_size;
index 8a10c9a..417b339 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <xen/interface/xen.h>
 
+#include <linux/init.h>
 #include <linux/linkage.h>
 
 .macro xen_pv_trap name
@@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat
 #endif
 xen_pv_trap hypervisor_callback
 
+       __INIT
+ENTRY(xen_early_idt_handler_array)
+       i = 0
+       .rept NUM_EXCEPTION_VECTORS
+       pop %rcx
+       pop %r11
+       jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
+       i = i + 1
+       .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+       .endr
+END(xen_early_idt_handler_array)
+       __FINIT
+
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
  * Xen64 iret frame:
index 75011b8..3b34745 100644 (file)
@@ -72,7 +72,7 @@ u64 xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
 void xen_save_time_memory_area(void);
 void xen_restore_time_memory_area(void);
-void __init xen_init_time_ops(void);
+void __ref xen_init_time_ops(void);
 void __init xen_hvm_init_time_ops(void);
 
 irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
index 7be2400..2ccd375 100644 (file)
@@ -77,9 +77,6 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
index a5bcdfb..837d4dd 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 228229f..9ef6cf3 100644 (file)
@@ -599,6 +599,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
        bio->bi_disk = bio_src->bi_disk;
        bio->bi_partno = bio_src->bi_partno;
        bio_set_flag(bio, BIO_CLONED);
+       if (bio_flagged(bio_src, BIO_THROTTLED))
+               bio_set_flag(bio, BIO_THROTTLED);
        bio->bi_opf = bio_src->bi_opf;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_iter = bio_src->bi_iter;
@@ -1819,7 +1821,7 @@ EXPORT_SYMBOL(bio_endio);
 struct bio *bio_split(struct bio *bio, int sectors,
                      gfp_t gfp, struct bio_set *bs)
 {
-       struct bio *split = NULL;
+       struct bio *split;
 
        BUG_ON(sectors <= 0);
        BUG_ON(sectors >= bio_sectors(bio));
index b888175..3ba4326 100644 (file)
@@ -562,6 +562,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
        }
 }
 
+void blk_drain_queue(struct request_queue *q)
+{
+       spin_lock_irq(q->queue_lock);
+       __blk_drain_queue(q, true);
+       spin_unlock_irq(q->queue_lock);
+}
+
 /**
  * blk_queue_bypass_start - enter queue bypass mode
  * @q: queue of interest
@@ -689,8 +696,6 @@ void blk_cleanup_queue(struct request_queue *q)
         */
        blk_freeze_queue(q);
        spin_lock_irq(lock);
-       if (!q->mq_ops)
-               __blk_drain_queue(q, true);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
index b21f8e8..d3a9471 100644 (file)
 #include "blk.h"
 
 /*
- * Append a bio to a passthrough request.  Only works can be merged into
- * the request based on the driver constraints.
+ * Append a bio to a passthrough request.  Only works if the bio can be merged
+ * into the request based on the driver constraints.
  */
-int blk_rq_append_bio(struct request *rq, struct bio *bio)
+int blk_rq_append_bio(struct request *rq, struct bio **bio)
 {
-       blk_queue_bounce(rq->q, &bio);
+       struct bio *orig_bio = *bio;
+
+       blk_queue_bounce(rq->q, bio);
 
        if (!rq->bio) {
-               blk_rq_bio_prep(rq->q, rq, bio);
+               blk_rq_bio_prep(rq->q, rq, *bio);
        } else {
-               if (!ll_back_merge_fn(rq->q, rq, bio))
+               if (!ll_back_merge_fn(rq->q, rq, *bio)) {
+                       if (orig_bio != *bio) {
+                               bio_put(*bio);
+                               *bio = orig_bio;
+                       }
                        return -EINVAL;
+               }
 
-               rq->biotail->bi_next = bio;
-               rq->biotail = bio;
-               rq->__data_len += bio->bi_iter.bi_size;
+               rq->biotail->bi_next = *bio;
+               rq->biotail = *bio;
+               rq->__data_len += (*bio)->bi_iter.bi_size;
        }
 
        return 0;
@@ -73,14 +80,12 @@ static int __blk_rq_map_user_iov(struct request *rq,
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
-       ret = blk_rq_append_bio(rq, bio);
-       bio_get(bio);
+       ret = blk_rq_append_bio(rq, &bio);
        if (ret) {
-               bio_endio(bio);
                __blk_rq_unmap_user(orig_bio);
-               bio_put(bio);
                return ret;
        }
+       bio_get(bio);
 
        return 0;
 }
@@ -213,7 +218,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
-       struct bio *bio;
+       struct bio *bio, *orig_bio;
        int ret;
 
        if (len > (queue_max_hw_sectors(q) << 9))
@@ -236,10 +241,11 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (do_copy)
                rq->rq_flags |= RQF_COPY_USER;
 
-       ret = blk_rq_append_bio(rq, bio);
+       orig_bio = bio;
+       ret = blk_rq_append_bio(rq, &bio);
        if (unlikely(ret)) {
                /* request is too big */
-               bio_put(bio);
+               bio_put(orig_bio);
                return ret;
        }
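After this change, blk_rq_append_bio() takes a struct bio ** because blk_queue_bounce() may silently replace the caller's bio with a bounce clone, and the error path has to release the right one. A minimal caller sketch under the new contract; example_map_bio is a hypothetical name, and it assumes the caller allocated 'bio' itself and therefore owns a reference to it.

static int example_map_bio(struct request *rq, struct bio *bio)
{
	struct bio *orig_bio = bio;	/* the bio we allocated and still own */
	int ret;

	/* may rewrite 'bio' to point at a bounce clone */
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* the helper already dropped any clone it created */
		bio_put(orig_bio);
		return ret;
	}

	/* 'bio' is now whatever was actually linked into the request */
	return 0;
}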
 
index 1109747..3d37973 100644 (file)
@@ -161,6 +161,8 @@ void blk_freeze_queue(struct request_queue *q)
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_freeze_queue_start(q);
+       if (!q->mq_ops)
+               blk_drain_queue(q);
        blk_mq_freeze_queue_wait(q);
 }
 
index e54be40..870484e 100644 (file)
@@ -450,12 +450,9 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                ret = wbt_init(q);
                if (ret)
                        return ret;
-
-               rwb = q->rq_wb;
-               if (!rwb)
-                       return -EINVAL;
        }
 
+       rwb = q->rq_wb;
        if (val == -1)
                rwb->min_lat_nsec = wbt_default_latency_nsec(q);
        else if (val >= 0)
index 825bc29..d19f416 100644 (file)
@@ -2226,13 +2226,7 @@ again:
 out_unlock:
        spin_unlock_irq(q->queue_lock);
 out:
-       /*
-        * As multiple blk-throtls may stack in the same issue path, we
-        * don't want bios to leave with the flag set.  Clear the flag if
-        * being issued.
-        */
-       if (!throttled)
-               bio_clear_flag(bio, BIO_THROTTLED);
+       bio_set_flag(bio, BIO_THROTTLED);
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        if (throttled || !td->track_bio_latency)
index b252da0..ae8de97 100644 (file)
@@ -178,12 +178,11 @@ void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
 
                if (wbt_is_read(stat))
                        wb_timestamp(rwb, &rwb->last_comp);
-               wbt_clear_state(stat);
        } else {
                WARN_ON_ONCE(stat == rwb->sync_cookie);
                __wbt_done(rwb, wbt_stat_to_mask(stat));
-               wbt_clear_state(stat);
        }
+       wbt_clear_state(stat);
 }
 
 /*
@@ -482,7 +481,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 
        /*
         * At this point we know it's a buffered write. If this is
-        * kswapd trying to free memory, or REQ_SYNC is set, set, then
+        * kswapd trying to free memory, or REQ_SYNC is set, then
         * it's WB_SYNC_ALL writeback, and we'll use the max limit for
         * that. If the write is marked as a background write, then use
         * the idle limit, or go to normal if we haven't had competing
@@ -723,8 +722,6 @@ int wbt_init(struct request_queue *q)
                init_waitqueue_head(&rwb->rq_wait[i].wait);
        }
 
-       rwb->wc = 1;
-       rwb->queue_depth = RWB_DEF_DEPTH;
        rwb->last_comp = rwb->last_issue = jiffies;
        rwb->queue = q;
        rwb->win_nsec = RWB_WINDOW_NSEC;
index 3f14469..442098a 100644 (file)
@@ -330,4 +330,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 }
 #endif /* CONFIG_BOUNCE */
 
+extern void blk_drain_queue(struct request_queue *q);
+
 #endif /* BLK_INTERNAL_H */
index fceb1a9..1d05c42 100644 (file)
@@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        unsigned i = 0;
        bool bounce = false;
        int sectors = 0;
+       bool passthrough = bio_is_passthrough(*bio_orig);
 
        bio_for_each_segment(from, *bio_orig, iter) {
                if (i++ < BIO_MAX_PAGES)
@@ -210,13 +211,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        if (!bounce)
                return;
 
-       if (sectors < bio_sectors(*bio_orig)) {
+       if (!passthrough && sectors < bio_sectors(*bio_orig)) {
                bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
                bio_chain(bio, *bio_orig);
                generic_make_request(*bio_orig);
                *bio_orig = bio;
        }
-       bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);
+       bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
+                       bounce_bio_set);
 
        bio_for_each_segment_all(to, bio, i) {
                struct page *page = to->bv_page;
index c2223f1..96a66f6 100644 (file)
@@ -671,10 +671,13 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
                disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
                disk->flags |= GENHD_FL_NO_PART_SCAN;
        } else {
+               int ret;
+
                /* Register BDI before referencing it from bdev */
                disk_to_dev(disk)->devt = devt;
-               bdi_register_owner(disk->queue->backing_dev_info,
-                               disk_to_dev(disk));
+               ret = bdi_register_owner(disk->queue->backing_dev_info,
+                                               disk_to_dev(disk));
+               WARN_ON(ret);
                blk_register_region(disk_devt(disk), disk->minors, NULL,
                                    exact_match, exact_lock, disk);
        }
@@ -1389,7 +1392,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
 
        if (minors > DISK_MAX_PARTS) {
                printk(KERN_ERR
-                       "block: can't allocated more than %d partitions\n",
+                       "block: can't allocate more than %d partitions\n",
                        DISK_MAX_PARTS);
                minors = DISK_MAX_PARTS;
        }
index b4df317..f95c607 100644 (file)
@@ -100,9 +100,13 @@ struct kyber_hctx_data {
        unsigned int cur_domain;
        unsigned int batching;
        wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
+       struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
        atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
 
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
+                            void *key);
+
 static int rq_sched_domain(const struct request *rq)
 {
        unsigned int op = rq->cmd_flags;
@@ -385,6 +389,9 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                INIT_LIST_HEAD(&khd->rqs[i]);
+               init_waitqueue_func_entry(&khd->domain_wait[i],
+                                         kyber_domain_wake);
+               khd->domain_wait[i].private = hctx;
                INIT_LIST_HEAD(&khd->domain_wait[i].entry);
                atomic_set(&khd->wait_index[i], 0);
        }
@@ -524,35 +531,39 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
        int nr;
 
        nr = __sbitmap_queue_get(domain_tokens);
-       if (nr >= 0)
-               return nr;
 
        /*
         * If we failed to get a domain token, make sure the hardware queue is
         * run when one becomes available. Note that this is serialized on
         * khd->lock, but we still need to be careful about the waker.
         */
-       if (list_empty_careful(&wait->entry)) {
-               init_waitqueue_func_entry(wait, kyber_domain_wake);
-               wait->private = hctx;
+       if (nr < 0 && list_empty_careful(&wait->entry)) {
                ws = sbq_wait_ptr(domain_tokens,
                                  &khd->wait_index[sched_domain]);
+               khd->domain_ws[sched_domain] = ws;
                add_wait_queue(&ws->wait, wait);
 
                /*
                 * Try again in case a token was freed before we got on the wait
-                * queue. The waker may have already removed the entry from the
-                * wait queue, but list_del_init() is okay with that.
+                * queue.
                 */
                nr = __sbitmap_queue_get(domain_tokens);
-               if (nr >= 0) {
-                       unsigned long flags;
+       }
 
-                       spin_lock_irqsave(&ws->wait.lock, flags);
-                       list_del_init(&wait->entry);
-                       spin_unlock_irqrestore(&ws->wait.lock, flags);
-               }
+       /*
+        * If we got a token while we were on the wait queue, remove ourselves
+        * from the wait queue to ensure that all wake ups make forward
+        * progress. It's possible that the waker already deleted the entry
+        * between the !list_empty_careful() check and us grabbing the lock, but
+        * list_del_init() is okay with that.
+        */
+       if (nr >= 0 && !list_empty_careful(&wait->entry)) {
+               ws = khd->domain_ws[sched_domain];
+               spin_lock_irq(&ws->wait.lock);
+               list_del_init(&wait->entry);
+               spin_unlock_irq(&ws->wait.lock);
        }
+
        return nr;
 }
 
index 85cea9d..35d4dce 100644 (file)
@@ -664,7 +664,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
        unsigned int i;
 
        list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
-               ctx->rcvused -= rsgl->sg_num_bytes;
+               atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
                af_alg_free_sg(&rsgl->sgl);
                list_del(&rsgl->list);
                if (rsgl != &areq->first_rsgl)
@@ -672,14 +672,15 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
        }
 
        tsgl = areq->tsgl;
-       for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
-               if (!sg_page(sg))
-                       continue;
-               put_page(sg_page(sg));
-       }
+       if (tsgl) {
+               for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
+                       if (!sg_page(sg))
+                               continue;
+                       put_page(sg_page(sg));
+               }
 
-       if (areq->tsgl && areq->tsgl_entries)
                sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
+       }
 }
 EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
 
@@ -1020,6 +1021,18 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(af_alg_sendpage);
 
+/**
+ * af_alg_free_resources - release resources required for crypto request
+ */
+void af_alg_free_resources(struct af_alg_async_req *areq)
+{
+       struct sock *sk = areq->sk;
+
+       af_alg_free_areq_sgls(areq);
+       sock_kfree_s(sk, areq, areq->areqlen);
+}
+EXPORT_SYMBOL_GPL(af_alg_free_resources);
+
 /**
  * af_alg_async_cb - AIO callback handler
  *
@@ -1036,18 +1049,13 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
        struct kiocb *iocb = areq->iocb;
        unsigned int resultlen;
 
-       lock_sock(sk);
-
        /* Buffer size written by crypto operation. */
        resultlen = areq->outlen;
 
-       af_alg_free_areq_sgls(areq);
-       sock_kfree_s(sk, areq, areq->areqlen);
-       __sock_put(sk);
+       af_alg_free_resources(areq);
+       sock_put(sk);
 
        iocb->ki_complete(iocb, err ? err : resultlen, 0);
-
-       release_sock(sk);
 }
 EXPORT_SYMBOL_GPL(af_alg_async_cb);
 
@@ -1130,12 +1138,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
                if (!af_alg_readable(sk))
                        break;
 
-               if (!ctx->used) {
-                       err = af_alg_wait_for_data(sk, flags);
-                       if (err)
-                               return err;
-               }
-
                seglen = min_t(size_t, (maxsize - len),
                               msg_data_left(msg));
 
@@ -1161,7 +1163,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 
                areq->last_rsgl = rsgl;
                len += err;
-               ctx->rcvused += err;
+               atomic_add(err, &ctx->rcvused);
                rsgl->sg_num_bytes = err;
                iov_iter_advance(&msg->msg_iter, err);
        }
index 60d7366..9a636f9 100644 (file)
@@ -167,6 +167,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
 
                        spawn->alg = NULL;
                        spawns = &inst->alg.cra_users;
+
+                       /*
+                        * We may encounter an unregistered instance here, since
+                        * an instance's spawns are set up prior to the instance
+                        * being registered.  An unregistered instance will have
+                        * NULL ->cra_users.next, since ->cra_users isn't
+                        * properly initialized until registration.  But an
+                        * unregistered instance cannot have any users, so treat
+                        * it the same as ->cra_users being empty.
+                        */
+                       if (spawns->next == NULL)
+                               break;
                }
        } while ((spawns = crypto_more_spawns(alg, &stack, &top,
                                              &secondary_spawns)));
index aacae08..e9885a3 100644 (file)
@@ -101,16 +101,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
        struct aead_tfm *aeadc = pask->private;
        struct crypto_aead *tfm = aeadc->aead;
        struct crypto_skcipher *null_tfm = aeadc->null_tfm;
-       unsigned int as = crypto_aead_authsize(tfm);
+       unsigned int i, as = crypto_aead_authsize(tfm);
        struct af_alg_async_req *areq;
-       struct af_alg_tsgl *tsgl;
-       struct scatterlist *src;
+       struct af_alg_tsgl *tsgl, *tmp;
+       struct scatterlist *rsgl_src, *tsgl_src = NULL;
        int err = 0;
        size_t used = 0;                /* [in]  TX bufs to be en/decrypted */
        size_t outlen = 0;              /* [out] RX bufs produced by kernel */
        size_t usedpages = 0;           /* [in]  RX bufs to be used from user */
        size_t processed = 0;           /* [in]  TX bufs to be consumed */
 
+       if (!ctx->used) {
+               err = af_alg_wait_for_data(sk, flags);
+               if (err)
+                       return err;
+       }
+
        /*
         * Data length provided by caller via sendmsg/sendpage that has not
         * yet been processed.
@@ -178,7 +184,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
        }
 
        processed = used + ctx->aead_assoclen;
-       tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
+       list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
+               for (i = 0; i < tsgl->cur; i++) {
+                       struct scatterlist *process_sg = tsgl->sg + i;
+
+                       if (!(process_sg->length) || !sg_page(process_sg))
+                               continue;
+                       tsgl_src = process_sg;
+                       break;
+               }
+               if (tsgl_src)
+                       break;
+       }
+       if (processed && !tsgl_src) {
+               err = -EFAULT;
+               goto free;
+       }
 
        /*
         * Copy of AAD from source to destination
@@ -194,7 +215,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
         */
 
        /* Use the RX SGL as source (and destination) for crypto op. */
-       src = areq->first_rsgl.sgl.sg;
+       rsgl_src = areq->first_rsgl.sgl.sg;
 
        if (ctx->enc) {
                /*
@@ -207,7 +228,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                 *          v      v
                 * RX SGL: AAD || PT || Tag
                 */
-               err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+               err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
                                           areq->first_rsgl.sgl.sg, processed);
                if (err)
                        goto free;
@@ -225,7 +246,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                 */
 
                 /* Copy AAD || CT to RX SGL buffer for in-place operation. */
-               err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+               err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
                                           areq->first_rsgl.sgl.sg, outlen);
                if (err)
                        goto free;
@@ -257,23 +278,34 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                                 areq->tsgl);
                } else
                        /* no RX SGL present (e.g. authentication only) */
-                       src = areq->tsgl;
+                       rsgl_src = areq->tsgl;
        }
 
        /* Initialize the crypto operation */
-       aead_request_set_crypt(&areq->cra_u.aead_req, src,
+       aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
                               areq->first_rsgl.sgl.sg, used, ctx->iv);
        aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
        aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
 
        if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
                /* AIO operation */
+               sock_hold(sk);
                areq->iocb = msg->msg_iocb;
+
+               /* Remember output size that will be generated. */
+               areq->outlen = outlen;
+
                aead_request_set_callback(&areq->cra_u.aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          af_alg_async_cb, areq);
                err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
                                 crypto_aead_decrypt(&areq->cra_u.aead_req);
+
+               /* AIO operation in progress */
+               if (err == -EINPROGRESS || err == -EBUSY)
+                       return -EIOCBQUEUED;
+
+               sock_put(sk);
        } else {
                /* Synchronous operation */
                aead_request_set_callback(&areq->cra_u.aead_req,
@@ -285,19 +317,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                                &ctx->wait);
        }
 
-       /* AIO operation in progress */
-       if (err == -EINPROGRESS) {
-               sock_hold(sk);
-
-               /* Remember output size that will be generated. */
-               areq->outlen = outlen;
-
-               return -EIOCBQUEUED;
-       }
 
 free:
-       af_alg_free_areq_sgls(areq);
-       sock_kfree_s(sk, areq, areq->areqlen);
+       af_alg_free_resources(areq);
 
        return err ? err : outlen;
 }
@@ -487,6 +509,7 @@ static void aead_release(void *private)
        struct aead_tfm *tfm = private;
 
        crypto_free_aead(tfm->aead);
+       crypto_put_default_null_skcipher2();
        kfree(tfm);
 }
 
@@ -519,7 +542,6 @@ static void aead_sock_destruct(struct sock *sk)
        unsigned int ivlen = crypto_aead_ivsize(tfm);
 
        af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
-       crypto_put_default_null_skcipher2();
        sock_kzfree_s(sk, ctx->iv, ivlen);
        sock_kfree_s(sk, ctx, ctx->len);
        af_alg_release_parent(sk);
@@ -549,7 +571,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
        INIT_LIST_HEAD(&ctx->tsgl_list);
        ctx->len = len;
        ctx->used = 0;
-       ctx->rcvused = 0;
+       atomic_set(&ctx->rcvused, 0);
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
index 9954b07..c5c47b6 100644 (file)
@@ -72,6 +72,12 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
        int err = 0;
        size_t len = 0;
 
+       if (!ctx->used) {
+               err = af_alg_wait_for_data(sk, flags);
+               if (err)
+                       return err;
+       }
+
        /* Allocate cipher request for current operation. */
        areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
                                     crypto_skcipher_reqsize(tfm));
@@ -117,13 +123,24 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 
        if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
                /* AIO operation */
+               sock_hold(sk);
                areq->iocb = msg->msg_iocb;
+
+               /* Remember output size that will be generated. */
+               areq->outlen = len;
+
                skcipher_request_set_callback(&areq->cra_u.skcipher_req,
                                              CRYPTO_TFM_REQ_MAY_SLEEP,
                                              af_alg_async_cb, areq);
                err = ctx->enc ?
                        crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
                        crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
+
+               /* AIO operation in progress */
+               if (err == -EINPROGRESS || err == -EBUSY)
+                       return -EIOCBQUEUED;
+
+               sock_put(sk);
        } else {
                /* Synchronous operation */
                skcipher_request_set_callback(&areq->cra_u.skcipher_req,
@@ -136,19 +153,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
                                                 &ctx->wait);
        }
 
-       /* AIO operation in progress */
-       if (err == -EINPROGRESS) {
-               sock_hold(sk);
-
-               /* Remember output size that will be generated. */
-               areq->outlen = len;
-
-               return -EIOCBQUEUED;
-       }
 
 free:
-       af_alg_free_areq_sgls(areq);
-       sock_kfree_s(sk, areq, areq->areqlen);
+       af_alg_free_resources(areq);
 
        return err ? err : len;
 }
@@ -383,7 +390,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
        INIT_LIST_HEAD(&ctx->tsgl_list);
        ctx->len = len;
        ctx->used = 0;
-       ctx->rcvused = 0;
+       atomic_set(&ctx->rcvused, 0);
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
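algif_aead and algif_skcipher now share the same AIO submission shape: pin the socket, record the expected output length, and either hand ownership to af_alg_async_cb() (which does the sock_put() and frees the request) or clean up synchronously. A condensed sketch of that flow; example_aio_submit and submit_crypt are placeholders for the recvmsg tail and the crypto_aead_*/crypto_skcipher_* call respectively, not real functions.

static int example_aio_submit(struct sock *sk, struct af_alg_async_req *areq,
			      struct kiocb *iocb, unsigned int outlen)
{
	int err;

	sock_hold(sk);			/* keep the socket alive across the async op */
	areq->iocb = iocb;
	areq->outlen = outlen;		/* reported by af_alg_async_cb() on completion */

	err = submit_crypt(areq);	/* returns -EINPROGRESS/-EBUSY when queued */
	if (err == -EINPROGRESS || err == -EBUSY)
		return -EIOCBQUEUED;	/* callback now owns the sock ref and 'areq' */

	sock_put(sk);			/* completed (or failed) synchronously */
	af_alg_free_resources(areq);
	return err ? err : (int)outlen;
}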
index c1ca1e8..a6dcaa6 100644 (file)
@@ -148,8 +148,10 @@ struct pkcs7_message *pkcs7_parse_message(const void *data, size_t datalen)
        }
 
        ret = pkcs7_check_authattrs(ctx->msg);
-       if (ret < 0)
+       if (ret < 0) {
+               msg = ERR_PTR(ret);
                goto out;
+       }
 
        msg = ctx->msg;
        ctx->msg = NULL;
index f6a009d..1f4e25f 100644 (file)
@@ -69,7 +69,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
                 /* Self-signed certificates form roots of their own, and if we
                  * don't know them, then we can't accept them.
                  */
-               if (x509->next == x509) {
+               if (x509->signer == x509) {
                        kleave(" = -ENOKEY [unknown self-signed]");
                        return -ENOKEY;
                }
index 2d93d9e..39e6de0 100644 (file)
@@ -59,11 +59,8 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
        /* Digest the message [RFC2315 9.3] */
-       ret = crypto_shash_init(desc);
-       if (ret < 0)
-               goto error;
-       ret = crypto_shash_finup(desc, pkcs7->data, pkcs7->data_len,
-                                sig->digest);
+       ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len,
+                                 sig->digest);
        if (ret < 0)
                goto error;
        pr_devel("MsgDigest = [%*ph]\n", 8, sig->digest);
@@ -150,7 +147,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
                pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
                         sinfo->index, certix);
 
-               if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) {
+               if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) {
                        pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
                                sinfo->index);
                        continue;
index bc3035e..de99658 100644 (file)
@@ -73,7 +73,7 @@ int public_key_verify_signature(const struct public_key *pkey,
        char alg_name_buf[CRYPTO_MAX_ALG_NAME];
        void *output;
        unsigned int outlen;
-       int ret = -ENOMEM;
+       int ret;
 
        pr_devel("==>%s()\n", __func__);
 
@@ -99,6 +99,7 @@ int public_key_verify_signature(const struct public_key *pkey,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
+       ret = -ENOMEM;
        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                goto error_free_tfm;
@@ -127,7 +128,7 @@ int public_key_verify_signature(const struct public_key *pkey,
         * signature and returns that to us.
         */
        ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
-       if (ret < 0)
+       if (ret)
                goto out_free_output;
 
        /* Do the actual verification step. */
@@ -142,6 +143,8 @@ error_free_req:
 error_free_tfm:
        crypto_free_akcipher(tfm);
        pr_devel("<==%s() = %d\n", __func__, ret);
+       if (WARN_ON_ONCE(ret > 0))
+               ret = -EINVAL;
        return ret;
 }
 EXPORT_SYMBOL_GPL(public_key_verify_signature);
index dd03fea..ce2df8c 100644 (file)
@@ -409,6 +409,8 @@ int x509_extract_key_data(void *context, size_t hdrlen,
        ctx->cert->pub->pkey_algo = "rsa";
 
        /* Discard the BIT STRING metadata */
+       if (vlen < 1 || *(const u8 *)value != 0)
+               return -EBADMSG;
        ctx->key = value + 1;
        ctx->key_size = vlen - 1;
        return 0;
index c901358..9338b45 100644 (file)
@@ -79,11 +79,7 @@ int x509_get_sig_params(struct x509_certificate *cert)
        desc->tfm = tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-       ret = crypto_shash_init(desc);
-       if (ret < 0)
-               goto error_2;
-       might_sleep();
-       ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest);
+       ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
        if (ret < 0)
                goto error_2;
 
@@ -135,7 +131,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
        }
 
        ret = -EKEYREJECTED;
-       if (cert->pub->pkey_algo != cert->sig->pkey_algo)
+       if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
                goto out;
 
        ret = public_key_verify_signature(cert->pub, cert->sig);
index db1bc31..600afa9 100644 (file)
@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
                                                    algt->mask));
        if (IS_ERR(poly))
                return PTR_ERR(poly);
+       poly_hash = __crypto_hash_alg_common(poly);
+
+       err = -EINVAL;
+       if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
+               goto out_put_poly;
 
        err = -ENOMEM;
        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 
        ctx = aead_instance_ctx(inst);
        ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
-       poly_hash = __crypto_hash_alg_common(poly);
        err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
                                      aead_crypto_instance(inst));
        if (err)
index 92871dc..e747302 100644 (file)
@@ -195,11 +195,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
        salg = shash_attr_alg(tb[1], 0, 0);
        if (IS_ERR(salg))
                return PTR_ERR(salg);
+       alg = &salg->base;
 
+       /* The underlying hash algorithm must be unkeyed */
        err = -EINVAL;
+       if (crypto_shash_alg_has_setkey(salg))
+               goto out_put_alg;
+
        ds = salg->digestsize;
        ss = salg->statesize;
-       alg = &salg->base;
        if (ds > alg->cra_blocksize ||
            ss < alg->cra_blocksize)
                goto out_put_alg;
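crypto_shash_alg_has_setkey(), used in the check above, is not visible in this diff; given the shash_no_setkey() export added further down, it is presumably just a pointer comparison against that default stub, along the lines of the following sketch (an assumption about the crypto internals, not code taken from the patch).

static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
{
	/* keyed iff ->setkey was overridden away from the no-op default */
	return alg->setkey != shash_no_setkey;
}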
index 4e64726..eca04d3 100644 (file)
@@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
                pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+               spin_lock_init(&cpu_queue->q_lock);
        }
        return 0;
 }
@@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
        int cpu, err;
        struct mcryptd_cpu_queue *cpu_queue;
 
-       cpu = get_cpu();
-       cpu_queue = this_cpu_ptr(queue->cpu_queue);
-       rctx->tag.cpu = cpu;
+       cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+       spin_lock(&cpu_queue->q_lock);
+       cpu = smp_processor_id();
+       rctx->tag.cpu = smp_processor_id();
 
        err = crypto_enqueue_request(&cpu_queue->queue, request);
        pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
                 cpu, cpu_queue, request);
+       spin_unlock(&cpu_queue->q_lock);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-       put_cpu();
 
        return err;
 }
@@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
        cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
        i = 0;
        while (i < MCRYPTD_BATCH || single_task_running()) {
-               /*
-                * preempt_disable/enable is used to prevent
-                * being preempted by mcryptd_enqueue_request()
-                */
-               local_bh_disable();
-               preempt_disable();
+
+               spin_lock_bh(&cpu_queue->q_lock);
                backlog = crypto_get_backlog(&cpu_queue->queue);
                req = crypto_dequeue_request(&cpu_queue->queue);
-               preempt_enable();
-               local_bh_enable();
+               spin_unlock_bh(&cpu_queue->q_lock);
 
                if (!req) {
                        mcryptd_opportunistic_flush();
@@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
                ++i;
        }
        if (cpu_queue->queue.qlen)
-               queue_work(kcrypto_wq, &cpu_queue->work);
+               queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
 
 void mcryptd_flusher(struct work_struct *__work)
index ee9cfb9..f8ec3d4 100644 (file)
@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
        crypto_free_aead(ctx->child);
 }
 
+static void pcrypt_free(struct aead_instance *inst)
+{
+       struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
+
+       crypto_drop_aead(&ctx->spawn);
+       kfree(inst);
+}
+
 static int pcrypt_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
 {
@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
        inst->alg.encrypt = pcrypt_aead_encrypt;
        inst->alg.decrypt = pcrypt_aead_decrypt;
 
+       inst->free = pcrypt_free;
+
        err = aead_register_instance(tmpl, inst);
        if (err)
                goto out_drop_aead;
@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
        return -EINVAL;
 }
 
-static void pcrypt_free(struct crypto_instance *inst)
-{
-       struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-       crypto_drop_aead(&ctx->spawn);
-       kfree(inst);
-}
-
 static int pcrypt_cpumask_change_notify(struct notifier_block *self,
                                        unsigned long val, void *data)
 {
@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
 static struct crypto_template pcrypt_tmpl = {
        .name = "pcrypt",
        .create = pcrypt_create,
-       .free = pcrypt_free,
        .module = THIS_MODULE,
 };
 
index 0b66dc8..cad395d 100644 (file)
@@ -30,7 +30,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
                return -EINVAL;
 
        if (fips_enabled) {
-               while (!*ptr && n_sz) {
+               while (n_sz && !*ptr) {
                        ptr++;
                        n_sz--;
                }
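
The reordered condition above matters when the modulus is all zero bytes: the length must be tested before the byte is dereferenced. A small, self-contained sketch of the same leading-zero strip (illustrative names, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Count leading zero bytes without ever reading past the buffer: the
 * length check comes before the dereference, exactly as in the
 * reordered while condition. */
static size_t skip_leading_zeros(const unsigned char *p, size_t len)
{
        size_t skipped = 0;

        while (len && !*p) {    /* never "!*p && len" */
                p++;
                len--;
                skipped++;
        }
        return skipped;
}

int main(void)
{
        unsigned char n[] = { 0x00, 0x00, 0x01, 0x23 };

        printf("skipped %zu leading zero bytes\n",
               skip_leading_zeros(n, sizeof(n)));
        return 0;
}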
index f550b5d..d7da0ee 100644 (file)
@@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc,
 
        salsa20_ivsetup(ctx, walk.iv);
 
-       if (likely(walk.nbytes == nbytes))
-       {
-               salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
-                                     walk.src.virt.addr, nbytes);
-               return blkcipher_walk_done(desc, &walk, 0);
-       }
-
        while (walk.nbytes >= 64) {
                salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
                                      walk.src.virt.addr,
index 325a14d..e849d3e 100644 (file)
 
 static const struct crypto_type crypto_shash_type;
 
-static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
-                          unsigned int keylen)
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+                   unsigned int keylen)
 {
        return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(shash_no_setkey);
 
 static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
                                  unsigned int keylen)
index d5692e3..11af5fd 100644 (file)
@@ -449,6 +449,8 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 
        walk->total = req->cryptlen;
        walk->nbytes = 0;
+       walk->iv = req->iv;
+       walk->oiv = req->iv;
 
        if (unlikely(!walk->total))
                return 0;
@@ -456,9 +458,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);
 
-       walk->iv = req->iv;
-       walk->oiv = req->iv;
-
        walk->flags &= ~SKCIPHER_WALK_SLEEP;
        walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                       SKCIPHER_WALK_SLEEP : 0;
@@ -510,6 +509,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
        int err;
 
        walk->nbytes = 0;
+       walk->iv = req->iv;
+       walk->oiv = req->iv;
 
        if (unlikely(!walk->total))
                return 0;
@@ -522,8 +523,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
        scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
        scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
 
-       walk->iv = req->iv;
-       walk->oiv = req->iv;
+       scatterwalk_done(&walk->in, 0, walk->total);
+       scatterwalk_done(&walk->out, 0, walk->total);
 
        if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                walk->flags |= SKCIPHER_WALK_SLEEP;
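
The skcipher hunks above move the IV assignment ahead of the zero-length early return, so the walk never exposes an uninitialized IV pointer. A generic sketch of the pattern, with made-up structure names:

#include <stddef.h>
#include <stdio.h>

/* Fill in every field a caller may look at before taking any early-return
 * path, so a zero-length request does not leave the walk with an
 * uninitialized IV pointer. */
struct walk_model {
        size_t total;
        size_t nbytes;
        unsigned char *iv;
};

static int walk_init(struct walk_model *w, unsigned char *iv, size_t len)
{
        w->nbytes = 0;
        w->iv = iv;             /* set before the early return below */
        w->total = len;

        if (!w->total)
                return 0;       /* caller can still read w->iv safely */

        /* ... set up the scatterlist walk here ... */
        return 0;
}

int main(void)
{
        unsigned char iv[16] = { 0 };
        struct walk_model w;

        walk_init(&w, iv, 0);
        printf("iv initialized: %s\n", w.iv ? "yes" : "no");
        return 0;
}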
index 1d034b6..e06f7f6 100644 (file)
@@ -105,6 +105,7 @@ obj-$(CONFIG_TC)            += tc/
 obj-$(CONFIG_UWB)              += uwb/
 obj-$(CONFIG_USB_PHY)          += usb/
 obj-$(CONFIG_USB)              += usb/
+obj-$(CONFIG_USB_SUPPORT)      += usb/
 obj-$(CONFIG_PCI)              += usb/
 obj-$(CONFIG_USB_GADGET)       += usb/
 obj-$(CONFIG_OF)               += usb/
index 7f2b02c..2bcffec 100644 (file)
@@ -427,6 +427,142 @@ out:
        return 0;
 }
 
+struct lpss_device_links {
+       const char *supplier_hid;
+       const char *supplier_uid;
+       const char *consumer_hid;
+       const char *consumer_uid;
+       u32 flags;
+};
+
+/*
+ * The _DEP method is used to identify dependencies but instead of creating
+ * device links for every handle in _DEP, only links in the following list are
+ * created. That is necessary because, in the general case, _DEP can refer to
+ * devices that might not have drivers, or that are on different buses, or where
+ * the supplier is not enumerated until after the consumer is probed.
+ */
+static const struct lpss_device_links lpss_device_links[] = {
+       {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
+};
+
+static bool hid_uid_match(const char *hid1, const char *uid1,
+                         const char *hid2, const char *uid2)
+{
+       return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static bool acpi_lpss_is_supplier(struct acpi_device *adev,
+                                 const struct lpss_device_links *link)
+{
+       return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+                            link->supplier_hid, link->supplier_uid);
+}
+
+static bool acpi_lpss_is_consumer(struct acpi_device *adev,
+                                 const struct lpss_device_links *link)
+{
+       return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+                            link->consumer_hid, link->consumer_uid);
+}
+
+struct hid_uid {
+       const char *hid;
+       const char *uid;
+};
+
+static int match_hid_uid(struct device *dev, void *data)
+{
+       struct acpi_device *adev = ACPI_COMPANION(dev);
+       struct hid_uid *id = data;
+
+       if (!adev)
+               return 0;
+
+       return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+                            id->hid, id->uid);
+}
+
+static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
+{
+       struct hid_uid data = {
+               .hid = hid,
+               .uid = uid,
+       };
+
+       return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
+}
+
+static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
+{
+       struct acpi_handle_list dep_devices;
+       acpi_status status;
+       int i;
+
+       if (!acpi_has_method(adev->handle, "_DEP"))
+               return false;
+
+       status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
+                                        &dep_devices);
+       if (ACPI_FAILURE(status)) {
+               dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
+               return false;
+       }
+
+       for (i = 0; i < dep_devices.count; i++) {
+               if (dep_devices.handles[i] == handle)
+                       return true;
+       }
+
+       return false;
+}
+
+static void acpi_lpss_link_consumer(struct device *dev1,
+                                   const struct lpss_device_links *link)
+{
+       struct device *dev2;
+
+       dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
+       if (!dev2)
+               return;
+
+       if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
+               device_link_add(dev2, dev1, link->flags);
+
+       put_device(dev2);
+}
+
+static void acpi_lpss_link_supplier(struct device *dev1,
+                                   const struct lpss_device_links *link)
+{
+       struct device *dev2;
+
+       dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
+       if (!dev2)
+               return;
+
+       if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
+               device_link_add(dev1, dev2, link->flags);
+
+       put_device(dev2);
+}
+
+static void acpi_lpss_create_device_links(struct acpi_device *adev,
+                                         struct platform_device *pdev)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
+               const struct lpss_device_links *link = &lpss_device_links[i];
+
+               if (acpi_lpss_is_supplier(adev, link))
+                       acpi_lpss_link_consumer(&pdev->dev, link);
+
+               if (acpi_lpss_is_consumer(adev, link))
+                       acpi_lpss_link_supplier(&pdev->dev, link);
+       }
+}
+
 static int acpi_lpss_create_device(struct acpi_device *adev,
                                   const struct acpi_device_id *id)
 {
@@ -465,6 +601,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
        acpi_dev_free_resource_list(&resource_list);
 
        if (!pdata->mmio_base) {
+               /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
+               adev->pnp.type.platform_id = 0;
                /* Skip the device, but continue the namespace scan. */
                ret = 0;
                goto err_out;
@@ -500,6 +638,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
        adev->driver_data = pdata;
        pdev = acpi_create_platform_device(adev, dev_desc->properties);
        if (!IS_ERR_OR_NULL(pdev)) {
+               acpi_lpss_create_device_links(adev, pdev);
                return 1;
        }
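
The new LPSS code above only creates device links for (HID, UID) pairs that appear in a small whitelist and are confirmed by _DEP. A user-space model of that lookup, reusing the single table entry shown above together with an illustrative match helper:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct link_rule {
        const char *supplier_hid, *supplier_uid;
        const char *consumer_hid, *consumer_uid;
};

static const struct link_rule rules[] = {
        { "808622C1", "7", "80860F14", "3" },   /* mirrors lpss_device_links[] */
};

static bool id_match(const char *hid1, const char *uid1,
                     const char *hid2, const char *uid2)
{
        return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
}

int main(void)
{
        const char *hid = "808622C1", *uid = "7";
        size_t i;

        for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
                if (id_match(hid, uid, rules[i].supplier_hid,
                             rules[i].supplier_uid))
                        printf("device is a supplier; link consumer %s:%s\n",
                               rules[i].consumer_hid, rules[i].consumer_uid);
        }
        return 0;
}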
 
index 0972ec0..f53ccc6 100644 (file)
@@ -80,8 +80,8 @@ MODULE_PARM_DESC(report_key_events,
 static bool device_id_scheme = false;
 module_param(device_id_scheme, bool, 0444);
 
-static bool only_lcd = false;
-module_param(only_lcd, bool, 0444);
+static int only_lcd = -1;
+module_param(only_lcd, int, 0444);
 
 static int register_count;
 static DEFINE_MUTEX(register_count_mutex);
@@ -2136,6 +2136,16 @@ int acpi_video_register(void)
                goto leave;
        }
 
+       /*
+        * We're seeing a lot of bogus backlight interfaces on newer machines
+        * without a LCD such as desktops, servers and HDMI sticks. Checking
+        * the lcd flag fixes this, so enable this on any machines which are
+        * win8 ready (where we also prefer the native backlight driver, so
+        * normally the acpi_video code should not register there anyways).
+        */
+       if (only_lcd == -1)
+               only_lcd = acpi_osi_is_win8();
+
        dmi_check_system(video_dmi_table);
 
        ret = acpi_bus_register_driver(&acpi_video_bus);
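
The only_lcd parameter above becomes a tri-state: -1 means "decide at registration time". A tiny sketch of that pattern, with a stand-in for acpi_osi_is_win8():

#include <stdbool.h>
#include <stdio.h>

static int only_lcd = -1;       /* module parameter default: auto */

static bool platform_is_win8_ready(void)
{
        return true;            /* stand-in for acpi_osi_is_win8() */
}

static void video_register(void)
{
        /* 0/1 set by the user are respected; -1 is resolved here. */
        if (only_lcd == -1)
                only_lcd = platform_is_win8_ready();

        printf("only_lcd resolved to %d\n", only_lcd);
}

int main(void)
{
        video_register();
        return 0;
}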
index 7a1a68b..2243c81 100644 (file)
@@ -80,6 +80,9 @@
        prefix, ACPICA_COPYRIGHT, \
        prefix
 
+#define ACPI_COMMON_BUILD_TIME \
+       "Build date/time: %s %s\n", __DATE__, __TIME__
+
 /* Macros for usage messages */
 
 #define ACPI_USAGE_HEADER(usage) \
index 71743e5..54b8d9d 100644 (file)
@@ -222,6 +222,10 @@ ACPI_DBR_DEPENDENT_RETURN_VOID(void
 void
 acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags);
 
+void
+acpi_db_create_execution_thread(char *method_name_arg,
+                               char **arguments, acpi_object_type *types);
+
 void
 acpi_db_create_execution_threads(char *num_threads_arg,
                                 char *num_loops_arg, char *method_name_arg);
index 95eed44..45ef3f5 100644 (file)
@@ -46,7 +46,7 @@
 
 /*****************************************************************************
  *
- * Globals related to the ACPI tables
+ * Globals related to the incoming ACPI tables
  *
  ****************************************************************************/
 
@@ -87,7 +87,7 @@ ACPI_GLOBAL(u8, acpi_gbl_integer_nybble_width);
 
 /*****************************************************************************
  *
- * Mutual exclusion within ACPICA subsystem
+ * Mutual exclusion within the ACPICA subsystem
  *
  ****************************************************************************/
 
@@ -167,7 +167,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
 
 ACPI_INIT_GLOBAL(u8, acpi_gbl_namespace_initialized, FALSE);
 
-/* Misc */
+/* Miscellaneous */
 
 ACPI_GLOBAL(u32, acpi_gbl_original_mode);
 ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count);
@@ -191,10 +191,9 @@ extern const char acpi_gbl_lower_hex_digits[];
 extern const char acpi_gbl_upper_hex_digits[];
 extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
 
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
 /* Lists for tracking memory allocations (debug only) */
 
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
 ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
 ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
 ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
@@ -203,7 +202,7 @@ ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
 
 /*****************************************************************************
  *
- * Namespace globals
+ * ACPI Namespace
  *
  ****************************************************************************/
 
@@ -234,15 +233,20 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
 
 /*****************************************************************************
  *
- * Interpreter globals
+ * Interpreter/Parser globals
  *
  ****************************************************************************/
 
-ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
-
 /* Control method single step flag */
 
 ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
+ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
+ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
+
+/* ASL/ASL+ converter */
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_capture_comments, FALSE);
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
 
 /*****************************************************************************
  *
@@ -252,7 +256,6 @@ ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
 
 extern struct acpi_bit_register_info
     acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
-
 ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
 ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
 
@@ -263,7 +266,6 @@ ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
  ****************************************************************************/
 
 #if (!ACPI_REDUCED_HARDWARE)
-
 ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized);
 ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head);
 ACPI_GLOBAL(struct acpi_gpe_block_info *,
@@ -272,10 +274,8 @@ ACPI_GLOBAL(acpi_gbl_event_handler, acpi_gbl_global_event_handler);
 ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context);
 ACPI_GLOBAL(struct acpi_fixed_event_handler,
            acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]);
-
 extern struct acpi_fixed_event_info
     acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
-
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
 /*****************************************************************************
@@ -291,14 +291,14 @@ ACPI_GLOBAL(u32, acpi_gpe_count);
 ACPI_GLOBAL(u32, acpi_sci_count);
 ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
 
-/* Support for dynamic control method tracing mechanism */
+/* Dynamic control method tracing mechanism */
 
 ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
 ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
 
 /*****************************************************************************
  *
- * Debugger and Disassembler globals
+ * Debugger and Disassembler
  *
  ****************************************************************************/
 
@@ -326,7 +326,6 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
 #endif
 
 #ifdef ACPI_DEBUGGER
-
 ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
 ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID);
 
@@ -340,7 +339,6 @@ ACPI_GLOBAL(u32, acpi_gbl_db_console_debug_level);
 ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node);
 ACPI_GLOBAL(u8, acpi_gbl_db_terminate_loop);
 ACPI_GLOBAL(u8, acpi_gbl_db_threads_terminated);
-
 ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]);
 ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);
 
@@ -350,32 +348,33 @@ ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
 ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
 ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
 
-/*
- * Statistic globals
- */
+/* Statistics globals */
+
 ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]);
 ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]);
 ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
 ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
 ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
 ACPI_GLOBAL(u32, acpi_gbl_num_objects);
-
 #endif                         /* ACPI_DEBUGGER */
 
 #if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
-
 ACPI_GLOBAL(const char, *acpi_gbl_pld_panel_list[]);
 ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]);
 ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]);
 ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
-
 ACPI_INIT_GLOBAL(u8, acpi_gbl_disasm_flag, FALSE);
-
 #endif
 
-/*
- * Meant for the -ca option.
- */
+/*****************************************************************************
+ *
+ * ACPICA application-specific globals
+ *
+ ****************************************************************************/
+
+/* ASL-to-ASL+ conversion utility (implemented within the iASL compiler) */
+
+#ifdef ACPI_ASL_COMPILER
 ACPI_INIT_GLOBAL(char *, acpi_gbl_current_inline_comment, NULL);
 ACPI_INIT_GLOBAL(char *, acpi_gbl_current_end_node_comment, NULL);
 ACPI_INIT_GLOBAL(char *, acpi_gbl_current_open_brace_comment, NULL);
@@ -386,23 +385,18 @@ ACPI_INIT_GLOBAL(char *, acpi_gbl_current_filename, NULL);
 ACPI_INIT_GLOBAL(char *, acpi_gbl_current_parent_filename, NULL);
 ACPI_INIT_GLOBAL(char *, acpi_gbl_current_include_filename, NULL);
 
-ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
-
 ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_head,
                 NULL);
 ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_tail,
                 NULL);
-
 ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_head,
                 NULL);
 ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_tail,
                 NULL);
-
 ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_head,
                 NULL);
 ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_tail,
                 NULL);
-
 ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_head,
                 NULL);
 ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
@@ -410,30 +404,18 @@ ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
 
 ACPI_INIT_GLOBAL(struct acpi_comment_addr_node,
                 *acpi_gbl_comment_addr_list_head, NULL);
-
-ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
-
 ACPI_INIT_GLOBAL(struct acpi_file_node, *acpi_gbl_file_tree_root, NULL);
 
 ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_reg_comment_cache);
 ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_comment_addr_cache);
 ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_file_cache);
 
-ACPI_INIT_GLOBAL(u8, gbl_capture_comments, FALSE);
-
 ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_asl_conversion, FALSE);
 ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_conv_debug_file, NULL);
-
 ACPI_GLOBAL(char, acpi_gbl_table_sig[4]);
-
-/*****************************************************************************
- *
- * Application globals
- *
- ****************************************************************************/
+#endif
 
 #ifdef ACPI_APPLICATION
-
 ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
 ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
@@ -442,16 +424,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
 
 ACPI_GLOBAL(acpi_spinlock, acpi_gbl_print_lock);       /* For print buffer */
 ACPI_GLOBAL(char, acpi_gbl_print_buffer[1024]);
-
 #endif                         /* ACPI_APPLICATION */
 
-/*****************************************************************************
- *
- * Info/help support
- *
- ****************************************************************************/
-
-extern const struct ah_predefined_name asl_predefined_info[];
-extern const struct ah_device_id asl_device_ids[];
-
 #endif                         /* __ACGLOBAL_H__ */
index 0d45b8b..a56675f 100644 (file)
@@ -622,7 +622,7 @@ struct acpi_control_state {
        union acpi_parse_object *predicate_op;
        u8 *aml_predicate_start;        /* Start of if/while predicate */
        u8 *package_end;        /* End of if/while block */
-       u32 loop_count;         /* While() loop counter */
+       u64 loop_timeout;       /* While() loop timeout */
 };
 
 /*
@@ -1218,16 +1218,17 @@ struct acpi_db_method_info {
        acpi_object_type *types;
 
        /*
-        * Arguments to be passed to method for the command
-        * Threads -
-        *   the Number of threads, ID of current thread and
-        *   Index of current thread inside all them created.
+        * Arguments to be passed to method for the commands Threads and
+        * Background. Note, ACPI specifies a maximum of 7 arguments (0 - 6).
+        *
+        * For the Threads command: the number of threads, the ID of the
+        * current thread, and the index of the current thread among all
+        * created threads.
         */
        char init_args;
 #ifdef ACPI_DEBUGGER
-       acpi_object_type arg_types[4];
+       acpi_object_type arg_types[ACPI_METHOD_NUM_ARGS];
 #endif
-       char *arguments[4];
+       char *arguments[ACPI_METHOD_NUM_ARGS];
        char num_threads_str[11];
        char id_of_thread_str[11];
        char index_of_thread_str[11];
index c7f0c96..128a3d7 100644 (file)
  * the plist contains a set of parens to allow variable-length lists.
  * These macros are used for both the debug and non-debug versions of the code.
  */
-#define ACPI_ERROR_NAMESPACE(s, e)          acpi_ut_namespace_error (AE_INFO, s, e);
+#define ACPI_ERROR_NAMESPACE(s, p, e)       acpi_ut_prefixed_namespace_error (AE_INFO, s, p, e);
 #define ACPI_ERROR_METHOD(s, n, p, e)       acpi_ut_method_error (AE_INFO, s, n, p, e);
 #define ACPI_WARN_PREDEFINED(plist)         acpi_ut_predefined_warning plist
 #define ACPI_INFO_PREDEFINED(plist)         acpi_ut_predefined_info plist
index 54a0c51..2fb1bb7 100644 (file)
@@ -289,6 +289,9 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
 char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
                                      u8 no_trailing);
 
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+                                     const char *internal_path);
+
 char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
 
 acpi_status
index 83b75e9..b6b29d7 100644 (file)
@@ -118,9 +118,6 @@ extern const char *acpi_gbl_ptyp_decode[];
 #ifndef ACPI_MSG_ERROR
 #define ACPI_MSG_ERROR          "ACPI Error: "
 #endif
-#ifndef ACPI_MSG_EXCEPTION
-#define ACPI_MSG_EXCEPTION      "ACPI Exception: "
-#endif
 #ifndef ACPI_MSG_WARNING
 #define ACPI_MSG_WARNING        "ACPI Warning: "
 #endif
@@ -129,10 +126,10 @@ extern const char *acpi_gbl_ptyp_decode[];
 #endif
 
 #ifndef ACPI_MSG_BIOS_ERROR
-#define ACPI_MSG_BIOS_ERROR     "ACPI BIOS Error (bug): "
+#define ACPI_MSG_BIOS_ERROR     "Firmware Error (ACPI): "
 #endif
 #ifndef ACPI_MSG_BIOS_WARNING
-#define ACPI_MSG_BIOS_WARNING   "ACPI BIOS Warning (bug): "
+#define ACPI_MSG_BIOS_WARNING   "Firmware Warning (ACPI): "
 #endif
 
 /*
@@ -233,10 +230,10 @@ u64 acpi_ut_implicit_strtoul64(char *string);
  */
 acpi_status acpi_ut_init_globals(void);
 
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-
 const char *acpi_ut_get_mutex_name(u32 mutex_id);
 
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
 const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type);
 #endif
 
@@ -641,9 +638,11 @@ void ut_convert_backslashes(char *pathname);
 
 void acpi_ut_repair_name(char *name);
 
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
 u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source);
 
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size);
+
 u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source);
 
 u8
@@ -737,9 +736,11 @@ acpi_ut_predefined_bios_error(const char *module_name,
                              u8 node_flags, const char *format, ...);
 
 void
-acpi_ut_namespace_error(const char *module_name,
-                       u32 line_number,
-                       const char *internal_name, acpi_status lookup_status);
+acpi_ut_prefixed_namespace_error(const char *module_name,
+                                u32 line_number,
+                                union acpi_generic_state *prefix_scope,
+                                const char *internal_name,
+                                acpi_status lookup_status);
 
 void
 acpi_ut_method_error(const char *module_name,
index 3b30319..ed088fc 100644 (file)
@@ -67,6 +67,8 @@ static acpi_status
 acpi_db_execution_walk(acpi_handle obj_handle,
                       u32 nesting_level, void *context, void **return_value);
 
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context);
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_db_delete_objects
@@ -229,7 +231,7 @@ static acpi_status acpi_db_execute_setup(struct acpi_db_method_info *info)
 
        ACPI_FUNCTION_NAME(db_execute_setup);
 
-       /* Catenate the current scope to the supplied name */
+       /* Concatenate the current scope to the supplied name */
 
        info->pathname[0] = 0;
        if ((info->name[0] != '\\') && (info->name[0] != '/')) {
@@ -609,6 +611,112 @@ static void ACPI_SYSTEM_XFACE acpi_db_method_thread(void *context)
        }
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_db_single_execution_thread
+ *
+ * PARAMETERS:  context                 - Method info struct
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Create one thread and execute a method
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context)
+{
+       struct acpi_db_method_info *info = context;
+       acpi_status status;
+       struct acpi_buffer return_obj;
+
+       acpi_os_printf("\n");
+
+       status = acpi_db_execute_method(info, &return_obj);
+       if (ACPI_FAILURE(status)) {
+               acpi_os_printf("%s During evaluation of %s\n",
+                              acpi_format_exception(status), info->pathname);
+               return;
+       }
+
+       /* Display a return object, if any */
+
+       if (return_obj.length) {
+               acpi_os_printf("Evaluation of %s returned object %p, "
+                              "external buffer length %X\n",
+                              acpi_gbl_db_method_info.pathname,
+                              return_obj.pointer, (u32)return_obj.length);
+
+               acpi_db_dump_external_object(return_obj.pointer, 1);
+       }
+
+       acpi_os_printf("\nBackground thread completed\n%c ",
+                      ACPI_DEBUGGER_COMMAND_PROMPT);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_db_create_execution_thread
+ *
+ * PARAMETERS:  method_name_arg         - Control method to execute
+ *              arguments               - Array of arguments to the method
+ *              types                   - Corresponding array of object types
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Create a single thread to evaluate a namespace object. Handles
+ *              arguments passed on command line for control methods.
+ *
+ ******************************************************************************/
+
+void
+acpi_db_create_execution_thread(char *method_name_arg,
+                               char **arguments, acpi_object_type *types)
+{
+       acpi_status status;
+       u32 i;
+
+       memset(&acpi_gbl_db_method_info, 0, sizeof(struct acpi_db_method_info));
+       acpi_gbl_db_method_info.name = method_name_arg;
+       acpi_gbl_db_method_info.init_args = 1;
+       acpi_gbl_db_method_info.args = acpi_gbl_db_method_info.arguments;
+       acpi_gbl_db_method_info.types = acpi_gbl_db_method_info.arg_types;
+
+       /* Setup method arguments, up to 7 (0-6) */
+
+       for (i = 0; (i < ACPI_METHOD_NUM_ARGS) && *arguments; i++) {
+               acpi_gbl_db_method_info.arguments[i] = *arguments;
+               arguments++;
+
+               acpi_gbl_db_method_info.arg_types[i] = *types;
+               types++;
+       }
+
+       status = acpi_db_execute_setup(&acpi_gbl_db_method_info);
+       if (ACPI_FAILURE(status)) {
+               return;
+       }
+
+       /* Get the NS node, determines existence also */
+
+       status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname,
+                                &acpi_gbl_db_method_info.method);
+       if (ACPI_FAILURE(status)) {
+               acpi_os_printf("%s Could not get handle for %s\n",
+                              acpi_format_exception(status),
+                              acpi_gbl_db_method_info.pathname);
+               return;
+       }
+
+       status = acpi_os_execute(OSL_DEBUGGER_EXEC_THREAD,
+                                acpi_db_single_execution_thread,
+                                &acpi_gbl_db_method_info);
+       if (ACPI_FAILURE(status)) {
+               return;
+       }
+
+       acpi_os_printf("\nBackground thread started\n");
+}
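
The Background command above hands the prepared method info to a single worker thread and returns to the prompt. A user-space analogy using pthreads instead of acpi_os_execute() (the payload struct and namespace path below are made up):

#include <pthread.h>
#include <stdio.h>

struct method_info {
        const char *pathname;
};

static void *single_execution_thread(void *context)
{
        struct method_info *info = context;

        printf("evaluating %s in the background\n", info->pathname);
        printf("Background thread completed\n");
        return NULL;
}

int main(void)
{
        static struct method_info info = { "\\_SB.PCI0._STA" };
        pthread_t tid;

        if (pthread_create(&tid, NULL, single_execution_thread, &info) == 0) {
                printf("Background thread started\n");
                pthread_join(tid, NULL);        /* a real debugger would not block here */
        }
        return 0;
}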
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_db_create_execution_threads
index 4d81ea2..cf96079 100644 (file)
@@ -99,8 +99,8 @@ void acpi_db_open_debug_file(char *name)
        }
 
        acpi_os_printf("Debug output file %s opened\n", name);
-       strncpy(acpi_gbl_db_debug_filename, name,
-               sizeof(acpi_gbl_db_debug_filename));
+       acpi_ut_safe_strncpy(acpi_gbl_db_debug_filename, name,
+                            sizeof(acpi_gbl_db_debug_filename));
        acpi_gbl_db_output_to_file = TRUE;
 }
 #endif
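
The debugger hunk above switches from strncpy() to acpi_ut_safe_strncpy(). Assuming the obvious contract (truncate and always NUL-terminate), a stand-alone sketch of such a helper:

#include <stdio.h>
#include <string.h>

/* Copy at most dest_size - 1 bytes and always NUL-terminate, unlike plain
 * strncpy(), which leaves the destination unterminated when the source is
 * too long. Semantics assumed here, not taken from the ACPICA source. */
static void safe_strncpy(char *dest, const char *source, size_t dest_size)
{
        if (!dest_size)
                return;
        strncpy(dest, source, dest_size - 1);
        dest[dest_size - 1] = '\0';
}

int main(void)
{
        char buf[8];

        safe_strncpy(buf, "a-rather-long-filename.log", sizeof(buf));
        printf("'%s'\n", buf);  /* prints the truncated, terminated string */
        return 0;
}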
index 2626d79..954ca3b 100644 (file)
@@ -136,6 +136,7 @@ enum acpi_ex_debugger_commands {
        CMD_UNLOAD,
 
        CMD_TERMINATE,
+       CMD_BACKGROUND,
        CMD_THREADS,
 
        CMD_TEST,
@@ -212,6 +213,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
        {"UNLOAD", 1},
 
        {"TERMINATE", 0},
+       {"BACKGROUND", 1},
        {"THREADS", 3},
 
        {"TEST", 1},
@@ -222,9 +224,56 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
 /*
  * Help for all debugger commands. First argument is the number of lines
  * of help to output for the command.
+ *
+ * Note: Some commands are not supported by the kernel-level version of
+ * the debugger.
  */
 static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
-       {0, "\nGeneral-Purpose Commands:", "\n"},
+       {0, "\nNamespace Access:", "\n"},
+       {1, "  Businfo", "Display system bus info\n"},
+       {1, "  Disassemble <Method>", "Disassemble a control method\n"},
+       {1, "  Find <AcpiName> (? is wildcard)",
+        "Find ACPI name(s) with wildcards\n"},
+       {1, "  Integrity", "Validate namespace integrity\n"},
+       {1, "  Methods", "Display list of loaded control methods\n"},
+       {1, "  Namespace [Object] [Depth]",
+        "Display loaded namespace tree/subtree\n"},
+       {1, "  Notify <Object> <Value>", "Send a notification on Object\n"},
+       {1, "  Objects [ObjectType]",
+        "Display summary of all objects or just given type\n"},
+       {1, "  Owner <OwnerId> [Depth]",
+        "Display loaded namespace by object owner\n"},
+       {1, "  Paths", "Display full pathnames of namespace objects\n"},
+       {1, "  Predefined", "Check all predefined names\n"},
+       {1, "  Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
+       {1, "  References <Addr>", "Find all references to object at addr\n"},
+       {1, "  Resources [DeviceName]",
+        "Display Device resources (no arg = all devices)\n"},
+       {1, "  Set N <NamedObject> <Value>", "Set value for named integer\n"},
+       {1, "  Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
+       {1, "  Type <Object>", "Display object type\n"},
+
+       {0, "\nControl Method Execution:", "\n"},
+       {1, "  Evaluate <Namepath> [Arguments]",
+        "Evaluate object or control method\n"},
+       {1, "  Execute <Namepath> [Arguments]", "Synonym for Evaluate\n"},
+#ifdef ACPI_APPLICATION
+       {1, "  Background <Namepath> [Arguments]",
+        "Evaluate object/method in a separate thread\n"},
+       {1, "  Thread <Threads><Loops><NamePath>",
+        "Spawn threads to execute method(s)\n"},
+#endif
+       {1, "  Debug <Namepath> [Arguments]", "Single-Step a control method\n"},
+       {7, "  [Arguments] formats:", "Control method argument formats\n"},
+       {1, "     Hex Integer", "Integer\n"},
+       {1, "     \"Ascii String\"", "String\n"},
+       {1, "     (Hex Byte List)", "Buffer\n"},
+       {1, "         (01 42 7A BF)", "Buffer example (4 bytes)\n"},
+       {1, "     [Package Element List]", "Package\n"},
+       {1, "         [0x01 0x1234 \"string\"]",
+        "Package example (3 elements)\n"},
+
+       {0, "\nMiscellaneous:", "\n"},
        {1, "  Allocations", "Display list of current memory allocations\n"},
        {2, "  Dump <Address>|<Namepath>", "\n"},
        {0, "       [Byte|Word|Dword|Qword]",
@@ -248,46 +297,30 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
        {1, "     Stack", "Display CPU stack usage\n"},
        {1, "     Tables", "Info about current ACPI table(s)\n"},
        {1, "  Tables", "Display info about loaded ACPI tables\n"},
+#ifdef ACPI_APPLICATION
+       {1, "  Terminate", "Delete namespace and all internal objects\n"},
+#endif
        {1, "  ! <CommandNumber>", "Execute command from history buffer\n"},
        {1, "  !!", "Execute last command again\n"},
 
-       {0, "\nNamespace Access Commands:", "\n"},
-       {1, "  Businfo", "Display system bus info\n"},
-       {1, "  Disassemble <Method>", "Disassemble a control method\n"},
-       {1, "  Find <AcpiName> (? is wildcard)",
-        "Find ACPI name(s) with wildcards\n"},
-       {1, "  Integrity", "Validate namespace integrity\n"},
-       {1, "  Methods", "Display list of loaded control methods\n"},
-       {1, "  Namespace [Object] [Depth]",
-        "Display loaded namespace tree/subtree\n"},
-       {1, "  Notify <Object> <Value>", "Send a notification on Object\n"},
-       {1, "  Objects [ObjectType]",
-        "Display summary of all objects or just given type\n"},
-       {1, "  Owner <OwnerId> [Depth]",
-        "Display loaded namespace by object owner\n"},
-       {1, "  Paths", "Display full pathnames of namespace objects\n"},
-       {1, "  Predefined", "Check all predefined names\n"},
-       {1, "  Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
-       {1, "  References <Addr>", "Find all references to object at addr\n"},
-       {1, "  Resources [DeviceName]",
-        "Display Device resources (no arg = all devices)\n"},
-       {1, "  Set N <NamedObject> <Value>", "Set value for named integer\n"},
-       {1, "  Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
-       {1, "  Type <Object>", "Display object type\n"},
+       {0, "\nMethod and Namespace Debugging:", "\n"},
+       {5, "  Trace <State> [<Namepath>] [Once]",
+        "Trace control method execution\n"},
+       {1, "     Enable", "Enable all messages\n"},
+       {1, "     Disable", "Disable tracing\n"},
+       {1, "     Method", "Enable method execution messages\n"},
+       {1, "     Opcode", "Enable opcode execution messages\n"},
+       {3, "  Test <TestName>", "Invoke a debug test\n"},
+       {1, "     Objects", "Read/write/compare all namespace data objects\n"},
+       {1, "     Predefined",
+        "Validate all ACPI predefined names (_STA, etc.)\n"},
+       {1, "  Execute predefined",
+        "Execute all predefined (public) methods\n"},
 
-       {0, "\nControl Method Execution Commands:", "\n"},
+       {0, "\nControl Method Single-Step Execution:", "\n"},
        {1, "  Arguments (or Args)", "Display method arguments\n"},
        {1, "  Breakpoint <AmlOffset>", "Set an AML execution breakpoint\n"},
        {1, "  Call", "Run to next control method invocation\n"},
-       {1, "  Debug <Namepath> [Arguments]", "Single Step a control method\n"},
-       {6, "  Evaluate", "Synonym for Execute\n"},
-       {5, "  Execute <Namepath> [Arguments]", "Execute control method\n"},
-       {1, "     Hex Integer", "Integer method argument\n"},
-       {1, "     \"Ascii String\"", "String method argument\n"},
-       {1, "     (Hex Byte List)", "Buffer method argument\n"},
-       {1, "     [Package Element List]", "Package method argument\n"},
-       {5, "  Execute predefined",
-        "Execute all predefined (public) methods\n"},
        {1, "  Go", "Allow method to run to completion\n"},
        {1, "  Information", "Display info about the current method\n"},
        {1, "  Into", "Step into (not over) a method call\n"},
@@ -296,41 +329,24 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
        {1, "  Results", "Display method result stack\n"},
        {1, "  Set <A|L> <#> <Value>", "Set method data (Arguments/Locals)\n"},
        {1, "  Stop", "Terminate control method\n"},
-       {5, "  Trace <State> [<Namepath>] [Once]",
-        "Trace control method execution\n"},
-       {1, "     Enable", "Enable all messages\n"},
-       {1, "     Disable", "Disable tracing\n"},
-       {1, "     Method", "Enable method execution messages\n"},
-       {1, "     Opcode", "Enable opcode execution messages\n"},
        {1, "  Tree", "Display control method calling tree\n"},
        {1, "  <Enter>", "Single step next AML opcode (over calls)\n"},
 
 #ifdef ACPI_APPLICATION
-       {0, "\nHardware Simulation Commands:", "\n"},
-       {1, "  EnableAcpi", "Enable ACPI (hardware) mode\n"},
-       {1, "  Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
-       {1, "  Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
-       {1, "  Gpes", "Display info on all GPE devices\n"},
-       {1, "  Sci", "Generate an SCI\n"},
-       {1, "  Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
-
-       {0, "\nFile I/O Commands:", "\n"},
+       {0, "\nFile Operations:", "\n"},
        {1, "  Close", "Close debug output file\n"},
        {1, "  Load <Input Filename>", "Load ACPI table from a file\n"},
        {1, "  Open <Output Filename>", "Open a file for debug output\n"},
        {1, "  Unload <Namepath>",
         "Unload an ACPI table via namespace object\n"},
 
-       {0, "\nUser Space Commands:", "\n"},
-       {1, "  Terminate", "Delete namespace and all internal objects\n"},
-       {1, "  Thread <Threads><Loops><NamePath>",
-        "Spawn threads to execute method(s)\n"},
-
-       {0, "\nDebug Test Commands:", "\n"},
-       {3, "  Test <TestName>", "Invoke a debug test\n"},
-       {1, "     Objects", "Read/write/compare all namespace data objects\n"},
-       {1, "     Predefined",
-        "Execute all ACPI predefined names (_STA, etc.)\n"},
+       {0, "\nHardware Simulation:", "\n"},
+       {1, "  EnableAcpi", "Enable ACPI (hardware) mode\n"},
+       {1, "  Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
+       {1, "  Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
+       {1, "  Gpes", "Display info on all GPE devices\n"},
+       {1, "  Sci", "Generate an SCI\n"},
+       {1, "  Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
 #endif
        {0, NULL, NULL}
 };
@@ -442,11 +458,15 @@ static void acpi_db_display_help(char *command)
 
                /* No argument to help, display help for all commands */
 
+               acpi_os_printf("\nSummary of AML Debugger Commands\n\n");
+
                while (next->invocation) {
                        acpi_os_printf("%-38s%s", next->invocation,
                                       next->description);
                        next++;
                }
+               acpi_os_printf("\n");
+
        } else {
                /* Display help for all commands that match the substring */
 
@@ -1087,6 +1107,13 @@ acpi_db_command_dispatch(char *input_buffer,
                /*  acpi_initialize (NULL); */
                break;
 
+       case CMD_BACKGROUND:
+
+               acpi_db_create_execution_thread(acpi_gbl_db_args[1],
+                                               &acpi_gbl_db_args[2],
+                                               &acpi_gbl_db_arg_types[2]);
+               break;
+
        case CMD_THREADS:
 
                acpi_db_create_execution_threads(acpi_gbl_db_args[1],
index f470e81..4b6ebc2 100644 (file)
@@ -118,6 +118,8 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
                control_state->control.package_end =
                    walk_state->parser_state.pkg_end;
                control_state->control.opcode = op->common.aml_opcode;
+               control_state->control.loop_timeout = acpi_os_get_timer() +
+                   (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
 
                /* Push the control state on this walk's control stack */
 
@@ -206,15 +208,15 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
                        /* Predicate was true, the body of the loop was just executed */
 
                        /*
-                        * This loop counter mechanism allows the interpreter to escape
-                        * possibly infinite loops. This can occur in poorly written AML
-                        * when the hardware does not respond within a while loop and the
-                        * loop does not implement a timeout.
+                        * This infinite loop detection mechanism allows the interpreter
+                        * to escape possibly infinite loops. This can occur in poorly
+                        * written AML when the hardware does not respond within a while
+                        * loop and the loop does not implement a timeout.
                         */
-                       control_state->control.loop_count++;
-                       if (control_state->control.loop_count >
-                           acpi_gbl_max_loop_iterations) {
-                               status = AE_AML_INFINITE_LOOP;
+                       if (ACPI_TIME_AFTER(acpi_os_get_timer(),
+                                           control_state->control.
+                                           loop_timeout)) {
+                               status = AE_AML_LOOP_TIMEOUT;
                                break;
                        }
 
index 7bcf5f5..0cab34a 100644 (file)
@@ -209,7 +209,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
                                        ACPI_IMODE_LOAD_PASS1, flags,
                                        walk_state, &node);
                if (ACPI_FAILURE(status)) {
-                       ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+                       ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                            arg->common.value.string, status);
                        return_ACPI_STATUS(status);
                }
        }
@@ -383,7 +384,9 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
                                                        walk_state,
                                                        &info->connection_node);
                                if (ACPI_FAILURE(status)) {
-                                       ACPI_ERROR_NAMESPACE(child->common.
+                                       ACPI_ERROR_NAMESPACE(walk_state->
+                                                            scope_info,
+                                                            child->common.
                                                             value.name,
                                                             status);
                                        return_ACPI_STATUS(status);
@@ -402,7 +405,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
                                                ACPI_NS_DONT_OPEN_SCOPE,
                                                walk_state, &info->field_node);
                        if (ACPI_FAILURE(status)) {
-                               ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+                               ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                                    (char *)&arg->named.name,
                                                     status);
                                return_ACPI_STATUS(status);
                        } else {
@@ -498,7 +502,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
                                                        &region_node);
 #endif
                if (ACPI_FAILURE(status)) {
-                       ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+                       ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                            arg->common.value.name, status);
                        return_ACPI_STATUS(status);
                }
        }
@@ -618,7 +623,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
                                                ACPI_IMODE_LOAD_PASS1, flags,
                                                walk_state, &node);
                        if (ACPI_FAILURE(status)) {
-                               ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+                               ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                                    (char *)&arg->named.name,
                                                     status);
                                if (status != AE_ALREADY_EXISTS) {
                                        return_ACPI_STATUS(status);
@@ -681,7 +687,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
                                                        &region_node);
 #endif
                if (ACPI_FAILURE(status)) {
-                       ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+                       ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                            arg->common.value.name, status);
                        return_ACPI_STATUS(status);
                }
        }
@@ -695,7 +702,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
                           ACPI_NS_SEARCH_PARENT, walk_state,
                           &info.register_node);
        if (ACPI_FAILURE(status)) {
-               ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+               ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                    arg->common.value.string, status);
                return_ACPI_STATUS(status);
        }
 
@@ -765,7 +773,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
                           ACPI_NS_SEARCH_PARENT, walk_state,
                           &info.register_node);
        if (ACPI_FAILURE(status)) {
-               ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+               ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                    arg->common.value.string, status);
                return_ACPI_STATUS(status);
        }
 
@@ -778,7 +787,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
                           ACPI_NS_SEARCH_PARENT, walk_state,
                           &info.data_register_node);
        if (ACPI_FAILURE(status)) {
-               ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+               ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                    arg->common.value.string, status);
                return_ACPI_STATUS(status);
        }
 
index 8244855..b21fe08 100644 (file)
@@ -112,7 +112,9 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
                                                         acpi_namespace_node,
                                                         &(op->common.node)));
                                if (ACPI_FAILURE(status)) {
-                                       ACPI_ERROR_NAMESPACE(op->common.value.
+                                       ACPI_ERROR_NAMESPACE(walk_state->
+                                                            scope_info,
+                                                            op->common.value.
                                                             string, status);
                                        return_ACPI_STATUS(status);
                                }
index 6d487ed..5a602b7 100644 (file)
@@ -297,8 +297,10 @@ acpi_ds_init_package_element(u8 object_type,
 {
        union acpi_operand_object **element_ptr;
 
+       ACPI_FUNCTION_TRACE(ds_init_package_element);
+
        if (!source_object) {
-               return (AE_OK);
+               return_ACPI_STATUS(AE_OK);
        }
 
        /*
@@ -329,7 +331,7 @@ acpi_ds_init_package_element(u8 object_type,
                source_object->package.flags |= AOPOBJ_DATA_VALID;
        }
 
-       return (AE_OK);
+       return_ACPI_STATUS(AE_OK);
 }
 
 /*******************************************************************************
@@ -352,6 +354,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
        union acpi_generic_state scope_info;
        union acpi_operand_object *element = *element_ptr;
        struct acpi_namespace_node *resolved_node;
+       struct acpi_namespace_node *original_node;
        char *external_path = NULL;
        acpi_object_type type;
 
@@ -441,6 +444,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
         * will remain as named references. This behavior is not described
         * in the ACPI spec, but it appears to be an oversight.
         */
+       original_node = resolved_node;
        status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
        if (ACPI_FAILURE(status)) {
                return_VOID;
@@ -468,26 +472,27 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
                 */
        case ACPI_TYPE_DEVICE:
        case ACPI_TYPE_THERMAL:
-
-               /* TBD: This may not be necesssary */
-
-               acpi_ut_add_reference(resolved_node->object);
+       case ACPI_TYPE_METHOD:
                break;
 
        case ACPI_TYPE_MUTEX:
-       case ACPI_TYPE_METHOD:
        case ACPI_TYPE_POWER:
        case ACPI_TYPE_PROCESSOR:
        case ACPI_TYPE_EVENT:
        case ACPI_TYPE_REGION:
 
+               /* acpi_ex_resolve_node_to_value gave these an extra reference */
+
+               acpi_ut_remove_reference(original_node->object);
                break;
 
        default:
                /*
                 * For all other types - the node was resolved to an actual
-                * operand object with a value, return the object
+                * operand object with a value, return the object. Remove
+                * a reference on the existing object.
                 */
+               acpi_ut_remove_reference(element);
                *element_ptr = (union acpi_operand_object *)resolved_node;
                break;
        }
index 0dabd9b..4c5faf6 100644 (file)
@@ -583,7 +583,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
                        }
 
                        if (ACPI_FAILURE(status)) {
-                               ACPI_ERROR_NAMESPACE(name_string, status);
+                               ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                                    name_string, status);
                        }
                }
 
index eaa859a..5771e4e 100644 (file)
@@ -207,7 +207,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
                }
 #endif
                if (ACPI_FAILURE(status)) {
-                       ACPI_ERROR_NAMESPACE(path, status);
+                       ACPI_ERROR_NAMESPACE(walk_state->scope_info, path,
+                                            status);
                        return_ACPI_STATUS(status);
                }
 
@@ -375,7 +376,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
                        }
 
                        if (ACPI_FAILURE(status)) {
-                               ACPI_ERROR_NAMESPACE(path, status);
+                               ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                                    path, status);
                                return_ACPI_STATUS(status);
                        }
                }
index aad83ef..b3d0aae 100644 (file)
@@ -184,11 +184,14 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
                                if (status == AE_NOT_FOUND) {
                                        status = AE_OK;
                                } else {
-                                       ACPI_ERROR_NAMESPACE(buffer_ptr,
+                                       ACPI_ERROR_NAMESPACE(walk_state->
+                                                            scope_info,
+                                                            buffer_ptr,
                                                             status);
                                }
 #else
-                               ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+                               ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                                    buffer_ptr, status);
 #endif
                                return_ACPI_STATUS(status);
                        }
@@ -343,7 +346,8 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
        }
 
        if (ACPI_FAILURE(status)) {
-               ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+               ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                    buffer_ptr, status);
                return_ACPI_STATUS(status);
        }
 
@@ -719,7 +723,8 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
                         */
                        op->common.node = new_node;
                } else {
-                       ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+                       ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+                                            arg->common.value.string, status);
                }
                break;
 
index 28b447f..bb58419 100644 (file)
@@ -298,6 +298,16 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
                                acpi_ut_get_region_name(region_obj->region.
                                                        space_id)));
+
+               /*
+                * Special case for an EC timeout. These are seen so frequently
+                * that an additional error message is helpful
+                */
+               if ((region_obj->region.space_id == ACPI_ADR_SPACE_EC) &&
+                   (status == AE_TIME)) {
+                       ACPI_ERROR((AE_INFO,
+                                   "Timeout from EC hardware or EC device driver"));
+               }
        }
 
        if (!(handler_desc->address_space.handler_flags &
index 83398dc..b2ff61b 100644 (file)
@@ -617,10 +617,11 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
        u32 length;
        u32 index;
 
-       ACPI_FUNCTION_NAME(ex_dump_operand)
+       ACPI_FUNCTION_NAME(ex_dump_operand);
 
-           /* Check if debug output enabled */
-           if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
+       /* Check if debug output enabled */
+
+       if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
                return;
        }
 
@@ -904,7 +905,7 @@ void
 acpi_ex_dump_operands(union acpi_operand_object **operands,
                      const char *opcode_name, u32 num_operands)
 {
-       ACPI_FUNCTION_NAME(ex_dump_operands);
+       ACPI_FUNCTION_TRACE(ex_dump_operands);
 
        if (!opcode_name) {
                opcode_name = "UNKNOWN";
@@ -928,7 +929,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
 
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "**** End operand dump for [%s]\n", opcode_name));
-       return;
+       return_VOID;
 }
 
 /*******************************************************************************
index a2f4e25..5b42829 100644 (file)
@@ -150,10 +150,10 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer)
  *
  ******************************************************************************/
 acpi_status
-acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
+acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 *time_elapsed)
 {
        acpi_status status;
-       u32 delta_ticks;
+       u64 delta_ticks;
        u64 quotient;
 
        ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
@@ -168,30 +168,29 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
                return_ACPI_STATUS(AE_SUPPORT);
        }
 
+       if (start_ticks == end_ticks) {
+               *time_elapsed = 0;
+               return_ACPI_STATUS(AE_OK);
+       }
+
        /*
         * Compute Tick Delta:
         * Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
         */
-       if (start_ticks < end_ticks) {
-               delta_ticks = end_ticks - start_ticks;
-       } else if (start_ticks > end_ticks) {
+       delta_ticks = end_ticks;
+       if (start_ticks > end_ticks) {
                if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
 
                        /* 24-bit Timer */
 
-                       delta_ticks =
-                           (((0x00FFFFFF - start_ticks) +
-                             end_ticks) & 0x00FFFFFF);
+                       delta_ticks |= (u64)1 << 24;
                } else {
                        /* 32-bit Timer */
 
-                       delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks;
+                       delta_ticks |= (u64)1 << 32;
                }
-       } else {                /* start_ticks == end_ticks */
-
-               *time_elapsed = 0;
-               return_ACPI_STATUS(AE_OK);
        }
+       delta_ticks -= start_ticks;
 
        /*
         * Compute Duration (Requires a 64-bit multiply and divide):
@@ -199,10 +198,10 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
         * time_elapsed (microseconds) =
         *  (delta_ticks * ACPI_USEC_PER_SEC) / ACPI_PM_TIMER_FREQUENCY;
         */
-       status = acpi_ut_short_divide(((u64)delta_ticks) * ACPI_USEC_PER_SEC,
+       status = acpi_ut_short_divide(delta_ticks * ACPI_USEC_PER_SEC,
                                      ACPI_PM_TIMER_FREQUENCY, &quotient, NULL);
 
-       *time_elapsed = (u32) quotient;
+       *time_elapsed = (u32)quotient;
        return_ACPI_STATUS(status);
 }
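
The rewritten rollover handling above can be read as: start from end_ticks, borrow one full timer period (2^24 or 2^32) if the counter wrapped, then subtract start_ticks. A minimal user-space sketch of the same arithmetic, assuming the usual 3.579545 MHz PM timer rate behind ACPI_PM_TIMER_FREQUENCY (the function and macro names here are illustrative, not the ACPICA API):

    #include <stdint.h>
    #include <stdio.h>

    #define PM_TIMER_HZ 3579545u    /* ACPI PM timer runs at 3.579545 MHz */

    /* Compute elapsed microseconds, handling at most one rollover. */
    static uint32_t pm_timer_elapsed_us(uint32_t start, uint32_t end,
                                        int timer_is_32bit)
    {
        uint64_t delta = end;

        if (start > end)    /* the counter wrapped exactly once */
            delta |= (uint64_t)1 << (timer_is_32bit ? 32 : 24);
        delta -= start;

        return (uint32_t)(delta * 1000000u / PM_TIMER_HZ);
    }

    int main(void)
    {
        /* 24-bit timer wrapping from 0x00FFFFF0 to 0x00000010: 32 ticks */
        printf("%u us\n", pm_timer_elapsed_us(0x00FFFFF0, 0x00000010, 0));
        return 0;
    }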
 
index 3094cec..d167903 100644 (file)
@@ -128,14 +128,14 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
        acpi_io_address last_address;
        const struct acpi_port_info *port_info;
 
-       ACPI_FUNCTION_NAME(hw_validate_io_request);
+       ACPI_FUNCTION_TRACE(hw_validate_io_request);
 
        /* Supported widths are 8/16/32 */
 
        if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
                ACPI_ERROR((AE_INFO,
                            "Bad BitWidth parameter: %8.8X", bit_width));
-               return (AE_BAD_PARAMETER);
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
        port_info = acpi_protected_ports;
@@ -153,13 +153,13 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
                ACPI_ERROR((AE_INFO,
                            "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
                            ACPI_FORMAT_UINT64(address), byte_width));
-               return (AE_LIMIT);
+               return_ACPI_STATUS(AE_LIMIT);
        }
 
        /* Exit if requested address is not within the protected port table */
 
        if (address > acpi_protected_ports[ACPI_PORT_INFO_ENTRIES - 1].end) {
-               return (AE_OK);
+               return_ACPI_STATUS(AE_OK);
        }
 
        /* Check request against the list of protected I/O ports */
@@ -180,8 +180,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
                        /* Port illegality may depend on the _OSI calls made by the BIOS */
 
                        if (acpi_gbl_osi_data >= port_info->osi_dependency) {
-                               ACPI_DEBUG_PRINT((ACPI_DB_IO,
-                                                 "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
+                               ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+                                                 "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
                                                  ACPI_FORMAT_UINT64(address),
                                                  byte_width, port_info->name,
                                                  port_info->start,
@@ -198,7 +198,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
                }
        }
 
-       return (AE_OK);
+       return_ACPI_STATUS(AE_OK);
 }
 
 /******************************************************************************
index f2733f5..33e652a 100644 (file)
@@ -644,17 +644,18 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
                                            this_node->object;
                                }
                        }
-#ifdef ACPI_ASL_COMPILER
-                       if (!acpi_gbl_disasm_flag &&
-                           (this_node->flags & ANOBJ_IS_EXTERNAL)) {
-                               this_node->flags |= IMPLICIT_EXTERNAL;
-                       }
-#endif
                }
 
                /* Special handling for the last segment (num_segments == 0) */
 
                else {
+#ifdef ACPI_ASL_COMPILER
+                       if (!acpi_gbl_disasm_flag
+                           && (this_node->flags & ANOBJ_IS_EXTERNAL)) {
+                               this_node->flags &= ~IMPLICIT_EXTERNAL;
+                       }
+#endif
+
                        /*
                         * Sanity typecheck of the target object:
                         *
index 539d775..d55dcc8 100644 (file)
@@ -495,7 +495,8 @@ acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
 
                /* Check if we are resolving a named reference within a package */
 
-               ACPI_ERROR_NAMESPACE(original_object->string.pointer, status);
+               ACPI_ERROR_NAMESPACE(&scope_info,
+                                    original_object->string.pointer, status);
                goto error_exit;
        }
 
index a410760..22c92d1 100644 (file)
@@ -49,6 +49,9 @@
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsnames")
 
+/* Local Prototypes */
+static void acpi_ns_normalize_pathname(char *original_path);
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_get_external_pathname
@@ -63,6 +66,7 @@ ACPI_MODULE_NAME("nsnames")
  *              for error and debug statements.
  *
  ******************************************************************************/
+
 char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
 {
        char *name_buffer;
@@ -352,3 +356,148 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
 
        return_PTR(name_buffer);
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_build_prefixed_pathname
+ *
+ * PARAMETERS:  prefix_scope        - Scope/Path that prefixes the internal path
+ *              internal_path       - Name or path of the namespace node
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Construct a fully qualified pathname from a concatenation of:
+ *              1) Path associated with the prefix_scope namespace node
+ *              2) External path representation of the Internal path
+ *
+ ******************************************************************************/
+
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+                                     const char *internal_path)
+{
+       acpi_status status;
+       char *full_path = NULL;
+       char *external_path = NULL;
+       char *prefix_path = NULL;
+       u32 prefix_path_length = 0;
+
+       /* If there is a prefix, get the pathname to it */
+
+       if (prefix_scope && prefix_scope->scope.node) {
+               prefix_path =
+                   acpi_ns_get_normalized_pathname(prefix_scope->scope.node,
+                                                   TRUE);
+               if (prefix_path) {
+                       prefix_path_length = strlen(prefix_path);
+               }
+       }
+
+       status = acpi_ns_externalize_name(ACPI_UINT32_MAX, internal_path,
+                                         NULL, &external_path);
+       if (ACPI_FAILURE(status)) {
+               goto cleanup;
+       }
+
+       /* Merge the prefix path and the path. 2 is for one dot and trailing null */
+
+       full_path =
+           ACPI_ALLOCATE_ZEROED(prefix_path_length + strlen(external_path) +
+                                2);
+       if (!full_path) {
+               goto cleanup;
+       }
+
+       /* Don't merge if the External path is already fully qualified */
+
+       if (prefix_path && (*external_path != '\\') && (*external_path != '^')) {
+               strcat(full_path, prefix_path);
+               if (prefix_path[1]) {
+                       strcat(full_path, ".");
+               }
+       }
+
+       acpi_ns_normalize_pathname(external_path);
+       strcat(full_path, external_path);
+
+cleanup:
+       if (prefix_path) {
+               ACPI_FREE(prefix_path);
+       }
+       if (external_path) {
+               ACPI_FREE(external_path);
+       }
+
+       return (full_path);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_normalize_pathname
+ *
+ * PARAMETERS:  original_path       - Path to be normalized, in External format
+ *
+ * RETURN:      The original path is processed in-place
+ *
+ * DESCRIPTION: Remove trailing underscores from each element of a path.
+ *
+ *              For example:  \A___.B___.C___ becomes \A.B.C
+ *
+ ******************************************************************************/
+
+static void acpi_ns_normalize_pathname(char *original_path)
+{
+       char *input_path = original_path;
+       char *new_path_buffer;
+       char *new_path;
+       u32 i;
+
+       /* Allocate a temp buffer in which to construct the new path */
+
+       new_path_buffer = ACPI_ALLOCATE_ZEROED(strlen(input_path) + 1);
+       new_path = new_path_buffer;
+       if (!new_path_buffer) {
+               return;
+       }
+
+       /* Special characters may appear at the beginning of the path */
+
+       if (*input_path == '\\') {
+               *new_path = *input_path;
+               new_path++;
+               input_path++;
+       }
+
+       while (*input_path == '^') {
+               *new_path = *input_path;
+               new_path++;
+               input_path++;
+       }
+
+       /* Remainder of the path */
+
+       while (*input_path) {
+
+               /* Do one nameseg at a time */
+
+               for (i = 0; (i < ACPI_NAME_SIZE) && *input_path; i++) {
+                       if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */
+                               *new_path = *input_path;
+                               new_path++;
+                       }
+
+                       input_path++;
+               }
+
+               /* Dot means that there are more namesegs to come */
+
+               if (*input_path == '.') {
+                       *new_path = *input_path;
+                       new_path++;
+                       input_path++;
+               }
+       }
+
+       *new_path = 0;
+       strcpy(original_path, new_path_buffer);
+       ACPI_FREE(new_path_buffer);
+}
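
For reference, the trimming performed by acpi_ns_normalize_pathname() can be reproduced in a few lines of plain C. This is a standalone sketch with a fixed-size local buffer, not the ACPICA implementation; it keeps the first character of each 4-character nameseg (which may legitimately be an underscore) and drops the underscore padding that follows:

    #include <stdio.h>
    #include <string.h>

    #define NAMESEG_SIZE 4

    /* Strip underscore padding from each nameseg: \A___.B___.C___ -> \A.B.C */
    static void normalize_path(char *path)
    {
        char out[128];
        char *dst = out;
        const char *src = path;
        int i;

        if (*src == '\\')           /* root prefix */
            *dst++ = *src++;
        while (*src == '^')         /* parent prefixes */
            *dst++ = *src++;

        while (*src) {
            for (i = 0; i < NAMESEG_SIZE && *src; i++, src++) {
                if (i == 0 || *src != '_')   /* first char kept even if '_' */
                    *dst++ = *src;
            }
            if (*src == '.')        /* more namesegs follow */
                *dst++ = *src++;
        }
        *dst = 0;
        strcpy(path, out);
    }

    int main(void)
    {
        char path[] = "\\A___.B___.C___";

        normalize_path(path);
        printf("%s\n", path);       /* prints \A.B.C */
        return 0;
    }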
index 5de8957..e91dbee 100644 (file)
@@ -417,6 +417,7 @@ acpi_ns_search_and_enter(u32 target_name,
        if (flags & ACPI_NS_EXTERNAL ||
            (walk_state && walk_state->opcode == AML_SCOPE_OP)) {
                new_node->flags |= ANOBJ_IS_EXTERNAL;
+               new_node->flags |= IMPLICIT_EXTERNAL;
        }
 #endif
 
index 783f4c8..9b51f65 100644 (file)
@@ -61,10 +61,10 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
  *
  * PARAMETERS:  handle              - Object handle (optional)
  *              pathname            - Object pathname (optional)
- *              external_params     - List of parameters to pass to method,
+ *              external_params     - List of parameters to pass to method,
  *                                    terminated by NULL. May be NULL
  *                                    if no parameters are being passed.
- *              return_buffer       - Where to put method's return value (if
+ *              return_buffer       - Where to put the object's return value (if
  *                                    any). If NULL, no value is returned.
  *              return_type         - Expected type of return object
  *
@@ -100,13 +100,14 @@ acpi_evaluate_object_typed(acpi_handle handle,
                free_buffer_on_error = TRUE;
        }
 
+       /* Get a handle here, in order to build an error message if needed */
+
+       target_handle = handle;
        if (pathname) {
                status = acpi_get_handle(handle, pathname, &target_handle);
                if (ACPI_FAILURE(status)) {
                        return_ACPI_STATUS(status);
                }
-       } else {
-               target_handle = handle;
        }
 
        full_pathname = acpi_ns_get_external_pathname(target_handle);
index eb9dfac..171e2fa 100644 (file)
@@ -361,7 +361,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
        /* Final exception check (may have been changed from code above) */
 
        if (ACPI_FAILURE(status)) {
-               ACPI_ERROR_NAMESPACE(path, status);
+               ACPI_ERROR_NAMESPACE(walk_state->scope_info, path, status);
 
                if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
                    ACPI_PARSE_EXECUTE) {
index 0bef6df..c0b1798 100644 (file)
@@ -372,16 +372,10 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
                         * external declaration opcode. Setting walk_state->Aml to
                         * walk_state->parser_state.Aml + 2 increments the
                         * walk_state->Aml past the object type and the paramcount of the
-                        * external opcode. For the error message, only print the AML
-                        * offset. We could attempt to print the name but this may cause
-                        * a segmentation fault when printing the namepath because the
-                        * AML may be incorrect.
+                        * external opcode.
                         */
-                       acpi_os_printf
-                           ("// Invalid external declaration at AML offset 0x%x.\n",
-                            walk_state->aml -
-                            walk_state->parser_state.aml_start);
                        walk_state->aml = walk_state->parser_state.aml + 2;
+                       walk_state->parser_state.aml = walk_state->aml;
                        return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
                }
 #endif
index 0264276..cd59dfe 100644 (file)
@@ -94,9 +94,11 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
        op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
        op->common.aml_opcode = opcode;
 
-       ACPI_DISASM_ONLY_MEMBERS(strncpy(op->common.aml_op_name,
-                                        (acpi_ps_get_opcode_info(opcode))->
-                                        name, sizeof(op->common.aml_op_name)));
+       ACPI_DISASM_ONLY_MEMBERS(acpi_ut_safe_strncpy(op->common.aml_op_name,
+                                                     (acpi_ps_get_opcode_info
+                                                      (opcode))->name,
+                                                     sizeof(op->common.
+                                                            aml_op_name)));
 }
 
 /*******************************************************************************
@@ -158,10 +160,10 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
                if (opcode == AML_SCOPE_OP) {
                        acpi_gbl_current_scope = op;
                }
-       }
 
-       if (gbl_capture_comments) {
-               ASL_CV_TRANSFER_COMMENTS(op);
+               if (acpi_gbl_capture_comments) {
+                       ASL_CV_TRANSFER_COMMENTS(op);
+               }
        }
 
        return (op);
index 615a885..cff7154 100644 (file)
@@ -163,6 +163,9 @@ acpi_debug_print(u32 requested_debug_level,
 {
        acpi_thread_id thread_id;
        va_list args;
+#ifdef ACPI_APPLICATION
+       int fill_count;
+#endif
 
        /* Check if debug output enabled */
 
@@ -202,10 +205,21 @@ acpi_debug_print(u32 requested_debug_level,
                acpi_os_printf("[%u] ", (u32)thread_id);
        }
 
-       acpi_os_printf("[%02ld] ", acpi_gbl_nesting_level);
-#endif
+       fill_count = 48 - acpi_gbl_nesting_level -
+           strlen(acpi_ut_trim_function_name(function_name));
+       if (fill_count < 0) {
+               fill_count = 0;
+       }
+
+       acpi_os_printf("[%02ld] %*s",
+                      acpi_gbl_nesting_level, acpi_gbl_nesting_level + 1, " ");
+       acpi_os_printf("%s%*s: ",
+                      acpi_ut_trim_function_name(function_name), fill_count,
+                      " ");
 
+#else
        acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name));
+#endif
 
        va_start(args, format);
        acpi_os_vprintf(format, args);
index 02cd2c2..55debba 100644 (file)
@@ -395,11 +395,6 @@ const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
        return (acpi_gbl_ref_class_names[object->reference.class]);
 }
 
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/*
- * Strings and procedures used for debug only
- */
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_get_mutex_name
@@ -433,6 +428,12 @@ const char *acpi_ut_get_mutex_name(u32 mutex_id)
        return (acpi_gbl_mutex_names[mutex_id]);
 }
 
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+/*
+ * Strings and procedures used for debug only
+ */
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_get_notify_name
index e336818..42388dc 100644 (file)
@@ -180,6 +180,78 @@ acpi_ut_predefined_bios_error(const char *module_name,
        va_end(arg_list);
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_prefixed_namespace_error
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              prefix_scope        - Scope/Path that prefixes the internal path
+ *              internal_path       - Name or path of the namespace node
+ *              lookup_status       - Exception code from NS lookup
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print error message with the full pathname constructed this way:
+ *
+ *                  prefix_scope_node_full_path.externalized_internal_path
+ *
+ * NOTE:        10/2017: Treat the major ns_lookup errors as firmware errors
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_prefixed_namespace_error(const char *module_name,
+                                u32 line_number,
+                                union acpi_generic_state *prefix_scope,
+                                const char *internal_path,
+                                acpi_status lookup_status)
+{
+       char *full_path;
+       const char *message;
+
+       /*
+        * Main cases:
+        * 1) Object creation, object must not already exist
+        * 2) Object lookup, object must exist
+        */
+       switch (lookup_status) {
+       case AE_ALREADY_EXISTS:
+
+               acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+               message = "Failure creating";
+               break;
+
+       case AE_NOT_FOUND:
+
+               acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+               message = "Failure looking up";
+               break;
+
+       default:
+
+               acpi_os_printf(ACPI_MSG_ERROR);
+               message = "Failure looking up";
+               break;
+       }
+
+       /* Concatenate the prefix path and the internal path */
+
+       full_path =
+           acpi_ns_build_prefixed_pathname(prefix_scope, internal_path);
+
+       acpi_os_printf("%s [%s], %s", message,
+                      full_path ? full_path : "Could not get pathname",
+                      acpi_format_exception(lookup_status));
+
+       if (full_path) {
+               ACPI_FREE(full_path);
+       }
+
+       ACPI_MSG_SUFFIX;
+}
+
+#ifdef __OBSOLETE_FUNCTION
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_namespace_error
@@ -240,6 +312,7 @@ acpi_ut_namespace_error(const char *module_name,
        ACPI_MSG_SUFFIX;
        ACPI_MSG_REDIRECT_END;
 }
+#endif
 
 /*******************************************************************************
  *
index 23e766d..45eeb0d 100644 (file)
@@ -206,7 +206,6 @@ acpi_status acpi_ut_init_globals(void)
        acpi_gbl_next_owner_id_offset = 0;
        acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
        acpi_gbl_osi_mutex = NULL;
-       acpi_gbl_max_loop_iterations = ACPI_MAX_LOOP_COUNT;
 
        /* Hardware oriented */
 
index 5f9c680..2055a85 100644 (file)
@@ -134,7 +134,7 @@ acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
 
        if ((count & 63) >= 32) {
                operand_ovl.part.hi = operand_ovl.part.lo;
-               operand_ovl.part.lo ^= operand_ovl.part.lo;
+               operand_ovl.part.lo = 0;
                count = (count & 63) - 32;
        }
        ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
@@ -171,7 +171,7 @@ acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
 
        if ((count & 63) >= 32) {
                operand_ovl.part.lo = operand_ovl.part.hi;
-               operand_ovl.part.hi ^= operand_ovl.part.hi;
+               operand_ovl.part.hi = 0;
                count = (count & 63) - 32;
        }
        ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
index 5863547..524ba93 100644 (file)
@@ -286,8 +286,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
                acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
        } else {
                ACPI_EXCEPTION((AE_INFO, status,
-                               "Thread %u could not acquire Mutex [0x%X]",
-                               (u32)this_thread_id, mutex_id));
+                               "Thread %u could not acquire Mutex [%s] (0x%X)",
+                               (u32)this_thread_id,
+                               acpi_ut_get_mutex_name(mutex_id), mutex_id));
        }
 
        return (status);
@@ -322,8 +323,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
         */
        if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
                ACPI_ERROR((AE_INFO,
-                           "Mutex [0x%X] is not acquired, cannot release",
-                           mutex_id));
+                           "Mutex [%s] (0x%X) is not acquired, cannot release",
+                           acpi_ut_get_mutex_name(mutex_id), mutex_id));
 
                return (AE_NOT_ACQUIRED);
        }
index 7926649..33a0970 100644 (file)
@@ -140,7 +140,7 @@ int acpi_ut_stricmp(char *string1, char *string2)
        return (c1 - c2);
 }
 
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
@@ -199,4 +199,13 @@ acpi_ut_safe_strncat(char *dest,
        strncat(dest, source, max_transfer_length);
        return (FALSE);
 }
+
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size)
+{
+       /* Always terminate destination string */
+
+       strncpy(dest, source, dest_size);
+       dest[dest_size - 1] = 0;
+}
+
 #endif
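
acpi_ut_safe_strncpy() exists because strncpy() leaves the destination unterminated when the source is at least dest_size characters long. A tiny user-space illustration of the difference (buffer names are made up for the example):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char unsafe[4], safe[4];

        strncpy(unsafe, "ABCDEF", sizeof(unsafe));  /* no terminator copied */
        strncpy(safe, "ABCDEF", sizeof(safe));
        safe[sizeof(safe) - 1] = 0;                 /* what the helper adds */

        /* unsafe[] now holds "ABCD" with no trailing NUL, so printing it
         * would read past the buffer; safe[] holds the truncated "ABC". */
        printf("%s\n", safe);
        return 0;
    }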
index 3175b13..f6b8dd2 100644 (file)
@@ -101,6 +101,8 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
        {"Windows 2012", NULL, 0, ACPI_OSI_WIN_8},      /* Windows 8 and Server 2012 - Added 08/2012 */
        {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8},      /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
        {"Windows 2015", NULL, 0, ACPI_OSI_WIN_10},     /* Windows 10 - Added 03/2015 */
+       {"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */
+       {"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */
 
        /* Feature Group Strings */
 
index 965fb5c..97f48d7 100644 (file)
@@ -52,10 +52,9 @@ static acpi_status
 acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit);
 
 static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product);
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product);
 
-static acpi_status
-acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum);
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum);
 
 /*******************************************************************************
  *
@@ -357,7 +356,7 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
  * FUNCTION:    acpi_ut_strtoul_multiply64
  *
  * PARAMETERS:  multiplicand            - Current accumulated converted integer
- *              multiplier              - Base/Radix
+ *              base                    - Base/Radix
  *              out_product             - Where the product is returned
  *
  * RETURN:      Status and 64-bit product
@@ -369,33 +368,40 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
  ******************************************************************************/
 
 static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product)
 {
-       u64 val;
+       u64 product;
+       u64 quotient;
 
        /* Exit if either operand is zero */
 
        *out_product = 0;
-       if (!multiplicand || !multiplier) {
+       if (!multiplicand || !base) {
                return (AE_OK);
        }
 
-       /* Check for 64-bit overflow before the actual multiplication */
-
-       acpi_ut_short_divide(ACPI_UINT64_MAX, (u32)multiplier, &val, NULL);
-       if (multiplicand > val) {
+       /*
+        * Check for 64-bit overflow before the actual multiplication.
+        *
+        * Notes: 64-bit division is often not supported on 32-bit platforms
+        * (it requires a library function), so ACPICA has a local 64-bit
+        * divide function. Also, the base is only ever the radix (8/10/16),
+        * so the 64/32 divide will always work.
+        */
+       acpi_ut_short_divide(ACPI_UINT64_MAX, base, &quotient, NULL);
+       if (multiplicand > quotient) {
                return (AE_NUMERIC_OVERFLOW);
        }
 
-       val = multiplicand * multiplier;
+       product = multiplicand * base;
 
        /* Check for 32-bit overflow if necessary */
 
-       if ((acpi_gbl_integer_bit_width == 32) && (val > ACPI_UINT32_MAX)) {
+       if ((acpi_gbl_integer_bit_width == 32) && (product > ACPI_UINT32_MAX)) {
                return (AE_NUMERIC_OVERFLOW);
        }
 
-       *out_product = val;
+       *out_product = product;
        return (AE_OK);
 }
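
Both converted helpers check for overflow before doing the arithmetic rather than after. A compact sketch of the same guard pattern with plain 64-bit types (an illustration, not the ACPICA code; digit decoding is limited to uppercase hex for brevity):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Accumulate one digit: value = value * base + digit, refusing to overflow. */
    static bool accumulate(uint64_t *value, uint32_t base, uint32_t digit)
    {
        if (base == 0)
            return false;
        if (*value > UINT64_MAX / base)     /* multiply would overflow */
            return false;
        *value *= base;

        if (digit > UINT64_MAX - *value)    /* add would overflow */
            return false;
        *value += digit;
        return true;
    }

    int main(void)
    {
        uint64_t v = 0;
        const char *s = "1A2B";

        for (; *s; s++)
            if (!accumulate(&v, 16, (*s <= '9') ? *s - '0' : *s - 'A' + 10))
                return 1;
        printf("0x%llx\n", (unsigned long long)v);  /* prints 0x1a2b */
        return 0;
    }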
 
@@ -404,7 +410,7 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
  * FUNCTION:    acpi_ut_strtoul_add64
  *
  * PARAMETERS:  addend1                 - Current accumulated converted integer
- *              addend2                 - New hex value/char
+ *              digit                   - New hex value/char
  *              out_sum                 - Where sum is returned (Accumulator)
  *
  * RETURN:      Status and 64-bit sum
@@ -415,17 +421,17 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
  *
  ******************************************************************************/
 
-static acpi_status acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum)
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum)
 {
        u64 sum;
 
        /* Check for 64-bit overflow before the actual addition */
 
-       if ((addend1 > 0) && (addend2 > (ACPI_UINT64_MAX - addend1))) {
+       if ((addend1 > 0) && (digit > (ACPI_UINT64_MAX - addend1))) {
                return (AE_NUMERIC_OVERFLOW);
        }
 
-       sum = addend1 + addend2;
+       sum = addend1 + digit;
 
        /* Check for 32-bit overflow if necessary */
 
index 3c8de88..633b4e2 100644 (file)
@@ -402,8 +402,8 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
        allocation->component = component;
        allocation->line = line;
 
-       strncpy(allocation->module, module, ACPI_MAX_MODULE_NAME);
-       allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
+       acpi_ut_safe_strncpy(allocation->module, (char *)module,
+                            ACPI_MAX_MODULE_NAME);
 
        if (!element) {
 
@@ -717,7 +717,7 @@ exit:
        if (!num_outstanding) {
                ACPI_INFO(("No outstanding allocations"));
        } else {
-               ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
+               ACPI_ERROR((AE_INFO, "%u (0x%X) Outstanding cache allocations",
                            num_outstanding, num_outstanding));
        }
 
index 950a1e5..9da4f8e 100644 (file)
@@ -96,8 +96,8 @@ ACPI_EXPORT_SYMBOL(acpi_error)
  *
  * RETURN:      None
  *
- * DESCRIPTION: Print "ACPI Exception" message with module/line/version info
- *              and decoded acpi_status.
+ * DESCRIPTION: Print an "ACPI Error" message with module/line/version
+ *              info as well as decoded acpi_status.
  *
  ******************************************************************************/
 void ACPI_INTERNAL_VAR_XFACE
@@ -111,10 +111,10 @@ acpi_exception(const char *module_name,
        /* For AE_OK, just print the message */
 
        if (ACPI_SUCCESS(status)) {
-               acpi_os_printf(ACPI_MSG_EXCEPTION);
+               acpi_os_printf(ACPI_MSG_ERROR);
 
        } else {
-               acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
+               acpi_os_printf(ACPI_MSG_ERROR "%s, ",
                               acpi_format_exception(status));
        }
 
index 6742f6c..9bff853 100644 (file)
@@ -1007,7 +1007,7 @@ skip:
        /* The record may be cleared by others, try read next record */
        if (len == -ENOENT)
                goto skip;
-       else if (len < sizeof(*rcd)) {
+       else if (len < 0 || len < sizeof(*rcd)) {
                rc = -EIO;
                goto out;
        }
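
The added len < 0 test matters because sizeof(*rcd) is unsigned: in the old "len < sizeof(*rcd)" comparison a negative len was converted to a large unsigned value and silently passed the check. A minimal demonstration of the pitfall with stand-in types (not the ERST structures):

    #include <stdio.h>

    struct rcd { char data[16]; };

    int main(void)
    {
        long len = -5;   /* an error value accidentally compared as a length */

        if (len < (long)sizeof(struct rcd))
            printf("signed compare: short/invalid record detected\n");

        /* With the unsigned compare the test is false: -5 converts to a
         * huge unsigned value, so a negative length slips through. */
        if (!((unsigned long)len < sizeof(struct rcd)))
            printf("unsigned compare: negative len looks 'big enough'\n");

        return 0;
    }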
index 6402f7f..16c4a10 100644 (file)
@@ -414,6 +414,51 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
 #endif
 }
 
+/*
+ * PCIe AER errors need to be sent to the AER driver for reporting and
+ * recovery. The GHES severities map to the following AER severities and
+ * require the following handling:
+ *
+ * GHES_SEV_CORRECTABLE -> AER_CORRECTABLE
+ *     These need to be reported by the AER driver but no recovery is
+ *     necessary.
+ * GHES_SEV_RECOVERABLE -> AER_NONFATAL
+ * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
+ *     These both need to be reported and recovered from by the AER driver.
+ * GHES_SEV_PANIC does not make it to this handling since the kernel must
+ *     panic.
+ */
+static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+       struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
+
+       if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+           pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+               unsigned int devfn;
+               int aer_severity;
+
+               devfn = PCI_DEVFN(pcie_err->device_id.device,
+                                 pcie_err->device_id.function);
+               aer_severity = cper_severity_to_aer(gdata->error_severity);
+
+               /*
+                * If firmware reset the component to contain
+                * the error, we must reinitialize it before
+                * use, so treat it as a fatal AER error.
+                */
+               if (gdata->flags & CPER_SEC_RESET)
+                       aer_severity = AER_FATAL;
+
+               aer_recover_queue(pcie_err->device_id.segment,
+                                 pcie_err->device_id.bus,
+                                 devfn, aer_severity,
+                                 (struct aer_capability_regs *)
+                                 pcie_err->aer_info);
+       }
+#endif
+}
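
The severity mapping described in the comment block above boils down to a small decision function. The sketch below uses local enums purely as stand-ins for the kernel's GHES_SEV_*/AER_* constants; the driver itself derives the AER severity via cper_severity_to_aer() and then upgrades it to AER_FATAL when CPER_SEC_RESET is set:

    #include <stdio.h>
    #include <stdbool.h>

    /* Local stand-ins for the kernel constants referenced above. */
    enum ghes_sev { GHES_SEV_CORRECTABLE, GHES_SEV_RECOVERABLE, GHES_SEV_PANIC };
    enum aer_sev  { AER_CORRECTABLE, AER_NONFATAL, AER_FATAL,
                    AER_NONE /* local sentinel, not a kernel constant */ };

    static enum aer_sev ghes_to_aer(enum ghes_sev sev, bool section_was_reset)
    {
        switch (sev) {
        case GHES_SEV_CORRECTABLE:
            return AER_CORRECTABLE;              /* report only, no recovery */
        case GHES_SEV_RECOVERABLE:
            return section_was_reset ? AER_FATAL /* firmware reset the device */
                                     : AER_NONFATAL;
        default:
            return AER_NONE;                     /* GHES_SEV_PANIC never gets here */
        }
    }

    int main(void)
    {
        printf("%d %d %d\n",
               ghes_to_aer(GHES_SEV_CORRECTABLE, false),
               ghes_to_aer(GHES_SEV_RECOVERABLE, false),
               ghes_to_aer(GHES_SEV_RECOVERABLE, true));
        return 0;
    }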
+
 static void ghes_do_proc(struct ghes *ghes,
                         const struct acpi_hest_generic_status *estatus)
 {
@@ -441,38 +486,9 @@ static void ghes_do_proc(struct ghes *ghes,
                        arch_apei_report_mem_error(sev, mem_err);
                        ghes_handle_memory_failure(gdata, sev);
                }
-#ifdef CONFIG_ACPI_APEI_PCIEAER
                else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
-                       struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
-
-                       if (sev == GHES_SEV_RECOVERABLE &&
-                           sec_sev == GHES_SEV_RECOVERABLE &&
-                           pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
-                           pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
-                               unsigned int devfn;
-                               int aer_severity;
-
-                               devfn = PCI_DEVFN(pcie_err->device_id.device,
-                                                 pcie_err->device_id.function);
-                               aer_severity = cper_severity_to_aer(gdata->error_severity);
-
-                               /*
-                                * If firmware reset the component to contain
-                                * the error, we must reinitialize it before
-                                * use, so treat it as a fatal AER error.
-                                */
-                               if (gdata->flags & CPER_SEC_RESET)
-                                       aer_severity = AER_FATAL;
-
-                               aer_recover_queue(pcie_err->device_id.segment,
-                                                 pcie_err->device_id.bus,
-                                                 devfn, aer_severity,
-                                                 (struct aer_capability_regs *)
-                                                 pcie_err->aer_info);
-                       }
-
+                       ghes_handle_aer(gdata);
                }
-#endif
                else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
                        struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
 
@@ -870,7 +886,6 @@ static void ghes_print_queued_estatus(void)
        struct ghes_estatus_node *estatus_node;
        struct acpi_hest_generic *generic;
        struct acpi_hest_generic_status *estatus;
-       u32 len, node_len;
 
        llnode = llist_del_all(&ghes_estatus_llist);
        /*
@@ -882,8 +897,6 @@ static void ghes_print_queued_estatus(void)
                estatus_node = llist_entry(llnode, struct ghes_estatus_node,
                                           llnode);
                estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
-               len = cper_estatus_len(estatus);
-               node_len = GHES_ESTATUS_NODE_LEN(len);
                generic = estatus_node->generic;
                ghes_print_estatus(NULL, generic, estatus);
                llnode = llnode->next;
index 13e7b56..19bc440 100644 (file)
@@ -70,6 +70,7 @@ static async_cookie_t async_cookie;
 static bool battery_driver_registered;
 static int battery_bix_broken_package;
 static int battery_notification_delay_ms;
+static int battery_full_discharging;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -214,9 +215,12 @@ static int acpi_battery_get_property(struct power_supply *psy,
                return -ENODEV;
        switch (psp) {
        case POWER_SUPPLY_PROP_STATUS:
-               if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
-                       val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
-               else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
+               if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
+                       if (battery_full_discharging && battery->rate_now == 0)
+                               val->intval = POWER_SUPPLY_STATUS_FULL;
+                       else
+                               val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+               } else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
                        val->intval = POWER_SUPPLY_STATUS_CHARGING;
                else if (acpi_battery_is_charged(battery))
                        val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1166,6 +1170,12 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
        return 0;
 }
 
+static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
+{
+       battery_full_discharging = 1;
+       return 0;
+}
+
 static const struct dmi_system_id bat_dmi_table[] __initconst = {
        {
                .callback = battery_bix_broken_package_quirk,
@@ -1183,6 +1193,22 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
                },
        },
+       {
+               .callback = battery_full_discharging_quirk,
+               .ident = "ASUS GL502VSK",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
+               },
+       },
+       {
+               .callback = battery_full_discharging_quirk,
+               .ident = "ASUS UX305LA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
+               },
+       },
        {},
 };
 
@@ -1237,13 +1263,11 @@ static int acpi_battery_add(struct acpi_device *device)
 
 #ifdef CONFIG_ACPI_PROCFS_POWER
        result = acpi_battery_add_fs(device);
-#endif
        if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
                acpi_battery_remove_fs(device);
-#endif
                goto fail;
        }
+#endif
 
        printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
                ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
index bf8e4d3..e1eee7a 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/input.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <acpi/button.h>
 
 #define PREFIX "ACPI: "
@@ -76,6 +77,22 @@ static const struct acpi_device_id button_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, button_device_ids);
 
+/*
+ * Some devices which don't even have a lid in any way have a broken _LID
+ * method (e.g. pointing to a floating gpio pin) causing spurious LID events.
+ */
+static const struct dmi_system_id lid_blacklst[] = {
+       {
+               /* GP-electronic T701 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
+               },
+       },
+       {}
+};
+
 static int acpi_button_add(struct acpi_device *device);
 static int acpi_button_remove(struct acpi_device *device);
 static void acpi_button_notify(struct acpi_device *device, u32 event);
@@ -210,6 +227,8 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
        }
        /* Send the platform triggered reliable event */
        if (do_update) {
+               acpi_handle_debug(device->handle, "ACPI LID %s\n",
+                                 state ? "open" : "closed");
                input_report_switch(button->input, SW_LID, !state);
                input_sync(button->input);
                button->last_state = !!state;
@@ -473,6 +492,9 @@ static int acpi_button_add(struct acpi_device *device)
        char *name, *class;
        int error;
 
+       if (!strcmp(hid, ACPI_BUTTON_HID_LID) && dmi_check_system(lid_blacklst))
+               return -ENODEV;
+
        button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
        if (!button)
                return -ENOMEM;
index 21c2843..06ea474 100644 (file)
@@ -949,7 +949,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
        }
 
        *val = 0;
-       if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
+       if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
@@ -988,7 +988,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 
-       if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
+       if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
@@ -1035,14 +1035,15 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
                *lowest_non_linear_reg, *nominal_reg;
        u64 high, low, nom, min_nonlinear;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
-       struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+       struct cppc_pcc_data *pcc_ss_data;
        int ret = 0, regs_in_pcc = 0;
 
-       if (!cpc_desc) {
+       if (!cpc_desc || pcc_ss_id < 0) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }
 
+       pcc_ss_data = pcc_data[pcc_ss_id];
        highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
        lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
        lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
@@ -1095,15 +1096,16 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
        struct cpc_register_resource *delivered_reg, *reference_reg,
                *ref_perf_reg, *ctr_wrap_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
-       struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+       struct cppc_pcc_data *pcc_ss_data;
        u64 delivered, reference, ref_perf, ctr_wrap_time;
        int ret = 0, regs_in_pcc = 0;
 
-       if (!cpc_desc) {
+       if (!cpc_desc || pcc_ss_id < 0) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }
 
+       pcc_ss_data = pcc_data[pcc_ss_id];
        delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
        reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
        ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
@@ -1169,14 +1171,15 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
        struct cpc_register_resource *desired_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
-       struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+       struct cppc_pcc_data *pcc_ss_data;
        int ret = 0;
 
-       if (!cpc_desc) {
+       if (!cpc_desc || pcc_ss_id < 0) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpu);
                return -ENODEV;
        }
 
+       pcc_ss_data = pcc_data[pcc_ss_id];
        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
 
        /*
@@ -1301,7 +1304,7 @@ unsigned int cppc_get_transition_latency(int cpu_num)
        struct cpc_desc *cpc_desc;
        struct cpc_register_resource *desired_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
-       struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+       struct cppc_pcc_data *pcc_ss_data;
 
        cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
        if (!cpc_desc)
@@ -1311,6 +1314,10 @@ unsigned int cppc_get_transition_latency(int cpu_num)
        if (!CPC_IN_PCC(desired_reg))
                return CPUFREQ_ETERNAL;
 
+       if (pcc_ss_id < 0)
+               return CPUFREQ_ETERNAL;
+
+       pcc_ss_data = pcc_data[pcc_ss_id];
        if (pcc_ss_data->pcc_mpar)
                latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
 
index e4ffaee..c4d0a1c 100644 (file)
@@ -990,7 +990,7 @@ void acpi_subsys_complete(struct device *dev)
         * the sleep state it is going out of and it has never been resumed till
         * now, resume it in case the firmware powered it up.
         */
-       if (dev->power.direct_complete && pm_resume_via_firmware())
+       if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
                pm_request_resume(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1039,10 +1039,28 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
  */
 int acpi_subsys_suspend_noirq(struct device *dev)
 {
-       if (dev_pm_smart_suspend_and_suspended(dev))
+       int ret;
+
+       if (dev_pm_smart_suspend_and_suspended(dev)) {
+               dev->power.may_skip_resume = true;
                return 0;
+       }
+
+       ret = pm_generic_suspend_noirq(dev);
+       if (ret)
+               return ret;
+
+       /*
+        * If the target system sleep state is suspend-to-idle, it is sufficient
+        * to check whether or not the device's wakeup settings are good for
+        * runtime PM.  Otherwise, the pm_resume_via_firmware() check will cause
+        * acpi_subsys_complete() to take care of fixing up the device's state
+        * anyway, if need be.
+        */
+       dev->power.may_skip_resume = device_may_wakeup(dev) ||
+                                       !device_can_wakeup(dev);
 
-       return pm_generic_suspend_noirq(dev);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
 
@@ -1052,6 +1070,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
  */
 int acpi_subsys_resume_noirq(struct device *dev)
 {
+       if (dev_pm_may_skip_resume(dev))
+               return 0;
+
        /*
         * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
         * during system suspend, so update their runtime PM status to "active"
@@ -1138,7 +1159,7 @@ int acpi_subsys_thaw_noirq(struct device *dev)
         * skip all of the subsequent "thaw" callbacks for the device.
         */
        if (dev_pm_smart_suspend_and_suspended(dev)) {
-               dev->power.direct_complete = true;
+               dev_pm_skip_next_resume_phases(dev);
                return 0;
        }
 
index 2441893..a041689 100644 (file)
@@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
        int count;
        struct acpi_hardware_id *id;
 
+       /* Avoid unnecessarily loading modules for non present devices. */
+       if (!acpi_device_is_present(acpi_dev))
+               return 0;
+
        /*
         * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
         * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
index da176c9..d9f38c6 100644 (file)
@@ -1516,7 +1516,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
        }
 
        acpi_handle_info(ec->handle,
-                        "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+                        "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
                         ec->gpe, ec->command_addr, ec->data_addr);
        return ret;
 }
@@ -1597,32 +1597,41 @@ static int acpi_ec_add(struct acpi_device *device)
 {
        struct acpi_ec *ec = NULL;
        int ret;
+       bool is_ecdt = false;
+       acpi_status status;
 
        strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_EC_CLASS);
 
-       ec = acpi_ec_alloc();
-       if (!ec)
-               return -ENOMEM;
-       if (ec_parse_device(device->handle, 0, ec, NULL) !=
-               AE_CTRL_TERMINATE) {
+       if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
+               is_ecdt = true;
+               ec = boot_ec;
+       } else {
+               ec = acpi_ec_alloc();
+               if (!ec)
+                       return -ENOMEM;
+               status = ec_parse_device(device->handle, 0, ec, NULL);
+               if (status != AE_CTRL_TERMINATE) {
                        ret = -EINVAL;
                        goto err_alloc;
+               }
        }
 
        if (acpi_is_boot_ec(ec)) {
-               boot_ec_is_ecdt = false;
-               /*
-                * Trust PNP0C09 namespace location rather than ECDT ID.
-                *
-                * But trust ECDT GPE rather than _GPE because of ASUS quirks,
-                * so do not change boot_ec->gpe to ec->gpe.
-                */
-               boot_ec->handle = ec->handle;
-               acpi_handle_debug(ec->handle, "duplicated.\n");
-               acpi_ec_free(ec);
-               ec = boot_ec;
-               ret = acpi_config_boot_ec(ec, ec->handle, true, false);
+               boot_ec_is_ecdt = is_ecdt;
+               if (!is_ecdt) {
+                       /*
+                        * Trust PNP0C09 namespace location rather than
+                        * ECDT ID. But trust ECDT GPE rather than _GPE
+                        * because of ASUS quirks, so do not change
+                        * boot_ec->gpe to ec->gpe.
+                        */
+                       boot_ec->handle = ec->handle;
+                       acpi_handle_debug(ec->handle, "duplicated.\n");
+                       acpi_ec_free(ec);
+                       ec = boot_ec;
+               }
+               ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt);
        } else
                ret = acpi_ec_setup(ec, true);
        if (ret)
@@ -1635,8 +1644,10 @@ static int acpi_ec_add(struct acpi_device *device)
        ret = !!request_region(ec->command_addr, 1, "EC cmd");
        WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
 
-       /* Reprobe devices depending on the EC */
-       acpi_walk_dep_device_list(ec->handle);
+       if (!is_ecdt) {
+               /* Reprobe devices depending on the EC */
+               acpi_walk_dep_device_list(ec->handle);
+       }
        acpi_handle_debug(ec->handle, "enumerated.\n");
        return 0;
 
@@ -1692,6 +1703,7 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
 
 static const struct acpi_device_id ec_device_ids[] = {
        {"PNP0C09", 0},
+       {ACPI_ECDT_HID, 0},
        {"", 0},
 };
 
@@ -1764,11 +1776,14 @@ static int __init acpi_ec_ecdt_start(void)
         * Note: ec->handle can be valid if this function is called after
         * acpi_ec_add(), hence the fast path.
         */
-       if (boot_ec->handle != ACPI_ROOT_OBJECT)
-               handle = boot_ec->handle;
-       else if (!acpi_ec_ecdt_get_handle(&handle))
-               return -ENODEV;
-       return acpi_config_boot_ec(boot_ec, handle, true, true);
+       if (boot_ec->handle == ACPI_ROOT_OBJECT) {
+               if (!acpi_ec_ecdt_get_handle(&handle))
+                       return -ENODEV;
+               boot_ec->handle = handle;
+       }
+
+       /* Register to ACPI bus with PM ops attached */
+       return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
 }
 
 #if 0
@@ -2022,6 +2037,12 @@ int __init acpi_ec_init(void)
 
        /* Drivers must be started after acpi_ec_query_init() */
        dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+       /*
+        * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
+        * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
+        * settings but invalid DSDT settings.
+        * https://bugzilla.kernel.org/show_bug.cgi?id=196847
+        */
        ecdt_fail = acpi_ec_ecdt_start();
        return ecdt_fail && dsdt_fail ? -ENODEV : 0;
 }
index 6c7dd7a..dd70d6c 100644 (file)
@@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
                return -ENOMEM;
        }
 
-       if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe))
+       if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
                goto error;
        if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
                                 &first_ec->global_lock))
index 46f0603..f13ba2c 100644 (file)
 
 #define MODULE_NAME    "acpi-ged"
 
+struct acpi_ged_device {
+       struct device *dev;
+       struct list_head event_list;
+};
+
 struct acpi_ged_event {
        struct list_head node;
        struct device *dev;
@@ -76,7 +81,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
        unsigned int irq;
        unsigned int gsi;
        unsigned int irqflags = IRQF_ONESHOT;
-       struct device *dev = context;
+       struct acpi_ged_device *geddev = context;
+       struct device *dev = geddev->dev;
        acpi_handle handle = ACPI_HANDLE(dev);
        acpi_handle evt_handle;
        struct resource r;
@@ -102,8 +108,6 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
                return AE_ERROR;
        }
 
-       dev_info(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
-
        event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL);
        if (!event)
                return AE_ERROR;
@@ -116,29 +120,58 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
        if (r.flags & IORESOURCE_IRQ_SHAREABLE)
                irqflags |= IRQF_SHARED;
 
-       if (devm_request_threaded_irq(dev, irq, NULL, acpi_ged_irq_handler,
-                                     irqflags, "ACPI:Ged", event)) {
+       if (request_threaded_irq(irq, NULL, acpi_ged_irq_handler,
+                                irqflags, "ACPI:Ged", event)) {
                dev_err(dev, "failed to setup event handler for irq %u\n", irq);
                return AE_ERROR;
        }
 
+       dev_dbg(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
+       list_add_tail(&event->node, &geddev->event_list);
        return AE_OK;
 }
 
 static int ged_probe(struct platform_device *pdev)
 {
+       struct acpi_ged_device *geddev;
        acpi_status acpi_ret;
 
+       geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL);
+       if (!geddev)
+               return -ENOMEM;
+
+       geddev->dev = &pdev->dev;
+       INIT_LIST_HEAD(&geddev->event_list);
        acpi_ret = acpi_walk_resources(ACPI_HANDLE(&pdev->dev), "_CRS",
-                                      acpi_ged_request_interrupt, &pdev->dev);
+                                      acpi_ged_request_interrupt, geddev);
        if (ACPI_FAILURE(acpi_ret)) {
                dev_err(&pdev->dev, "unable to parse the _CRS record\n");
                return -EINVAL;
        }
+       platform_set_drvdata(pdev, geddev);
 
        return 0;
 }
 
+static void ged_shutdown(struct platform_device *pdev)
+{
+       struct acpi_ged_device *geddev = platform_get_drvdata(pdev);
+       struct acpi_ged_event *event, *next;
+
+       list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+               free_irq(event->irq, event);
+               list_del(&event->node);
+               dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
+                        event->gsi, event->irq);
+       }
+}
+
+static int ged_remove(struct platform_device *pdev)
+{
+       ged_shutdown(pdev);
+       return 0;
+}
+
 static const struct acpi_device_id ged_acpi_ids[] = {
        {"ACPI0013"},
        {},
@@ -146,6 +179,8 @@ static const struct acpi_device_id ged_acpi_ids[] = {
 
 static struct platform_driver ged_driver = {
        .probe = ged_probe,
+       .remove = ged_remove,
+       .shutdown = ged_shutdown,
        .driver = {
                .name = MODULE_NAME,
                .acpi_match_table = ACPI_PTR(ged_acpi_ids),
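
The GED rework above stops using devm-managed interrupts, strings each requested event onto geddev->event_list, and releases the IRQs explicitly in the new .shutdown/.remove paths. A minimal user-space sketch of that teardown pattern, with invented names and a plain singly linked list standing in for the kernel's list_head and list_for_each_entry_safe():

#include <stdio.h>
#include <stdlib.h>

struct event {                           /* stand-in for struct acpi_ged_event */
        int irq;
        struct event *next;
};

struct ged {                             /* stand-in for struct acpi_ged_device */
        struct event *events;
};

static void ged_add(struct ged *g, int irq)
{
        struct event *e = malloc(sizeof(*e));

        if (!e)
                return;
        e->irq = irq;
        e->next = g->events;             /* corresponds to list_add_tail() in probe */
        g->events = e;
}

static void ged_shutdown(struct ged *g)
{
        struct event *e = g->events, *next;

        while (e) {                      /* "safe" walk: read ->next before freeing */
                next = e->next;
                printf("releasing IRQ %d\n", e->irq);
                free(e);                 /* corresponds to free_irq() + list_del() */
                e = next;
        }
        g->events = NULL;
}

int main(void)
{
        struct ged g = { .events = NULL };

        ged_add(&g, 40);
        ged_add(&g, 41);
        ged_shutdown(&g);
        return 0;
}
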
index fc8c43e..1d0a501 100644 (file)
@@ -115,6 +115,7 @@ bool acpi_device_is_present(const struct acpi_device *adev);
 bool acpi_device_is_battery(struct acpi_device *adev);
 bool acpi_device_is_first_physical_node(struct acpi_device *adev,
                                        const struct device *dev);
+int acpi_bus_register_early_device(int type);
 
 /* --------------------------------------------------------------------------
                      Device Matching and Notification
@@ -158,7 +159,7 @@ static inline void acpi_early_processor_osc(void) {}
    -------------------------------------------------------------------------- */
 struct acpi_ec {
        acpi_handle handle;
-       unsigned long gpe;
+       u32 gpe;
        unsigned long command_addr;
        unsigned long data_addr;
        bool global_lock;
index ff2580e..abeb4df 100644 (file)
@@ -1670,6 +1670,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
                                dev_name(&adev_dimm->dev));
                return -ENXIO;
        }
+       /*
+        * Record nfit_mem for the notification path to track back to
+        * the nfit sysfs attributes for this dimm device object.
+        */
+       dev_set_drvdata(&adev_dimm->dev, nfit_mem);
 
        /*
         * Until standardization materializes we need to consider 4
@@ -1752,9 +1757,11 @@ static void shutdown_dimm_notify(void *data)
                        sysfs_put(nfit_mem->flags_attr);
                        nfit_mem->flags_attr = NULL;
                }
-               if (adev_dimm)
+               if (adev_dimm) {
                        acpi_remove_notify_handler(adev_dimm->handle,
                                        ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
+                       dev_set_drvdata(&adev_dimm->dev, NULL);
+               }
        }
        mutex_unlock(&acpi_desc->init_mutex);
 }
index 917f1cc..8ccaae3 100644 (file)
@@ -460,8 +460,7 @@ int __init acpi_numa_init(void)
                                        srat_proc, ARRAY_SIZE(srat_proc), 0);
 
                cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
-                                           acpi_parse_memory_affinity,
-                                           NR_NODE_MEMBLKS);
+                                           acpi_parse_memory_affinity, 0);
        }
 
        /* SLIT: System Locality Information Table */
index bc3d914..85ad679 100644 (file)
@@ -612,7 +612,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
                        acpi_isa_irq_penalty[link->irq.active] +=
                                PIRQ_PENALTY_PCI_USING;
 
-               printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
+               pr_info("%s [%s] enabled at IRQ %d\n",
                       acpi_device_name(link->device),
                       acpi_device_bid(link->device), link->irq.active);
        }
index 90011aa..886ac8b 100644 (file)
@@ -400,7 +400,7 @@ static int intel_bxtwc_pmic_opregion_probe(struct platform_device *pdev)
                        &intel_bxtwc_pmic_opregion_data);
 }
 
-static struct platform_device_id bxt_wc_opregion_id_table[] = {
+static const struct platform_device_id bxt_wc_opregion_id_table[] = {
        { .name = "bxt_wcove_region" },
        {},
 };
@@ -412,9 +412,4 @@ static struct platform_driver intel_bxtwc_pmic_opregion_driver = {
        },
        .id_table = bxt_wc_opregion_id_table,
 };
-
-static int __init intel_bxtwc_pmic_opregion_driver_init(void)
-{
-       return platform_driver_register(&intel_bxtwc_pmic_opregion_driver);
-}
-device_initcall(intel_bxtwc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_bxtwc_pmic_opregion_driver);
index 109c1e9..f6d73a2 100644 (file)
@@ -131,7 +131,4 @@ static struct platform_driver chtdc_ti_pmic_opregion_driver = {
        },
        .id_table = chtdc_ti_pmic_opregion_id_table,
 };
-module_platform_driver(chtdc_ti_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Dollar Cove TI PMIC opregion driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(chtdc_ti_pmic_opregion_driver);
index 85636d7..9912422 100644 (file)
@@ -260,11 +260,10 @@ static int intel_cht_wc_pmic_opregion_probe(struct platform_device *pdev)
                        &intel_cht_wc_pmic_opregion_data);
 }
 
-static struct platform_device_id cht_wc_opregion_id_table[] = {
+static const struct platform_device_id cht_wc_opregion_id_table[] = {
        { .name = "cht_wcove_region" },
        {},
 };
-MODULE_DEVICE_TABLE(platform, cht_wc_opregion_id_table);
 
 static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
        .probe = intel_cht_wc_pmic_opregion_probe,
@@ -273,8 +272,4 @@ static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
        },
        .id_table = cht_wc_opregion_id_table,
 };
-module_platform_driver(intel_cht_wc_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Intel CHT Whiskey Cove PMIC operation region driver");
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(intel_cht_wc_pmic_opregion_driver);
index d7f1761..7ffa740 100644 (file)
@@ -201,9 +201,4 @@ static struct platform_driver intel_crc_pmic_opregion_driver = {
                .name = "crystal_cove_pmic",
        },
 };
-
-static int __init intel_crc_pmic_opregion_driver_init(void)
-{
-       return platform_driver_register(&intel_crc_pmic_opregion_driver);
-}
-device_initcall(intel_crc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_crc_pmic_opregion_driver);
index 6c99d3f..316e551 100644 (file)
@@ -278,9 +278,4 @@ static struct platform_driver intel_xpower_pmic_opregion_driver = {
                .name = "axp288_pmic_acpi",
        },
 };
-
-static int __init intel_xpower_pmic_opregion_driver_init(void)
-{
-       return platform_driver_register(&intel_xpower_pmic_opregion_driver);
-}
-device_initcall(intel_xpower_pmic_opregion_driver_init);
+builtin_platform_driver(intel_xpower_pmic_opregion_driver);
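
The PMIC opregion drivers above all drop their hand-rolled device_initcall() wrappers (and, where they were modular, the MODULE_* boilerplate) in favour of builtin_platform_driver(). A simplified sketch of what that helper amounts to, using a hypothetical my_driver rather than the exact macro text from the kernel headers:

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_driver my_driver = {      /* hypothetical driver */
        .driver = { .name = "my-driver" },
};

/* Roughly what builtin_platform_driver(my_driver) provides (simplified): */
static int __init my_driver_init(void)
{
        return platform_driver_register(&my_driver);
}
device_initcall(my_driver_init);
/* No exit/unregister path: the driver is built-in only. */
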
index e14e964..b0fe527 100644 (file)
@@ -1024,6 +1024,9 @@ static void acpi_device_get_busid(struct acpi_device *device)
        case ACPI_BUS_TYPE_SLEEP_BUTTON:
                strcpy(device->pnp.bus_id, "SLPF");
                break;
+       case ACPI_BUS_TYPE_ECDT_EC:
+               strcpy(device->pnp.bus_id, "ECDT");
+               break;
        default:
                acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
                /* Clean up trailing underscores (if any) */
@@ -1304,6 +1307,9 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
        case ACPI_BUS_TYPE_SLEEP_BUTTON:
                acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
                break;
+       case ACPI_BUS_TYPE_ECDT_EC:
+               acpi_add_id(pnp, ACPI_ECDT_HID);
+               break;
        }
 }
 
@@ -2046,6 +2052,21 @@ void acpi_bus_trim(struct acpi_device *adev)
 }
 EXPORT_SYMBOL_GPL(acpi_bus_trim);
 
+int acpi_bus_register_early_device(int type)
+{
+       struct acpi_device *device = NULL;
+       int result;
+
+       result = acpi_add_single_object(&device, NULL,
+                                       type, ACPI_STA_DEFAULT);
+       if (result)
+               return result;
+
+       device->flags.match_driver = true;
+       return device_attach(&device->dev);
+}
+EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
+
 static int acpi_bus_scan_fixed(void)
 {
        int result = 0;
index 8082871..46cde09 100644 (file)
@@ -367,10 +367,20 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
        {},
 };
 
+static bool ignore_blacklist;
+
+void __init acpi_sleep_no_blacklist(void)
+{
+       ignore_blacklist = true;
+}
+
 static void __init acpi_sleep_dmi_check(void)
 {
        int year;
 
+       if (ignore_blacklist)
+               return;
+
        if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012)
                acpi_nvs_nosave_s3();
 
@@ -697,7 +707,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
 #define ACPI_LPS0_ENTRY                5
 #define ACPI_LPS0_EXIT         6
 
-#define ACPI_S2IDLE_FUNC_MASK  ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
+#define ACPI_LPS0_SCREEN_MASK  ((1 << ACPI_LPS0_SCREEN_OFF) | (1 << ACPI_LPS0_SCREEN_ON))
+#define ACPI_LPS0_PLATFORM_MASK        ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
 
 static acpi_handle lps0_device_handle;
 static guid_t lps0_dsm_guid;
@@ -900,7 +911,8 @@ static int lps0_device_attach(struct acpi_device *adev,
        if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
                char bitmask = *(char *)out_obj->buffer.pointer;
 
-               if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
+               if ((bitmask & ACPI_LPS0_PLATFORM_MASK) == ACPI_LPS0_PLATFORM_MASK ||
+                   (bitmask & ACPI_LPS0_SCREEN_MASK) == ACPI_LPS0_SCREEN_MASK) {
                        lps0_dsm_func_mask = bitmask;
                        lps0_device_handle = adev->handle;
                        /*
index 06a150b..4fc59c3 100644 (file)
@@ -816,14 +816,8 @@ end:
  * interface:
  *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
  */
-
-/*
- * Currently, the GPE flooding prevention only supports to mask the GPEs
- * numbered from 00 to 7f.
- */
-#define ACPI_MASKABLE_GPE_MAX  0x80
-
-static u64 __initdata acpi_masked_gpes;
+#define ACPI_MASKABLE_GPE_MAX  0xFF
+static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
 
 static int __init acpi_gpe_set_masked_gpes(char *val)
 {
@@ -831,7 +825,7 @@ static int __init acpi_gpe_set_masked_gpes(char *val)
 
        if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
                return -EINVAL;
-       acpi_masked_gpes |= ((u64)1<<gpe);
+       set_bit(gpe, acpi_masked_gpes_map);
 
        return 1;
 }
@@ -843,15 +837,11 @@ void __init acpi_gpe_apply_masked_gpes(void)
        acpi_status status;
        u8 gpe;
 
-       for (gpe = 0;
-            gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
-            gpe++) {
-               if (acpi_masked_gpes & ((u64)1<<gpe)) {
-                       status = acpi_get_gpe_device(gpe, &handle);
-                       if (ACPI_SUCCESS(status)) {
-                               pr_info("Masking GPE 0x%x.\n", gpe);
-                               (void)acpi_mask_gpe(handle, gpe, TRUE);
-                       }
+       for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
+               status = acpi_get_gpe_device(gpe, &handle);
+               if (ACPI_SUCCESS(status)) {
+                       pr_info("Masking GPE 0x%x.\n", gpe);
+                       (void)acpi_mask_gpe(handle, gpe, TRUE);
                }
        }
 }
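
Switching from a single __initdata u64 to DECLARE_BITMAP() lets the acpi_mask_gpe= command-line parameter address GPEs up to 0xFF rather than 0x7F, and for_each_set_bit() then acts only on the GPEs that were actually requested. Below is a stand-alone sketch of the same set/iterate idea using a plain unsigned long array; the kernel helpers do this with better ergonomics and word-skipping iteration, and the sample GPE numbers are made up:

#include <stdio.h>
#include <limits.h>

#define GPE_MAX        0x100
#define BITS_PER_WORD  (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_WORDS   ((GPE_MAX + BITS_PER_WORD - 1) / BITS_PER_WORD)

static unsigned long masked_gpes[BITMAP_WORDS];  /* analogue of acpi_masked_gpes_map */

static void mask_gpe(unsigned int gpe)           /* analogue of set_bit(gpe, map) */
{
        masked_gpes[gpe / BITS_PER_WORD] |= 1UL << (gpe % BITS_PER_WORD);
}

int main(void)
{
        unsigned int gpe;

        mask_gpe(0x6e);   /* e.g. acpi_mask_gpe=0x6e on the kernel command line */
        mask_gpe(0x9d);   /* above 0x7f: impossible to express in the old u64 mask */

        /* open-coded walk; for_each_set_bit() does the same more efficiently */
        for (gpe = 0; gpe < GPE_MAX; gpe++)
                if (masked_gpes[gpe / BITS_PER_WORD] & (1UL << (gpe % BITS_PER_WORD)))
                        printf("Masking GPE 0x%x\n", gpe);

        return 0;
}
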
index 9d49a1a..78db976 100644 (file)
@@ -737,16 +737,17 @@ bool acpi_dev_found(const char *hid)
 }
 EXPORT_SYMBOL(acpi_dev_found);
 
-struct acpi_dev_present_info {
+struct acpi_dev_match_info {
+       const char *dev_name;
        struct acpi_device_id hid[2];
        const char *uid;
        s64 hrv;
 };
 
-static int acpi_dev_present_cb(struct device *dev, void *data)
+static int acpi_dev_match_cb(struct device *dev, void *data)
 {
        struct acpi_device *adev = to_acpi_device(dev);
-       struct acpi_dev_present_info *match = data;
+       struct acpi_dev_match_info *match = data;
        unsigned long long hrv;
        acpi_status status;
 
@@ -757,6 +758,8 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
            strcmp(adev->pnp.unique_id, match->uid)))
                return 0;
 
+       match->dev_name = acpi_dev_name(adev);
+
        if (match->hrv == -1)
                return 1;
 
@@ -789,20 +792,44 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
  */
 bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
 {
-       struct acpi_dev_present_info match = {};
+       struct acpi_dev_match_info match = {};
        struct device *dev;
 
        strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
        match.uid = uid;
        match.hrv = hrv;
 
-       dev = bus_find_device(&acpi_bus_type, NULL, &match,
-                             acpi_dev_present_cb);
-
+       dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
        return !!dev;
 }
 EXPORT_SYMBOL(acpi_dev_present);
 
+/**
+ * acpi_dev_get_first_match_name - Return name of first match of ACPI device
+ * @hid: Hardware ID of the device.
+ * @uid: Unique ID of the device, pass NULL to not check _UID
+ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+ *
+ * Return device name if a matching device was present
+ * at the moment of invocation, or NULL otherwise.
+ *
+ * See additional information in acpi_dev_present() as well.
+ */
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+       struct acpi_dev_match_info match = {};
+       struct device *dev;
+
+       strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
+       match.uid = uid;
+       match.hrv = hrv;
+
+       dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+       return dev ? match.dev_name : NULL;
+}
+EXPORT_SYMBOL(acpi_dev_get_first_match_name);
+
 /*
  * acpi_backlight= handling, this is done here rather then in video_detect.c
  * because __setup cannot be used in modules.
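
acpi_dev_present() keeps its boolean contract, while the new acpi_dev_get_first_match_name() also hands back the ACPI device name recorded by the shared match callback. A hedged usage sketch follows; the HID string, the function name, and the surrounding driver are invented for illustration:

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Hypothetical caller: look up the ACPI name of a companion device by _HID. */
static int example_find_companion(struct device *dev)
{
        const char *name;

        /* NULL _UID and hrv == -1 mean "do not filter on those fields". */
        name = acpi_dev_get_first_match_name("ABCD0001", NULL, -1);
        if (!name)
                return -ENODEV;

        dev_info(dev, "companion present as %s\n", name);
        return 0;
}
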
index a73596a..a7ecfde 100644 (file)
@@ -482,7 +482,8 @@ enum binder_deferred_state {
  * @tsk                   task_struct for group_leader of process
  *                        (invariant after initialized)
  * @files                 files_struct for process
- *                        (invariant after initialized)
+ *                        (protected by @files_lock)
+ * @files_lock            mutex to protect @files
  * @deferred_work_node:   element for binder_deferred_list
  *                        (protected by binder_deferred_lock)
  * @deferred_work:        bitmap of deferred work to perform
@@ -530,6 +531,7 @@ struct binder_proc {
        int pid;
        struct task_struct *tsk;
        struct files_struct *files;
+       struct mutex files_lock;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;
@@ -877,20 +879,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
 
 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 {
-       struct files_struct *files = proc->files;
        unsigned long rlim_cur;
        unsigned long irqs;
+       int ret;
 
-       if (files == NULL)
-               return -ESRCH;
-
-       if (!lock_task_sighand(proc->tsk, &irqs))
-               return -EMFILE;
-
+       mutex_lock(&proc->files_lock);
+       if (proc->files == NULL) {
+               ret = -ESRCH;
+               goto err;
+       }
+       if (!lock_task_sighand(proc->tsk, &irqs)) {
+               ret = -EMFILE;
+               goto err;
+       }
        rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
        unlock_task_sighand(proc->tsk, &irqs);
 
-       return __alloc_fd(files, 0, rlim_cur, flags);
+       ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
+err:
+       mutex_unlock(&proc->files_lock);
+       return ret;
 }
 
 /*
@@ -899,8 +907,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
 {
+       mutex_lock(&proc->files_lock);
        if (proc->files)
                __fd_install(proc->files, fd, file);
+       mutex_unlock(&proc->files_lock);
 }
 
 /*
@@ -910,9 +920,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 {
        int retval;
 
-       if (proc->files == NULL)
-               return -ESRCH;
-
+       mutex_lock(&proc->files_lock);
+       if (proc->files == NULL) {
+               retval = -ESRCH;
+               goto err;
+       }
        retval = __close_fd(proc->files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
@@ -920,7 +932,8 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;
-
+err:
+       mutex_unlock(&proc->files_lock);
        return retval;
 }
 
@@ -1947,6 +1960,26 @@ static void binder_send_failed_reply(struct binder_transaction *t,
        }
 }
 
+/**
+ * binder_cleanup_transaction() - cleans up undelivered transaction
+ * @t:         transaction that needs to be cleaned up
+ * @reason:    reason the transaction wasn't delivered
+ * @error_code:        error to return to caller (if synchronous call)
+ */
+static void binder_cleanup_transaction(struct binder_transaction *t,
+                                      const char *reason,
+                                      uint32_t error_code)
+{
+       if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
+               binder_send_failed_reply(t, error_code);
+       } else {
+               binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+                       "undelivered transaction %d, %s\n",
+                       t->debug_id, reason);
+               binder_free_transaction(t);
+       }
+}
+
 /**
  * binder_validate_object() - checks for a valid metadata object in a buffer.
  * @buffer:    binder_buffer that we're parsing.
@@ -4015,12 +4048,20 @@ retry:
                if (put_user(cmd, (uint32_t __user *)ptr)) {
                        if (t_from)
                                binder_thread_dec_tmpref(t_from);
+
+                       binder_cleanup_transaction(t, "put_user failed",
+                                                  BR_FAILED_REPLY);
+
                        return -EFAULT;
                }
                ptr += sizeof(uint32_t);
                if (copy_to_user(ptr, &tr, sizeof(tr))) {
                        if (t_from)
                                binder_thread_dec_tmpref(t_from);
+
+                       binder_cleanup_transaction(t, "copy_to_user failed",
+                                                  BR_FAILED_REPLY);
+
                        return -EFAULT;
                }
                ptr += sizeof(tr);
@@ -4090,15 +4131,9 @@ static void binder_release_work(struct binder_proc *proc,
                        struct binder_transaction *t;
 
                        t = container_of(w, struct binder_transaction, work);
-                       if (t->buffer->target_node &&
-                           !(t->flags & TF_ONE_WAY)) {
-                               binder_send_failed_reply(t, BR_DEAD_REPLY);
-                       } else {
-                               binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-                                       "undelivered transaction %d\n",
-                                       t->debug_id);
-                               binder_free_transaction(t);
-                       }
+
+                       binder_cleanup_transaction(t, "process died.",
+                                                  BR_DEAD_REPLY);
                } break;
                case BINDER_WORK_RETURN_ERROR: {
                        struct binder_error *e = container_of(
@@ -4605,7 +4640,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
        ret = binder_alloc_mmap_handler(&proc->alloc, vma);
        if (ret)
                return ret;
+       mutex_lock(&proc->files_lock);
        proc->files = get_files_struct(current);
+       mutex_unlock(&proc->files_lock);
        return 0;
 
 err_bad_arg:
@@ -4629,6 +4666,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
        spin_lock_init(&proc->outer_lock);
        get_task_struct(current->group_leader);
        proc->tsk = current->group_leader;
+       mutex_init(&proc->files_lock);
        INIT_LIST_HEAD(&proc->todo);
        proc->default_priority = task_nice(current);
        binder_dev = container_of(filp->private_data, struct binder_device,
@@ -4881,9 +4919,11 @@ static void binder_deferred_func(struct work_struct *work)
 
                files = NULL;
                if (defer & BINDER_DEFERRED_PUT_FILES) {
+                       mutex_lock(&proc->files_lock);
                        files = proc->files;
                        if (files)
                                proc->files = NULL;
+                       mutex_unlock(&proc->files_lock);
                }
 
                if (defer & BINDER_DEFERRED_FLUSH)
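
The binder changes above stop treating proc->files as invariant after initialization: every reader now takes the new files_lock, and the deferred-put path clears the pointer under the same lock, so a file-descriptor operation can no longer race with teardown. Here is a self-contained pthread sketch of that mutex-protected nullable-pointer pattern; the names are invented and nothing below is binder code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct proc {
        pthread_mutex_t files_lock;     /* analogue of proc->files_lock */
        int *files;                     /* analogue of proc->files */
};

/* Reader path: take the lock, check for NULL, use the object, drop the lock. */
static int use_files(struct proc *p)
{
        int ret = -1;                   /* analogue of returning -ESRCH */

        pthread_mutex_lock(&p->files_lock);
        if (p->files)
                ret = *p->files;        /* safe: cannot be freed concurrently */
        pthread_mutex_unlock(&p->files_lock);
        return ret;
}

/* Teardown path: clear the pointer under the same lock, free outside it. */
static void put_files(struct proc *p)
{
        int *files;

        pthread_mutex_lock(&p->files_lock);
        files = p->files;
        p->files = NULL;
        pthread_mutex_unlock(&p->files_lock);
        free(files);
}

int main(void)
{
        struct proc p = { .files_lock = PTHREAD_MUTEX_INITIALIZER };

        p.files = malloc(sizeof(*p.files));
        if (!p.files)
                return 1;
        *p.files = 42;

        printf("before teardown: %d\n", use_files(&p));
        put_files(&p);
        printf("after teardown:  %d\n", use_files(&p));
        return 0;
}
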
index 80854f7..0ae6971 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * MeidaTek AHCI SATA driver
+ * MediaTek AHCI SATA driver
  *
  * Copyright (c) 2017 MediaTek Inc.
  * Author: Ryder Lee <ryder.lee@mediatek.com>
@@ -25,7 +25,7 @@
 #include <linux/reset.h>
 #include "ahci.h"
 
-#define DRV_NAME               "ahci"
+#define DRV_NAME               "ahci-mtk"
 
 #define SYS_CFG                        0x14
 #define SYS_CFG_SATA_MSK       GENMASK(31, 30)
@@ -192,5 +192,5 @@ static struct platform_driver mtk_ahci_driver = {
 };
 module_platform_driver(mtk_ahci_driver);
 
-MODULE_DESCRIPTION("MeidaTek SATA AHCI Driver");
+MODULE_DESCRIPTION("MediaTek SATA AHCI Driver");
 MODULE_LICENSE("GPL v2");
index b6b0bf7..2685f28 100644 (file)
@@ -35,6 +35,8 @@
 
 /* port register default value */
 #define AHCI_PORT_PHY_1_CFG    0xa003fffe
+#define AHCI_PORT_PHY2_CFG     0x28184d1f
+#define AHCI_PORT_PHY3_CFG     0x0e081509
 #define AHCI_PORT_TRANS_CFG    0x08000029
 #define AHCI_PORT_AXICC_CFG    0x3fffffff
 
@@ -183,6 +185,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
                writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
                                qpriv->ecc_addr);
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+               writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+               writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
                        writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -190,6 +194,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 
        case AHCI_LS2080A:
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+               writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+               writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
                        writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -201,6 +207,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
                writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
                                qpriv->ecc_addr);
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+               writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+               writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
                        writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -212,6 +220,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
                writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
                       qpriv->ecc_addr);
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+               writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+               writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
                        writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -219,6 +229,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 
        case AHCI_LS2088A:
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+               writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+               writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
                        writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
index 2a88292..3c09122 100644 (file)
@@ -3082,13 +3082,19 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
        bit = fls(mask) - 1;
        mask &= ~(1 << bit);
 
-       /* Mask off all speeds higher than or equal to the current
-        * one.  Force 1.5Gbps if current SPD is not available.
+       /*
+        * Mask off all speeds higher than or equal to the current one.  At
+        * this point, if current SPD is not available and we previously
+        * recorded the link speed from SStatus, the driver has already
+        * masked off the highest bit so mask should already be 1 or 0.
+        * Otherwise, we should not force 1.5Gbps on a link where we have
+        * not previously recorded speed from SStatus.  Just return in this
+        * case.
         */
        if (spd > 1)
                mask &= (1 << (spd - 1)) - 1;
        else
-               mask &= 1;
+               return -EINVAL;
 
        /* were we already at the bottom? */
        if (!mask)
@@ -4443,6 +4449,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         * https://bugzilla.kernel.org/show_bug.cgi?id=121671
         */
        { "LITEON CX1-JB*-HP",  NULL,           ATA_HORKAGE_MAX_SEC_1024 },
+       { "LITEON EP1-*",       NULL,           ATA_HORKAGE_MAX_SEC_1024 },
 
        /* Devices we expect to fail diagnostics */
 
index ffd8d33..6db2e34 100644 (file)
@@ -82,7 +82,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
  * is issued to the device. However, if the controller clock is 133MHz,
  * the following tables must be used.
  */
-static struct pdc2027x_pio_timing {
+static const struct pdc2027x_pio_timing {
        u8 value0, value1, value2;
 } pdc2027x_pio_timing_tbl[] = {
        { 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
@@ -92,7 +92,7 @@ static struct pdc2027x_pio_timing {
        { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
 };
 
-static struct pdc2027x_mdma_timing {
+static const struct pdc2027x_mdma_timing {
        u8 value0, value1;
 } pdc2027x_mdma_timing_tbl[] = {
        { 0xdf, 0x5f }, /* MDMA mode 0 */
@@ -100,7 +100,7 @@ static struct pdc2027x_mdma_timing {
        { 0x69, 0x25 }, /* MDMA mode 2 */
 };
 
-static struct pdc2027x_udma_timing {
+static const struct pdc2027x_udma_timing {
        u8 value0, value1, value2;
 } pdc2027x_udma_timing_tbl[] = {
        { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
@@ -649,7 +649,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
  * @host: target ATA host
  * @board_idx: board identifier
  */
-static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
+static void pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
 {
        long pll_clock;
 
@@ -665,8 +665,6 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
 
        /* Adjust PLL control register */
        pdc_adjust_pll(host, pll_clock, board_idx);
-
-       return 0;
 }
 
 /**
@@ -753,8 +751,7 @@ static int pdc2027x_init_one(struct pci_dev *pdev,
        //pci_enable_intx(pdev);
 
        /* initialize adapter */
-       if (pdc_hardware_init(host, board_idx) != 0)
-               return -EIO;
+       pdc_hardware_init(host, board_idx);
 
        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
@@ -778,8 +775,7 @@ static int pdc2027x_reinit_one(struct pci_dev *pdev)
        else
                board_idx = PDC_UDMA_133;
 
-       if (pdc_hardware_init(host, board_idx))
-               return -EIO;
+       pdc_hardware_init(host, board_idx);
 
        ata_host_resume(host);
        return 0;
index dd286ad..9287ec9 100644 (file)
@@ -2258,7 +2258,7 @@ static int amb_probe(struct pci_dev *pci_dev,
 
        PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
                dev->atm_dev->number, dev, dev->atm_dev);
-               dev->atm_dev->dev_data = (void *) dev;
+       dev->atm_dev->dev_data = (void *) dev;
 
        // register our address
        amb_esi (dev, dev->atm_dev->esi);
index 126855e..6ebc4e4 100644 (file)
@@ -3083,8 +3083,8 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
            ASSERT(fore200e_vcc);
 
            len = sprintf(page,
-                         "  %08x  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
-                         (u32)(unsigned long)vcc,
+                         "  %pK  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
+                         vcc,
                          vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
                          fore200e_vcc->tx_pdu,
                          fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
index 6664aa5..5f8e009 100644 (file)
@@ -1586,8 +1586,8 @@ static int service_buffer_allocate(struct lanai_dev *lanai)
            lanai->pci);
        if (unlikely(lanai->service.start == NULL))
                return -ENOMEM;
-       DPRINTK("allocated service buffer at 0x%08lX, size %zu(%d)\n",
-           (unsigned long) lanai->service.start,
+       DPRINTK("allocated service buffer at %p, size %zu(%d)\n",
+           lanai->service.start,
            lanai_buf_size(&lanai->service),
            lanai_buf_size_cardorder(&lanai->service));
        /* Clear ServWrite register to be safe */
@@ -2218,9 +2218,9 @@ static int lanai_dev_open(struct atm_dev *atmdev)
 #endif
        memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
        lanai_timed_poll_start(lanai);
-       printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
+       printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=%p, irq=%u "
                "(%pMF)\n", lanai->number, (int) lanai->pci->revision,
-               (unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
+               lanai->base, lanai->pci->irq, atmdev->esi);
        printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
            "board_rev=%d\n", lanai->number,
            lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
index b8825f2..4b04471 100644 (file)
@@ -177,7 +177,7 @@ static int set_loopback(struct atm_dev *dev,int mode)
                default:
                        return -EINVAL;
        }
-        dev->ops->phy_put(dev, control, reg);
+       dev->ops->phy_put(dev, control, reg);
        PRIV(dev)->loop_mode = mode;
        return 0;
 }
index d7d2111..2c2ed9c 100644 (file)
@@ -136,6 +136,7 @@ config CFAG12864B_RATE
 
 config IMG_ASCII_LCD
        tristate "Imagination Technologies ASCII LCD Display"
+       depends on HAS_IOMEM
        default y if MIPS_MALTA || MIPS_SEAD3
        select SYSCON
        help
index 2f6614c..2415ad9 100644 (file)
@@ -91,22 +91,23 @@ config FIRMWARE_IN_KERNEL
        depends on FW_LOADER
        default y
        help
-         The kernel source tree includes a number of firmware 'blobs'
-         that are used by various drivers. The recommended way to
-         use these is to run "make firmware_install", which, after
-         converting ihex files to binary, copies all of the needed
-         binary files in firmware/ to /lib/firmware/ on your system so
-         that they can be loaded by userspace helpers on request.
+         Various drivers in the kernel source tree may require firmware,
+         which is generally available in your distribution's linux-firmware
+         package.
+
+         The linux-firmware package should install firmware into
+         /lib/firmware/ on your system, so they can be loaded by userspace
+         helpers on request.
 
          Enabling this option will build each required firmware blob
-         into the kernel directly, where request_firmware() will find
-         them without having to call out to userspace. This may be
-         useful if your root file system requires a device that uses
-         such firmware and do not wish to use an initrd.
+         specified by EXTRA_FIRMWARE into the kernel directly, where
+         request_firmware() will find them without having to call out to
+         userspace. This may be useful if your root file system requires a
+         device that uses such firmware and you do not wish to use an
+         initrd.
 
          This single option controls the inclusion of firmware for
-         every driver that uses request_firmware() and ships its
-         firmware in the kernel source tree, which avoids a
+         every driver that uses request_firmware(), which avoids a
          proliferation of 'Include firmware for xxx device' options.
 
          Say 'N' and let firmware be loaded from userspace.
@@ -235,6 +236,9 @@ config GENERIC_CPU_DEVICES
 config GENERIC_CPU_AUTOPROBE
        bool
 
+config GENERIC_CPU_VULNERABILITIES
+       bool
+
 config SOC_BUS
        bool
        select GLOB
index eb3af27..07532d8 100644 (file)
@@ -186,6 +186,11 @@ static void cache_associativity(struct cacheinfo *this_leaf)
                this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
 }
 
+static bool cache_node_is_unified(struct cacheinfo *this_leaf)
+{
+       return of_property_read_bool(this_leaf->of_node, "cache-unified");
+}
+
 static void cache_of_override_properties(unsigned int cpu)
 {
        int index;
@@ -194,6 +199,14 @@ static void cache_of_override_properties(unsigned int cpu)
 
        for (index = 0; index < cache_leaves(cpu); index++) {
                this_leaf = this_cpu_ci->info_list + index;
+               /*
+                * init_cache_level must setup the cache level correctly
+                * overriding the architecturally specified levels, so
+                * if type is NONE at this stage, it should be unified
+                */
+               if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+                   cache_node_is_unified(this_leaf))
+                       this_leaf->type = CACHE_TYPE_UNIFIED;
                cache_size(this_leaf);
                cache_get_line_size(this_leaf);
                cache_nr_sets(this_leaf);
index 58a9b60..d990384 100644 (file)
@@ -511,10 +511,58 @@ static void __init cpu_dev_register_generic(void)
 #endif
 }
 
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+
+ssize_t __weak cpu_show_meltdown(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v1(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v2(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "Not affected\n");
+}
+
+static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+
+static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+       &dev_attr_meltdown.attr,
+       &dev_attr_spectre_v1.attr,
+       &dev_attr_spectre_v2.attr,
+       NULL
+};
+
+static const struct attribute_group cpu_root_vulnerabilities_group = {
+       .name  = "vulnerabilities",
+       .attrs = cpu_root_vulnerabilities_attrs,
+};
+
+static void __init cpu_register_vulnerabilities(void)
+{
+       if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
+                              &cpu_root_vulnerabilities_group))
+               pr_err("Unable to register CPU vulnerabilities\n");
+}
+
+#else
+static inline void cpu_register_vulnerabilities(void) { }
+#endif
+
 void __init cpu_dev_init(void)
 {
        if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
                panic("Failed to register CPU subsystem");
 
        cpu_dev_register_generic();
+       cpu_register_vulnerabilities();
 }
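
With CONFIG_GENERIC_CPU_VULNERABILITIES selected, the group registered above appears as /sys/devices/system/cpu/vulnerabilities/{meltdown,spectre_v1,spectre_v2}, and the __weak handlers report "Not affected" until an architecture overrides them. A small user-space reader, for illustration only:

#include <stdio.h>

int main(void)
{
        static const char *files[] = { "meltdown", "spectre_v1", "spectre_v2" };
        char path[128], line[256];
        size_t i;

        for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
                FILE *f;

                snprintf(path, sizeof(path),
                         "/sys/devices/system/cpu/vulnerabilities/%s", files[i]);
                f = fopen(path, "r");
                if (!f) {
                        printf("%-10s: <attribute not present>\n", files[i]);
                        continue;
                }
                if (fgets(line, sizeof(line), f))
                        printf("%-10s: %s", files[i], line);  /* line keeps its '\n' */
                fclose(f);
        }
        return 0;
}
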
index cd6ccdc..372d10a 100644 (file)
@@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->probe)
+       if (isa_driver && isa_driver->probe)
                return isa_driver->probe(dev, to_isa_dev(dev)->id);
 
        return 0;
@@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->remove)
+       if (isa_driver && isa_driver->remove)
                return isa_driver->remove(dev, to_isa_dev(dev)->id);
 
        return 0;
@@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->shutdown)
+       if (isa_driver && isa_driver->shutdown)
                isa_driver->shutdown(dev, to_isa_dev(dev)->id);
 }
 
@@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->suspend)
+       if (isa_driver && isa_driver->suspend)
                return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
 
        return 0;
@@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->resume)
+       if (isa_driver && isa_driver->resume)
                return isa_driver->resume(dev, to_isa_dev(dev)->id);
 
        return 0;
index 0c80bea..528b241 100644 (file)
@@ -1032,15 +1032,12 @@ static int genpd_prepare(struct device *dev)
 static int genpd_finish_suspend(struct device *dev, bool poweroff)
 {
        struct generic_pm_domain *genpd;
-       int ret;
+       int ret = 0;
 
        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;
 
-       if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
-               return 0;
-
        if (poweroff)
                ret = pm_generic_poweroff_noirq(dev);
        else
@@ -1048,10 +1045,19 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
        if (ret)
                return ret;
 
-       if (genpd->dev_ops.stop && genpd->dev_ops.start) {
-               ret = pm_runtime_force_suspend(dev);
-               if (ret)
+       if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+               return 0;
+
+       if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+           !pm_runtime_status_suspended(dev)) {
+               ret = genpd_stop_dev(genpd, dev);
+               if (ret) {
+                       if (poweroff)
+                               pm_generic_restore_noirq(dev);
+                       else
+                               pm_generic_resume_noirq(dev);
                        return ret;
+               }
        }
 
        genpd_lock(genpd);
@@ -1085,7 +1091,7 @@ static int genpd_suspend_noirq(struct device *dev)
 static int genpd_resume_noirq(struct device *dev)
 {
        struct generic_pm_domain *genpd;
-       int ret = 0;
+       int ret;
 
        dev_dbg(dev, "%s()\n", __func__);
 
@@ -1094,21 +1100,21 @@ static int genpd_resume_noirq(struct device *dev)
                return -EINVAL;
 
        if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
-               return 0;
+               return pm_generic_resume_noirq(dev);
 
        genpd_lock(genpd);
        genpd_sync_power_on(genpd, true, 0);
        genpd->suspended_count--;
        genpd_unlock(genpd);
 
-       if (genpd->dev_ops.stop && genpd->dev_ops.start)
-               ret = pm_runtime_force_resume(dev);
-
-       ret = pm_generic_resume_noirq(dev);
-       if (ret)
-               return ret;
+       if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+           !pm_runtime_status_suspended(dev)) {
+               ret = genpd_start_dev(genpd, dev);
+               if (ret)
+                       return ret;
+       }
 
-       return ret;
+       return pm_generic_resume_noirq(dev);
 }
 
 /**
@@ -1135,8 +1141,9 @@ static int genpd_freeze_noirq(struct device *dev)
        if (ret)
                return ret;
 
-       if (genpd->dev_ops.stop && genpd->dev_ops.start)
-               ret = pm_runtime_force_suspend(dev);
+       if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+           !pm_runtime_status_suspended(dev))
+               ret = genpd_stop_dev(genpd, dev);
 
        return ret;
 }
@@ -1159,8 +1166,9 @@ static int genpd_thaw_noirq(struct device *dev)
        if (IS_ERR(genpd))
                return -EINVAL;
 
-       if (genpd->dev_ops.stop && genpd->dev_ops.start) {
-               ret = pm_runtime_force_resume(dev);
+       if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+           !pm_runtime_status_suspended(dev)) {
+               ret = genpd_start_dev(genpd, dev);
                if (ret)
                        return ret;
        }
@@ -1217,8 +1225,9 @@ static int genpd_restore_noirq(struct device *dev)
        genpd_sync_power_on(genpd, true, 0);
        genpd_unlock(genpd);
 
-       if (genpd->dev_ops.stop && genpd->dev_ops.start) {
-               ret = pm_runtime_force_resume(dev);
+       if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+           !pm_runtime_status_suspended(dev)) {
+               ret = genpd_start_dev(genpd, dev);
                if (ret)
                        return ret;
        }
@@ -2199,20 +2208,8 @@ int genpd_dev_pm_attach(struct device *dev)
 
        ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
                                        "#power-domain-cells", 0, &pd_args);
-       if (ret < 0) {
-               if (ret != -ENOENT)
-                       return ret;
-
-               /*
-                * Try legacy Samsung-specific bindings
-                * (for backwards compatibility of DT ABI)
-                */
-               pd_args.args_count = 0;
-               pd_args.np = of_parse_phandle(dev->of_node,
-                                               "samsung,power-domain", 0);
-               if (!pd_args.np)
-                       return -ENOENT;
-       }
+       if (ret < 0)
+               return ret;
 
        mutex_lock(&gpd_list_lock);
        pd = genpd_get_from_provider(&pd_args);
index db2f044..02a497e 100644 (file)
@@ -18,7 +18,6 @@
  */
 
 #include <linux/device.h>
-#include <linux/kallsyms.h>
 #include <linux/export.h>
 #include <linux/mutex.h>
 #include <linux/pm.h>
@@ -525,6 +524,88 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 
 /*------------------------- Resume routines -------------------------*/
 
+/**
+ * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
+ * @dev: Target device.
+ *
+ * Make the core skip the "early resume" and "resume" phases for @dev.
+ *
+ * This function can be called by middle-layer code during the "noirq" phase of
+ * system resume if necessary, but not by device drivers.
+ */
+void dev_pm_skip_next_resume_phases(struct device *dev)
+{
+       dev->power.is_late_suspended = false;
+       dev->power.is_suspended = false;
+}
+
+/**
+ * suspend_event - Return a "suspend" message for given "resume" one.
+ * @resume_msg: PM message representing a system-wide resume transition.
+ */
+static pm_message_t suspend_event(pm_message_t resume_msg)
+{
+       switch (resume_msg.event) {
+       case PM_EVENT_RESUME:
+               return PMSG_SUSPEND;
+       case PM_EVENT_THAW:
+       case PM_EVENT_RESTORE:
+               return PMSG_FREEZE;
+       case PM_EVENT_RECOVER:
+               return PMSG_HIBERNATE;
+       }
+       return PMSG_ON;
+}
+
+/**
+ * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Checks whether or not the device may be left in suspend after a system-wide
+ * transition to the working state.
+ */
+bool dev_pm_may_skip_resume(struct device *dev)
+{
+       return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+}
+
+static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
+                                               pm_message_t state,
+                                               const char **info_p)
+{
+       pm_callback_t callback;
+       const char *info;
+
+       if (dev->pm_domain) {
+               info = "noirq power domain ";
+               callback = pm_noirq_op(&dev->pm_domain->ops, state);
+       } else if (dev->type && dev->type->pm) {
+               info = "noirq type ";
+               callback = pm_noirq_op(dev->type->pm, state);
+       } else if (dev->class && dev->class->pm) {
+               info = "noirq class ";
+               callback = pm_noirq_op(dev->class->pm, state);
+       } else if (dev->bus && dev->bus->pm) {
+               info = "noirq bus ";
+               callback = pm_noirq_op(dev->bus->pm, state);
+       } else {
+               return NULL;
+       }
+
+       if (info_p)
+               *info_p = info;
+
+       return callback;
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+                                                pm_message_t state,
+                                                const char **info_p);
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+                                               pm_message_t state,
+                                               const char **info_p);
+
 /**
  * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
@@ -536,8 +617,9 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
  */
 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
-       pm_callback_t callback = NULL;
-       const char *info = NULL;
+       pm_callback_t callback;
+       const char *info;
+       bool skip_resume;
        int error = 0;
 
        TRACE_DEVICE(dev);
@@ -551,29 +633,61 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
 
        dpm_wait_for_superior(dev, async);
 
-       if (dev->pm_domain) {
-               info = "noirq power domain ";
-               callback = pm_noirq_op(&dev->pm_domain->ops, state);
-       } else if (dev->type && dev->type->pm) {
-               info = "noirq type ";
-               callback = pm_noirq_op(dev->type->pm, state);
-       } else if (dev->class && dev->class->pm) {
-               info = "noirq class ";
-               callback = pm_noirq_op(dev->class->pm, state);
-       } else if (dev->bus && dev->bus->pm) {
-               info = "noirq bus ";
-               callback = pm_noirq_op(dev->bus->pm, state);
+       skip_resume = dev_pm_may_skip_resume(dev);
+
+       callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+       if (callback)
+               goto Run;
+
+       if (skip_resume)
+               goto Skip;
+
+       if (dev_pm_smart_suspend_and_suspended(dev)) {
+               pm_message_t suspend_msg = suspend_event(state);
+
+               /*
+                * If "freeze" callbacks have been skipped during a transition
+                * related to hibernation, the subsequent "thaw" callbacks must
+                * be skipped too or bad things may happen.  Otherwise, resume
+                * callbacks are going to be run for the device, so its runtime
+                * PM status must be changed to reflect the new state after the
+                * transition under way.
+                */
+               if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
+                   !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
+                       if (state.event == PM_EVENT_THAW) {
+                               skip_resume = true;
+                               goto Skip;
+                       } else {
+                               pm_runtime_set_active(dev);
+                       }
+               }
        }
 
-       if (!callback && dev->driver && dev->driver->pm) {
+       if (dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }
 
+Run:
        error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
        dev->power.is_noirq_suspended = false;
 
- Out:
+       if (skip_resume) {
+               /*
+                * The device is going to be left in suspend, but it might not
+                * have been in runtime suspend before the system suspended, so
+                * its runtime PM status needs to be updated to avoid confusing
+                * the runtime PM framework when runtime PM is enabled for the
+                * device again.
+                */
+               pm_runtime_set_suspended(dev);
+               dev_pm_skip_next_resume_phases(dev);
+       }
+
+Out:
        complete_all(&dev->power.completion);
        TRACE_RESUME(error);
        return error;
@@ -666,6 +780,35 @@ void dpm_resume_noirq(pm_message_t state)
        dpm_noirq_end();
 }
 
+static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
+                                               pm_message_t state,
+                                               const char **info_p)
+{
+       pm_callback_t callback;
+       const char *info;
+
+       if (dev->pm_domain) {
+               info = "early power domain ";
+               callback = pm_late_early_op(&dev->pm_domain->ops, state);
+       } else if (dev->type && dev->type->pm) {
+               info = "early type ";
+               callback = pm_late_early_op(dev->type->pm, state);
+       } else if (dev->class && dev->class->pm) {
+               info = "early class ";
+               callback = pm_late_early_op(dev->class->pm, state);
+       } else if (dev->bus && dev->bus->pm) {
+               info = "early bus ";
+               callback = pm_late_early_op(dev->bus->pm, state);
+       } else {
+               return NULL;
+       }
+
+       if (info_p)
+               *info_p = info;
+
+       return callback;
+}
+
 /**
  * device_resume_early - Execute an "early resume" callback for given device.
  * @dev: Device to handle.
@@ -676,8 +819,8 @@ void dpm_resume_noirq(pm_message_t state)
  */
 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
-       pm_callback_t callback = NULL;
-       const char *info = NULL;
+       pm_callback_t callback;
+       const char *info;
        int error = 0;
 
        TRACE_DEVICE(dev);
@@ -691,19 +834,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
 
        dpm_wait_for_superior(dev, async);
 
-       if (dev->pm_domain) {
-               info = "early power domain ";
-               callback = pm_late_early_op(&dev->pm_domain->ops, state);
-       } else if (dev->type && dev->type->pm) {
-               info = "early type ";
-               callback = pm_late_early_op(dev->type->pm, state);
-       } else if (dev->class && dev->class->pm) {
-               info = "early class ";
-               callback = pm_late_early_op(dev->class->pm, state);
-       } else if (dev->bus && dev->bus->pm) {
-               info = "early bus ";
-               callback = pm_late_early_op(dev->bus->pm, state);
-       }
+       callback = dpm_subsys_resume_early_cb(dev, state, &info);
 
        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
@@ -1074,6 +1205,77 @@ static pm_message_t resume_event(pm_message_t sleep_state)
        return PMSG_ON;
 }
 
+static void dpm_superior_set_must_resume(struct device *dev)
+{
+       struct device_link *link;
+       int idx;
+
+       if (dev->parent)
+               dev->parent->power.must_resume = true;
+
+       idx = device_links_read_lock();
+
+       list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+               link->supplier->power.must_resume = true;
+
+       device_links_read_unlock(idx);
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+                                                pm_message_t state,
+                                                const char **info_p)
+{
+       pm_callback_t callback;
+       const char *info;
+
+       if (dev->pm_domain) {
+               info = "noirq power domain ";
+               callback = pm_noirq_op(&dev->pm_domain->ops, state);
+       } else if (dev->type && dev->type->pm) {
+               info = "noirq type ";
+               callback = pm_noirq_op(dev->type->pm, state);
+       } else if (dev->class && dev->class->pm) {
+               info = "noirq class ";
+               callback = pm_noirq_op(dev->class->pm, state);
+       } else if (dev->bus && dev->bus->pm) {
+               info = "noirq bus ";
+               callback = pm_noirq_op(dev->bus->pm, state);
+       } else {
+               return NULL;
+       }
+
+       if (info_p)
+               *info_p = info;
+
+       return callback;
+}
+
+static bool device_must_resume(struct device *dev, pm_message_t state,
+                              bool no_subsys_suspend_noirq)
+{
+       pm_message_t resume_msg = resume_event(state);
+
+       /*
+        * If all of the device driver's "noirq", "late" and "early" callbacks
+        * are invoked directly by the core, the decision to allow the device to
+        * stay in suspend can be based on its current runtime PM status and its
+        * wakeup settings.
+        */
+       if (no_subsys_suspend_noirq &&
+           !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
+           !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
+           !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
+               return !pm_runtime_status_suspended(dev) &&
+                       (resume_msg.event != PM_EVENT_RESUME ||
+                        (device_can_wakeup(dev) && !device_may_wakeup(dev)));
+
+       /*
+        * The only safe strategy here is to require that if the device may not
+        * be left in suspend, resume callbacks must be invoked for it.
+        */
+       return !dev->power.may_skip_resume;
+}
+
 /**
  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
  * @dev: Device to handle.
@@ -1085,8 +1287,9 @@ static pm_message_t resume_event(pm_message_t sleep_state)
  */
 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 {
-       pm_callback_t callback = NULL;
-       const char *info = NULL;
+       pm_callback_t callback;
+       const char *info;
+       bool no_subsys_cb = false;
        int error = 0;
 
        TRACE_DEVICE(dev);
@@ -1105,30 +1308,40 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;
 
-       if (dev->pm_domain) {
-               info = "noirq power domain ";
-               callback = pm_noirq_op(&dev->pm_domain->ops, state);
-       } else if (dev->type && dev->type->pm) {
-               info = "noirq type ";
-               callback = pm_noirq_op(dev->type->pm, state);
-       } else if (dev->class && dev->class->pm) {
-               info = "noirq class ";
-               callback = pm_noirq_op(dev->class->pm, state);
-       } else if (dev->bus && dev->bus->pm) {
-               info = "noirq bus ";
-               callback = pm_noirq_op(dev->bus->pm, state);
-       }
+       callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+       if (callback)
+               goto Run;
 
-       if (!callback && dev->driver && dev->driver->pm) {
+       no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
+
+       if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+               goto Skip;
+
+       if (dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }
 
+Run:
        error = dpm_run_callback(callback, dev, state, info);
-       if (!error)
-               dev->power.is_noirq_suspended = true;
-       else
+       if (error) {
                async_error = error;
+               goto Complete;
+       }
+
+Skip:
+       dev->power.is_noirq_suspended = true;
+
+       if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
+               dev->power.must_resume = dev->power.must_resume ||
+                               atomic_read(&dev->power.usage_count) > 1 ||
+                               device_must_resume(dev, state, no_subsys_cb);
+       } else {
+               dev->power.must_resume = true;
+       }
+
+       if (dev->power.must_resume)
+               dpm_superior_set_must_resume(dev);
 
 Complete:
        complete_all(&dev->power.completion);
@@ -1234,6 +1447,50 @@ int dpm_suspend_noirq(pm_message_t state)
        return ret;
 }
 
+static void dpm_propagate_wakeup_to_parent(struct device *dev)
+{
+       struct device *parent = dev->parent;
+
+       if (!parent)
+               return;
+
+       spin_lock_irq(&parent->power.lock);
+
+       if (dev->power.wakeup_path && !parent->power.ignore_children)
+               parent->power.wakeup_path = true;
+
+       spin_unlock_irq(&parent->power.lock);
+}
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+                                               pm_message_t state,
+                                               const char **info_p)
+{
+       pm_callback_t callback;
+       const char *info;
+
+       if (dev->pm_domain) {
+               info = "late power domain ";
+               callback = pm_late_early_op(&dev->pm_domain->ops, state);
+       } else if (dev->type && dev->type->pm) {
+               info = "late type ";
+               callback = pm_late_early_op(dev->type->pm, state);
+       } else if (dev->class && dev->class->pm) {
+               info = "late class ";
+               callback = pm_late_early_op(dev->class->pm, state);
+       } else if (dev->bus && dev->bus->pm) {
+               info = "late bus ";
+               callback = pm_late_early_op(dev->bus->pm, state);
+       } else {
+               return NULL;
+       }
+
+       if (info_p)
+               *info_p = info;
+
+       return callback;
+}
+
 /**
  * __device_suspend_late - Execute a "late suspend" callback for given device.
  * @dev: Device to handle.
@@ -1244,8 +1501,8 @@ int dpm_suspend_noirq(pm_message_t state)
  */
 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
 {
-       pm_callback_t callback = NULL;
-       const char *info = NULL;
+       pm_callback_t callback;
+       const char *info;
        int error = 0;
 
        TRACE_DEVICE(dev);
@@ -1266,30 +1523,29 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;
 
-       if (dev->pm_domain) {
-               info = "late power domain ";
-               callback = pm_late_early_op(&dev->pm_domain->ops, state);
-       } else if (dev->type && dev->type->pm) {
-               info = "late type ";
-               callback = pm_late_early_op(dev->type->pm, state);
-       } else if (dev->class && dev->class->pm) {
-               info = "late class ";
-               callback = pm_late_early_op(dev->class->pm, state);
-       } else if (dev->bus && dev->bus->pm) {
-               info = "late bus ";
-               callback = pm_late_early_op(dev->bus->pm, state);
-       }
+       callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+       if (callback)
+               goto Run;
 
-       if (!callback && dev->driver && dev->driver->pm) {
+       if (dev_pm_smart_suspend_and_suspended(dev) &&
+           !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+               goto Skip;
+
+       if (dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }
 
+Run:
        error = dpm_run_callback(callback, dev, state, info);
-       if (!error)
-               dev->power.is_late_suspended = true;
-       else
+       if (error) {
                async_error = error;
+               goto Complete;
+       }
+       dpm_propagate_wakeup_to_parent(dev);
+
+Skip:
+       dev->power.is_late_suspended = true;
 
 Complete:
        TRACE_SUSPEND(error);
@@ -1420,11 +1676,17 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
        return error;
 }
 
-static void dpm_clear_suppliers_direct_complete(struct device *dev)
+static void dpm_clear_superiors_direct_complete(struct device *dev)
 {
        struct device_link *link;
        int idx;
 
+       if (dev->parent) {
+               spin_lock_irq(&dev->parent->power.lock);
+               dev->parent->power.direct_complete = false;
+               spin_unlock_irq(&dev->parent->power.lock);
+       }
+
        idx = device_links_read_lock();
 
        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
@@ -1485,6 +1747,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                dev->power.direct_complete = false;
        }
 
+       dev->power.may_skip_resume = false;
+       dev->power.must_resume = false;
+
        dpm_watchdog_set(&wd, dev);
        device_lock(dev);
 
@@ -1528,20 +1793,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
  End:
        if (!error) {
-               struct device *parent = dev->parent;
-
                dev->power.is_suspended = true;
-               if (parent) {
-                       spin_lock_irq(&parent->power.lock);
-
-                       dev->parent->power.direct_complete = false;
-                       if (dev->power.wakeup_path
-                           && !dev->parent->power.ignore_children)
-                               dev->parent->power.wakeup_path = true;
+               if (device_may_wakeup(dev))
+                       dev->power.wakeup_path = true;
 
-                       spin_unlock_irq(&parent->power.lock);
-               }
-               dpm_clear_suppliers_direct_complete(dev);
+               dpm_propagate_wakeup_to_parent(dev);
+               dpm_clear_superiors_direct_complete(dev);
        }
 
        device_unlock(dev);
@@ -1650,8 +1907,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
        if (dev->power.syscore)
                return 0;
 
-       WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
-               !pm_runtime_enabled(dev));
+       WARN_ON(!pm_runtime_enabled(dev) &&
+               dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+                                             DPM_FLAG_LEAVE_SUSPENDED));
 
        /*
         * If a device's parent goes into runtime suspend at the wrong time,
@@ -1663,7 +1921,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
        device_lock(dev);
 
-       dev->power.wakeup_path = device_may_wakeup(dev);
+       dev->power.wakeup_path = false;
 
        if (dev->power.no_pm_callbacks) {
                ret = 1;        /* Let device go direct_complete */
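
The main.c hunks above wire DPM_FLAG_LEAVE_SUSPENDED and DPM_FLAG_SMART_SUSPEND into the late/noirq suspend phases. As a minimal sketch of how a driver might opt in (the "foo" names and the bare probe are illustrative, not part of this merge; driver registration is omitted):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical probe: ask the PM core to reuse the runtime-PM suspended
     * state across system sleep and to leave the device suspended on resume
     * when nothing needs it.  Both flags are hints only; note that the
     * WARN_ON() added to device_prepare() above expects runtime PM to be
     * enabled before they are set.
     */
    static int foo_probe(struct platform_device *pdev)
    {
            pm_runtime_enable(&pdev->dev);
            dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
                                                DPM_FLAG_LEAVE_SUSPENDED);
            return 0;
    }
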
index 7beee75..21244c5 100644 (file)
@@ -41,20 +41,15 @@ extern void dev_pm_disable_wake_irq_check(struct device *dev);
 
 #ifdef CONFIG_PM_SLEEP
 
-extern int device_wakeup_attach_irq(struct device *dev,
-                                   struct wake_irq *wakeirq);
+extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq);
 extern void device_wakeup_detach_irq(struct device *dev);
 extern void device_wakeup_arm_wake_irqs(void);
 extern void device_wakeup_disarm_wake_irqs(void);
 
 #else
 
-static inline int
-device_wakeup_attach_irq(struct device *dev,
-                        struct wake_irq *wakeirq)
-{
-       return 0;
-}
+static inline void device_wakeup_attach_irq(struct device *dev,
+                                           struct wake_irq *wakeirq) {}
 
 static inline void device_wakeup_detach_irq(struct device *dev)
 {
index 027d159..8bef3cb 100644 (file)
@@ -276,7 +276,8 @@ static int rpm_get_suppliers(struct device *dev)
                        continue;
 
                retval = pm_runtime_get_sync(link->supplier);
-               if (retval < 0) {
+               /* Ignore suppliers with disabled runtime PM. */
+               if (retval < 0 && retval != -EACCES) {
                        pm_runtime_put_noidle(link->supplier);
                        return retval;
                }
@@ -1612,22 +1613,34 @@ void pm_runtime_drop_link(struct device *dev)
        spin_unlock_irq(&dev->power.lock);
 }
 
+static bool pm_runtime_need_not_resume(struct device *dev)
+{
+       return atomic_read(&dev->power.usage_count) <= 1 &&
+               (atomic_read(&dev->power.child_count) == 0 ||
+                dev->power.ignore_children);
+}
+
 /**
  * pm_runtime_force_suspend - Force a device into suspend state if needed.
  * @dev: Device to suspend.
  *
  * Disable runtime PM so we safely can check the device's runtime PM status and
- * if it is active, invoke it's .runtime_suspend callback to bring it into
- * suspend state. Keep runtime PM disabled to preserve the state unless we
- * encounter errors.
+ * if it is active, invoke its ->runtime_suspend callback to suspend it and
+ * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
+ * usage and children counters don't indicate that the device was in use before
+ * the system-wide transition under way, decrement its parent's children counter
+ * (if there is a parent).  Keep runtime PM disabled to preserve the state
+ * unless we encounter errors.
  *
  * Typically this function may be invoked from a system suspend callback to make
- * sure the device is put into low power state.
+ * sure the device is put into low power state and it should only be used during
+ * system-wide PM transitions to sleep states.  It assumes that the analogous
+ * pm_runtime_force_resume() will be used to resume the device.
  */
 int pm_runtime_force_suspend(struct device *dev)
 {
        int (*callback)(struct device *);
-       int ret = 0;
+       int ret;
 
        pm_runtime_disable(dev);
        if (pm_runtime_status_suspended(dev))
@@ -1635,27 +1648,23 @@ int pm_runtime_force_suspend(struct device *dev)
 
        callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 
-       if (!callback) {
-               ret = -ENOSYS;
-               goto err;
-       }
-
-       ret = callback(dev);
+       ret = callback ? callback(dev) : 0;
        if (ret)
                goto err;
 
        /*
-        * Increase the runtime PM usage count for the device's parent, in case
-        * when we find the device being used when system suspend was invoked.
-        * This informs pm_runtime_force_resume() to resume the parent
-        * immediately, which is needed to be able to resume its children,
-        * when not deferring the resume to be managed via runtime PM.
+        * If the device can stay in suspend after the system-wide transition
+        * to the working state that will follow, drop the children counter of
+        * its parent, but set its status to RPM_SUSPENDED anyway in case this
+        * function will be called again for it in the meantime.
         */
-       if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
-               pm_runtime_get_noresume(dev->parent);
+       if (pm_runtime_need_not_resume(dev))
+               pm_runtime_set_suspended(dev);
+       else
+               __update_runtime_status(dev, RPM_SUSPENDED);
 
-       pm_runtime_set_suspended(dev);
        return 0;
+
 err:
        pm_runtime_enable(dev);
        return ret;
@@ -1668,13 +1677,9 @@ EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
  *
  * Prior invoking this function we expect the user to have brought the device
  * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and brings the device into full power, if it is expected to be
- * used on system resume. To distinguish that, we check whether the runtime PM
- * usage count is greater than 1 (the PM core increases the usage count in the
- * system PM prepare phase), as that indicates a real user (such as a subsystem,
- * driver, userspace, etc.) is using it. If that is the case, the device is
- * expected to be used on system resume as well, so then we resume it. In the
- * other case, we defer the resume to be managed via runtime PM.
+ * those actions and bring the device into full power, if it is expected to be
+ * used on system resume.  In the other case, we defer the resume to be managed
+ * via runtime PM.
  *
  * Typically this function may be invoked from a system resume callback.
  */
@@ -1683,32 +1688,18 @@ int pm_runtime_force_resume(struct device *dev)
        int (*callback)(struct device *);
        int ret = 0;
 
-       callback = RPM_GET_CALLBACK(dev, runtime_resume);
-
-       if (!callback) {
-               ret = -ENOSYS;
-               goto out;
-       }
-
-       if (!pm_runtime_status_suspended(dev))
+       if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
                goto out;
 
        /*
-        * Decrease the parent's runtime PM usage count, if we increased it
-        * during system suspend in pm_runtime_force_suspend().
-       */
-       if (atomic_read(&dev->power.usage_count) > 1) {
-               if (dev->parent)
-                       pm_runtime_put_noidle(dev->parent);
-       } else {
-               goto out;
-       }
+        * The value of the parent's children counter is correct already, so
+        * just update the status of the device.
+        */
+       __update_runtime_status(dev, RPM_ACTIVE);
 
-       ret = pm_runtime_set_active(dev);
-       if (ret)
-               goto out;
+       callback = RPM_GET_CALLBACK(dev, runtime_resume);
 
-       ret = callback(dev);
+       ret = callback ? callback(dev) : 0;
        if (ret) {
                pm_runtime_set_suspended(dev);
                goto out;
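
The runtime.c rework above assumes pm_runtime_force_suspend() and pm_runtime_force_resume() are used as a pair for system sleep. A short sketch of that pairing, with hypothetical "foo" runtime callbacks standing in for a real driver's:

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical runtime-PM callbacks for an imaginary "foo" device. */
    static int __maybe_unused foo_runtime_suspend(struct device *dev) { return 0; }
    static int __maybe_unused foo_runtime_resume(struct device *dev)  { return 0; }

    /*
     * force_suspend() records whether the device was really in use before the
     * transition; force_resume() then only powers it back up when that record
     * says it must, deferring everything else to runtime PM.
     */
    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                    pm_runtime_force_resume)
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };
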
index e153e28..0f651ef 100644 (file)
@@ -108,16 +108,10 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr,
 static ssize_t control_store(struct device * dev, struct device_attribute *attr,
                             const char * buf, size_t n)
 {
-       char *cp;
-       int len = n;
-
-       cp = memchr(buf, '\n', n);
-       if (cp)
-               len = cp - buf;
        device_lock(dev);
-       if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+       if (sysfs_streq(buf, ctrl_auto))
                pm_runtime_allow(dev);
-       else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+       else if (sysfs_streq(buf, ctrl_on))
                pm_runtime_forbid(dev);
        else
                n = -EINVAL;
@@ -125,9 +119,9 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
        return n;
 }
 
-static DEVICE_ATTR(control, 0644, control_show, control_store);
+static DEVICE_ATTR_RW(control);
 
-static ssize_t rtpm_active_time_show(struct device *dev,
+static ssize_t runtime_active_time_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        int ret;
@@ -138,9 +132,9 @@ static ssize_t rtpm_active_time_show(struct device *dev,
        return ret;
 }
 
-static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_active_time);
 
-static ssize_t rtpm_suspended_time_show(struct device *dev,
+static ssize_t runtime_suspended_time_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        int ret;
@@ -152,9 +146,9 @@ static ssize_t rtpm_suspended_time_show(struct device *dev,
        return ret;
 }
 
-static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_suspended_time);
 
-static ssize_t rtpm_status_show(struct device *dev,
+static ssize_t runtime_status_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        const char *p;
@@ -184,7 +178,7 @@ static ssize_t rtpm_status_show(struct device *dev,
        return sprintf(buf, p);
 }
 
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR_RO(runtime_status);
 
 static ssize_t autosuspend_delay_ms_show(struct device *dev,
                struct device_attribute *attr, char *buf)
@@ -211,26 +205,25 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
        return n;
 }
 
-static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
-               autosuspend_delay_ms_store);
+static DEVICE_ATTR_RW(autosuspend_delay_ms);
 
-static ssize_t pm_qos_resume_latency_show(struct device *dev,
-                                         struct device_attribute *attr,
-                                         char *buf)
+static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
+                                            struct device_attribute *attr,
+                                            char *buf)
 {
        s32 value = dev_pm_qos_requested_resume_latency(dev);
 
        if (value == 0)
                return sprintf(buf, "n/a\n");
-       else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+       if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
                value = 0;
 
        return sprintf(buf, "%d\n", value);
 }
 
-static ssize_t pm_qos_resume_latency_store(struct device *dev,
-                                          struct device_attribute *attr,
-                                          const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
+                                             struct device_attribute *attr,
+                                             const char *buf, size_t n)
 {
        s32 value;
        int ret;
@@ -245,7 +238,7 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
 
                if (value == 0)
                        value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
-       } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+       } else if (sysfs_streq(buf, "n/a")) {
                value = 0;
        } else {
                return -EINVAL;
@@ -256,26 +249,25 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
        return ret < 0 ? ret : n;
 }
 
-static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
-                  pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+static DEVICE_ATTR_RW(pm_qos_resume_latency_us);
 
-static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
-                                            struct device_attribute *attr,
-                                            char *buf)
+static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
+                                               struct device_attribute *attr,
+                                               char *buf)
 {
        s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
 
        if (value < 0)
                return sprintf(buf, "auto\n");
-       else if (value == PM_QOS_LATENCY_ANY)
+       if (value == PM_QOS_LATENCY_ANY)
                return sprintf(buf, "any\n");
 
        return sprintf(buf, "%d\n", value);
 }
 
-static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
-                                             struct device_attribute *attr,
-                                             const char *buf, size_t n)
+static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
+                                                struct device_attribute *attr,
+                                                const char *buf, size_t n)
 {
        s32 value;
        int ret;
@@ -285,9 +277,9 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
                if (value < 0)
                        return -EINVAL;
        } else {
-               if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+               if (sysfs_streq(buf, "auto"))
                        value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
-               else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+               else if (sysfs_streq(buf, "any"))
                        value = PM_QOS_LATENCY_ANY;
                else
                        return -EINVAL;
@@ -296,8 +288,7 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
        return ret < 0 ? ret : n;
 }
 
-static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
-                  pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
+static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);
 
 static ssize_t pm_qos_no_power_off_show(struct device *dev,
                                        struct device_attribute *attr,
@@ -323,49 +314,39 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
        return ret < 0 ? ret : n;
 }
 
-static DEVICE_ATTR(pm_qos_no_power_off, 0644,
-                  pm_qos_no_power_off_show, pm_qos_no_power_off_store);
+static DEVICE_ATTR_RW(pm_qos_no_power_off);
 
 #ifdef CONFIG_PM_SLEEP
 static const char _enabled[] = "enabled";
 static const char _disabled[] = "disabled";
 
-static ssize_t
-wake_show(struct device * dev, struct device_attribute *attr, char * buf)
+static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
+                          char *buf)
 {
        return sprintf(buf, "%s\n", device_can_wakeup(dev)
                ? (device_may_wakeup(dev) ? _enabled : _disabled)
                : "");
 }
 
-static ssize_t
-wake_store(struct device * dev, struct device_attribute *attr,
-       const char * buf, size_t n)
+static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t n)
 {
-       char *cp;
-       int len = n;
-
        if (!device_can_wakeup(dev))
                return -EINVAL;
 
-       cp = memchr(buf, '\n', n);
-       if (cp)
-               len = cp - buf;
-       if (len == sizeof _enabled - 1
-                       && strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
+       if (sysfs_streq(buf, _enabled))
                device_set_wakeup_enable(dev, 1);
-       else if (len == sizeof _disabled - 1
-                       && strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
+       else if (sysfs_streq(buf, _disabled))
                device_set_wakeup_enable(dev, 0);
        else
                return -EINVAL;
        return n;
 }
 
-static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+static DEVICE_ATTR_RW(wakeup);
 
 static ssize_t wakeup_count_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+                                struct device_attribute *attr, char *buf)
 {
        unsigned long count = 0;
        bool enabled = false;
@@ -379,10 +360,11 @@ static ssize_t wakeup_count_show(struct device *dev,
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_count);
 
 static ssize_t wakeup_active_count_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+                                       struct device_attribute *attr,
+                                       char *buf)
 {
        unsigned long count = 0;
        bool enabled = false;
@@ -396,11 +378,11 @@ static ssize_t wakeup_active_count_show(struct device *dev,
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active_count);
 
 static ssize_t wakeup_abort_count_show(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
+                                      struct device_attribute *attr,
+                                      char *buf)
 {
        unsigned long count = 0;
        bool enabled = false;
@@ -414,7 +396,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_abort_count);
 
 static ssize_t wakeup_expire_count_show(struct device *dev,
                                        struct device_attribute *attr,
@@ -432,10 +414,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_expire_count);
 
 static ssize_t wakeup_active_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+                                 struct device_attribute *attr, char *buf)
 {
        unsigned int active = 0;
        bool enabled = false;
@@ -449,10 +431,11 @@ static ssize_t wakeup_active_show(struct device *dev,
        return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active);
 
-static ssize_t wakeup_total_time_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t wakeup_total_time_ms_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
 {
        s64 msec = 0;
        bool enabled = false;
@@ -466,10 +449,10 @@ static ssize_t wakeup_total_time_show(struct device *dev,
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_total_time_ms);
 
-static ssize_t wakeup_max_time_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t wakeup_max_time_ms_show(struct device *dev,
+                                      struct device_attribute *attr, char *buf)
 {
        s64 msec = 0;
        bool enabled = false;
@@ -483,10 +466,11 @@ static ssize_t wakeup_max_time_show(struct device *dev,
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_max_time_ms);
 
-static ssize_t wakeup_last_time_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t wakeup_last_time_ms_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
 {
        s64 msec = 0;
        bool enabled = false;
@@ -500,12 +484,12 @@ static ssize_t wakeup_last_time_show(struct device *dev,
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_last_time_ms);
 
 #ifdef CONFIG_PM_AUTOSLEEP
-static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
-                                             struct device_attribute *attr,
-                                             char *buf)
+static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
+                                                struct device_attribute *attr,
+                                                char *buf)
 {
        s64 msec = 0;
        bool enabled = false;
@@ -519,40 +503,39 @@ static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
-                  wakeup_prevent_sleep_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
 #endif /* CONFIG_PM_AUTOSLEEP */
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM_ADVANCED_DEBUG
-static ssize_t rtpm_usagecount_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
+static ssize_t runtime_usage_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
 {
        return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
 }
+static DEVICE_ATTR_RO(runtime_usage);
 
-static ssize_t rtpm_children_show(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
+static ssize_t runtime_active_kids_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
 {
        return sprintf(buf, "%d\n", dev->power.ignore_children ?
                0 : atomic_read(&dev->power.child_count));
 }
+static DEVICE_ATTR_RO(runtime_active_kids);
 
-static ssize_t rtpm_enabled_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
+static ssize_t runtime_enabled_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
 {
-       if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+       if (dev->power.disable_depth && (dev->power.runtime_auto == false))
                return sprintf(buf, "disabled & forbidden\n");
-       else if (dev->power.disable_depth)
+       if (dev->power.disable_depth)
                return sprintf(buf, "disabled\n");
-       else if (dev->power.runtime_auto == false)
+       if (dev->power.runtime_auto == false)
                return sprintf(buf, "forbidden\n");
        return sprintf(buf, "enabled\n");
 }
-
-static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
-static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+static DEVICE_ATTR_RO(runtime_enabled);
 
 #ifdef CONFIG_PM_SLEEP
 static ssize_t async_show(struct device *dev, struct device_attribute *attr,
@@ -566,23 +549,16 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
 static ssize_t async_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t n)
 {
-       char *cp;
-       int len = n;
-
-       cp = memchr(buf, '\n', n);
-       if (cp)
-               len = cp - buf;
-       if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
+       if (sysfs_streq(buf, _enabled))
                device_enable_async_suspend(dev);
-       else if (len == sizeof _disabled - 1 &&
-                strncmp(buf, _disabled, len) == 0)
+       else if (sysfs_streq(buf, _disabled))
                device_disable_async_suspend(dev);
        else
                return -EINVAL;
        return n;
 }
 
-static DEVICE_ATTR(async, 0644, async_show, async_store);
+static DEVICE_ATTR_RW(async);
 
 #endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM_ADVANCED_DEBUG */
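
The sysfs.c conversions above replace hand-rolled memchr()/strncmp() newline handling with sysfs_streq(), which matches "enabled" and "enabled\n" alike. A self-contained sketch of the same pattern in a hypothetical attribute (foo_mode and foo_enabled are made up for illustration):

    #include <linux/device.h>
    #include <linux/string.h>

    static bool foo_enabled;        /* hypothetical module state */

    static ssize_t foo_mode_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t n)
    {
            if (sysfs_streq(buf, "enabled"))
                    foo_enabled = true;
            else if (sysfs_streq(buf, "disabled"))
                    foo_enabled = false;
            else
                    return -EINVAL;
            return n;
    }
    static DEVICE_ATTR_WO(foo_mode);
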
index ae04298..a8ac86e 100644 (file)
@@ -33,7 +33,6 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
                                  struct wake_irq *wirq)
 {
        unsigned long flags;
-       int err;
 
        if (!dev || !wirq)
                return -EINVAL;
@@ -45,12 +44,11 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
                return -EEXIST;
        }
 
-       err = device_wakeup_attach_irq(dev, wirq);
-       if (!err)
-               dev->power.wakeirq = wirq;
+       dev->power.wakeirq = wirq;
+       device_wakeup_attach_irq(dev, wirq);
 
        spin_unlock_irqrestore(&dev->power.lock, flags);
-       return err;
+       return 0;
 }
 
 /**
index 38559f0..ea01621 100644 (file)
 
 #include "power.h"
 
+#ifndef CONFIG_SUSPEND
+suspend_state_t pm_suspend_target_state;
+#define pm_suspend_target_state        (PM_SUSPEND_ON)
+#endif
+
 /*
  * If set, the suspend/hibernate code will abort transitions to a sleep state
  * if wakeup events are registered during or immediately before the transition.
@@ -268,6 +273,9 @@ int device_wakeup_enable(struct device *dev)
        if (!dev || !dev->power.can_wakeup)
                return -EINVAL;
 
+       if (pm_suspend_target_state != PM_SUSPEND_ON)
+               dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
+
        ws = wakeup_source_register(dev_name(dev));
        if (!ws)
                return -ENOMEM;
@@ -291,22 +299,19 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable);
  *
  * Call under the device's power.lock lock.
  */
-int device_wakeup_attach_irq(struct device *dev,
+void device_wakeup_attach_irq(struct device *dev,
                             struct wake_irq *wakeirq)
 {
        struct wakeup_source *ws;
 
        ws = dev->power.wakeup;
-       if (!ws) {
-               dev_err(dev, "forgot to call call device_init_wakeup?\n");
-               return -EINVAL;
-       }
+       if (!ws)
+               return;
 
        if (ws->wakeirq)
-               return -EEXIST;
+               dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
 
        ws->wakeirq = wakeirq;
-       return 0;
 }
 
 /**
@@ -448,9 +453,7 @@ int device_init_wakeup(struct device *dev, bool enable)
                device_set_wakeup_capable(dev, true);
                ret = device_wakeup_enable(dev);
        } else {
-               if (dev->power.can_wakeup)
-                       device_wakeup_disable(dev);
-
+               device_wakeup_disable(dev);
                device_set_wakeup_capable(dev, false);
        }
 
@@ -464,9 +467,6 @@ EXPORT_SYMBOL_GPL(device_init_wakeup);
  */
 int device_set_wakeup_enable(struct device *dev, bool enable)
 {
-       if (!dev || !dev->power.can_wakeup)
-               return -EINVAL;
-
        return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
 }
 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
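
The wakeirq.c and wakeup.c changes above assume the usual call order: the wakeup source is created first, then the wake IRQ is attached, which is why the attach path now only warns instead of failing. A sketch under that assumption (the foo_setup_wakeup() helper is hypothetical):

    #include <linux/pm_wakeirq.h>
    #include <linux/pm_wakeup.h>

    /* Create the wakeup source before attaching the wake IRQ to it. */
    static int foo_setup_wakeup(struct device *dev, int irq)
    {
            int ret;

            ret = device_init_wakeup(dev, true);
            if (ret)
                    return ret;

            return dev_pm_set_wake_irq(dev, irq);
    }
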
index 02d78f6..ba8acca 100644 (file)
@@ -55,7 +55,7 @@ config BCMA_DRIVER_PCI
 
 config BCMA_DRIVER_PCI_HOSTMODE
        bool "Driver for PCI core working in hostmode"
-       depends on MIPS && BCMA_DRIVER_PCI
+       depends on MIPS && BCMA_DRIVER_PCI && PCI_DRIVERS_LEGACY
        help
          PCI core hostmode operation (external PCI bus).
 
index bc8e615..d5fe720 100644 (file)
@@ -1581,9 +1581,8 @@ out:
        return err;
 }
 
-static void lo_release(struct gendisk *disk, fmode_t mode)
+static void __lo_release(struct loop_device *lo)
 {
-       struct loop_device *lo = disk->private_data;
        int err;
 
        if (atomic_dec_return(&lo->lo_refcnt))
@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
        mutex_unlock(&lo->lo_ctl_mutex);
 }
 
+static void lo_release(struct gendisk *disk, fmode_t mode)
+{
+       mutex_lock(&loop_index_mutex);
+       __lo_release(disk->private_data);
+       mutex_unlock(&loop_index_mutex);
+}
+
 static const struct block_device_operations lo_fops = {
        .owner =        THIS_MODULE,
        .open =         lo_open,
index c61960d..ad0477a 100644 (file)
@@ -35,13 +35,13 @@ static inline u64 mb_per_tick(int mbps)
 struct nullb_cmd {
        struct list_head list;
        struct llist_node ll_list;
-       call_single_data_t csd;
+       struct __call_single_data csd;
        struct request *rq;
        struct bio *bio;
        unsigned int tag;
+       blk_status_t error;
        struct nullb_queue *nq;
        struct hrtimer timer;
-       blk_status_t error;
 };
 
 struct nullb_queue {
@@ -471,7 +471,6 @@ static void nullb_device_release(struct config_item *item)
 {
        struct nullb_device *dev = to_nullb_device(item);
 
-       badblocks_exit(&dev->badblocks);
        null_free_device_storage(dev, false);
        null_free_dev(dev);
 }
@@ -582,6 +581,10 @@ static struct nullb_device *null_alloc_dev(void)
 
 static void null_free_dev(struct nullb_device *dev)
 {
+       if (!dev)
+               return;
+
+       badblocks_exit(&dev->badblocks);
        kfree(dev);
 }
 
index 38fc5f3..cc93522 100644 (file)
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
        mutex_unlock(&rbd_dev->watch_mutex);
 }
 
+static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
+{
+       struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+
+       strcpy(rbd_dev->lock_cookie, cookie);
+       rbd_set_owner_cid(rbd_dev, &cid);
+       queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+}
+
 /*
  * lock_rwsem must be held for write
  */
 static int rbd_lock(struct rbd_device *rbd_dev)
 {
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
-       struct rbd_client_id cid = rbd_get_cid(rbd_dev);
        char cookie[32];
        int ret;
 
@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
                return ret;
 
        rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
-       strcpy(rbd_dev->lock_cookie, cookie);
-       rbd_set_owner_cid(rbd_dev, &cid);
-       queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+       __rbd_lock(rbd_dev, cookie);
        return 0;
 }
 
@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
                        queue_delayed_work(rbd_dev->task_wq,
                                           &rbd_dev->lock_dwork, 0);
        } else {
-               strcpy(rbd_dev->lock_cookie, cookie);
+               __rbd_lock(rbd_dev, cookie);
        }
 }
 
@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        segment_size = rbd_obj_bytes(&rbd_dev->header);
        blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
        q->limits.max_sectors = queue_max_hw_sectors(q);
-       blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+       blk_queue_max_segments(q, USHRT_MAX);
        blk_queue_max_segment_size(q, segment_size);
        blk_queue_io_min(q, segment_size);
        blk_queue_io_opt(q, segment_size);
index dc7b3c7..57e011d 100644 (file)
@@ -120,7 +120,7 @@ config QCOM_EBI2
          SRAM, ethernet adapters, FPGAs and LCD displays.
 
 config SIMPLE_PM_BUS
-       bool "Simple Power-Managed Bus Driver"
+       tristate "Simple Power-Managed Bus Driver"
        depends on OF && PM
        help
          Driver for transparent busses that don't need a real driver, but
index 3c29d36..5426c04 100644 (file)
@@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev)
        raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
        mutex_init(&cci_pmu->reserve_mutex);
        atomic_set(&cci_pmu->active_events, 0);
-       cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
+       cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
 
        ret = cci_pmu_init(cci_pmu, pdev);
-       if (ret)
+       if (ret) {
+               put_cpu();
                return ret;
+       }
 
        cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
                                         &cci_pmu->node);
+       put_cpu();
        pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
        return 0;
 }
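
The arm-cci change above swaps a bare smp_processor_id() for get_cpu()/put_cpu() so the CPU cannot change under the probe. A minimal illustration of that pattern (the helper name is hypothetical):

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /*
     * get_cpu() disables preemption, so the returned CPU id stays valid until
     * put_cpu(); a bare smp_processor_id() in preemptible context can go
     * stale before it is used.
     */
    static void pin_to_current_cpu(struct cpumask *mask)
    {
            int cpu = get_cpu();            /* disables preemption */

            cpumask_set_cpu(cpu, mask);
            /* ... registration that must see a stable cpu ... */
            put_cpu();                      /* re-enables preemption */
    }
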
index 3063f53..b52332e 100644 (file)
@@ -262,7 +262,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = {
        NULL
 };
 
-static struct attribute_group arm_ccn_pmu_format_attr_group = {
+static const struct attribute_group arm_ccn_pmu_format_attr_group = {
        .name = "format",
        .attrs = arm_ccn_pmu_format_attrs,
 };
@@ -451,7 +451,7 @@ static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
 static struct attribute
                *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
 
-static struct attribute_group arm_ccn_pmu_events_attr_group = {
+static const struct attribute_group arm_ccn_pmu_events_attr_group = {
        .name = "events",
        .is_visible = arm_ccn_pmu_events_is_visible,
        .attrs = arm_ccn_pmu_events_attrs,
@@ -548,7 +548,7 @@ static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
        NULL
 };
 
-static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
+static const struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
        .name = "cmp_mask",
        .attrs = arm_ccn_pmu_cmp_mask_attrs,
 };
@@ -569,7 +569,7 @@ static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
        NULL,
 };
 
-static struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
+static const struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
        .attrs = arm_ccn_pmu_cpumask_attrs,
 };
 
@@ -1268,10 +1268,12 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
        if (ccn->dt.id == 0) {
                name = "ccn";
        } else {
-               int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
-
-               name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
-               snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
+               name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d",
+                                     ccn->dt.id);
+               if (!name) {
+                       err = -ENOMEM;
+                       goto error_choose_name;
+               }
        }
 
        /* Perf driver registration */
@@ -1298,7 +1300,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
        }
 
        /* Pick one CPU which we will use to collect data from CCN... */
-       cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
+       cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
 
        /* Also make sure that the overflow interrupt is handled by this CPU */
        if (ccn->irq) {
@@ -1315,10 +1317,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 
        cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
                                         &ccn->dt.node);
+       put_cpu();
        return 0;
 
 error_pmu_register:
 error_set_affinity:
+       put_cpu();
+error_choose_name:
        ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
        for (i = 0; i < ccn->num_xps; i++)
                writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1581,8 +1586,8 @@ static int __init arm_ccn_init(void)
 
 static void __exit arm_ccn_exit(void)
 {
-       cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
        platform_driver_unregister(&arm_ccn_driver);
+       cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
 }
 
 module_init(arm_ccn_init);
index 328ca93..1b76d95 100644 (file)
@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
        .match          = sunxi_rsb_device_match,
        .probe          = sunxi_rsb_device_probe,
        .remove         = sunxi_rsb_device_remove,
+       .uevent         = of_device_uevent_modalias,
 };
 
 static void sunxi_rsb_dev_release(struct device *dev)
index 779869e..71fad74 100644 (file)
@@ -199,6 +199,9 @@ struct smi_info {
        /* The timer for this si. */
        struct timer_list   si_timer;
 
+       /* This flag is set, if the timer can be set */
+       bool                timer_can_start;
+
        /* This flag is set, if the timer is running (timer_pending() isn't enough) */
        bool                timer_running;
 
@@ -355,6 +358,8 @@ out:
 
 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
 {
+       if (!smi_info->timer_can_start)
+               return;
        smi_info->last_timeout_jiffies = jiffies;
        mod_timer(&smi_info->si_timer, new_val);
        smi_info->timer_running = true;
@@ -374,21 +379,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
        smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
 }
 
-static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+static void start_check_enables(struct smi_info *smi_info)
 {
        unsigned char msg[2];
 
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-       if (start_timer)
-               start_new_msg(smi_info, msg, 2);
-       else
-               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+       start_new_msg(smi_info, msg, 2);
        smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+static void start_clear_flags(struct smi_info *smi_info)
 {
        unsigned char msg[3];
 
@@ -397,10 +399,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;
 
-       if (start_timer)
-               start_new_msg(smi_info, msg, 3);
-       else
-               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+       start_new_msg(smi_info, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -435,11 +434,11 @@ static void start_getting_events(struct smi_info *smi_info)
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+static inline bool disable_si_irq(struct smi_info *smi_info)
 {
        if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = true;
-               start_check_enables(smi_info, start_timer);
+               start_check_enables(smi_info);
                return true;
        }
        return false;
@@ -449,7 +448,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
 {
        if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = false;
-               start_check_enables(smi_info, true);
+               start_check_enables(smi_info);
                return true;
        }
        return false;
@@ -467,7 +466,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 
        msg = ipmi_alloc_smi_msg();
        if (!msg) {
-               if (!disable_si_irq(smi_info, true))
+               if (!disable_si_irq(smi_info))
                        smi_info->si_state = SI_NORMAL;
        } else if (enable_si_irq(smi_info)) {
                ipmi_free_smi_msg(msg);
@@ -483,7 +482,7 @@ retry:
                /* Watchdog pre-timeout */
                smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-               start_clear_flags(smi_info, true);
+               start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                if (smi_info->intf)
                        ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -866,7 +865,7 @@ restart:
                 * disable and messages disabled.
                 */
                if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
-                       start_check_enables(smi_info, true);
+                       start_check_enables(smi_info);
                } else {
                        smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
                        if (!smi_info->curr_msg)
@@ -1167,6 +1166,7 @@ static int smi_start_processing(void       *send_info,
 
        /* Set up the timer that drives the interface. */
        timer_setup(&new_smi->si_timer, smi_timeout, 0);
+       new_smi->timer_can_start = true;
        smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
        /* Try to claim any interrupts. */
@@ -1936,10 +1936,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
        check_set_rcv_irq(smi_info);
 }
 
-static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
 {
        if (smi_info->thread != NULL)
                kthread_stop(smi_info->thread);
+
+       smi_info->timer_can_start = false;
        if (smi_info->timer_running)
                del_timer_sync(&smi_info->si_timer);
 }
@@ -2152,7 +2154,7 @@ static int try_smi_init(struct smi_info *new_smi)
         * Start clearing the flags before we enable interrupts or the
         * timer to avoid racing with the timer.
         */
-       start_clear_flags(new_smi, false);
+       start_clear_flags(new_smi);
 
        /*
         * IRQ is defined to be set when non-zero.  req_events will
@@ -2238,7 +2240,7 @@ out_err_remove_attrs:
        dev_set_drvdata(new_smi->io.dev, NULL);
 
 out_err_stop_timer:
-       wait_for_timer_and_thread(new_smi);
+       stop_timer_and_thread(new_smi);
 
 out_err:
        new_smi->interrupt_disabled = true;
@@ -2388,7 +2390,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
         */
        if (to_clean->io.irq_cleanup)
                to_clean->io.irq_cleanup(&to_clean->io);
-       wait_for_timer_and_thread(to_clean);
+       stop_timer_and_thread(to_clean);
 
        /*
         * Timeouts are stopped, now make sure the interrupts are off
@@ -2400,7 +2402,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
                schedule_timeout_uninterruptible(1);
        }
        if (to_clean->handlers)
-               disable_si_irq(to_clean, false);
+               disable_si_irq(to_clean);
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
index 090b073..6b10f0e 100644 (file)
@@ -10,6 +10,8 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
 {
        struct si_sm_io io;
 
+       memset(&io, 0, sizeof(io));
+
        io.si_type      = SI_KCS;
        io.addr_source  = SI_DEVICETREE;
        io.addr_type    = IPMI_MEM_ADDR_SPACE;
index 99771f5..27dd11c 100644 (file)
@@ -103,10 +103,13 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
        io.addr_source_cleanup = ipmi_pci_cleanup;
        io.addr_source_data = pdev;
 
-       if (pci_resource_flags(pdev, 0) & IORESOURCE_IO)
+       if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
                io.addr_type = IPMI_IO_ADDR_SPACE;
-       else
+               io.io_setup = ipmi_si_port_setup;
+       } else {
                io.addr_type = IPMI_MEM_ADDR_SPACE;
+               io.io_setup = ipmi_si_mem_setup;
+       }
        io.addr_data = pci_resource_start(pdev, 0);
 
        io.regspacing = ipmi_pci_probe_regspacing(&io);
index 647d056..b56c11f 100644 (file)
@@ -220,7 +220,8 @@ static bool clk_core_is_enabled(struct clk_core *core)
 
        ret = core->ops->is_enabled(core->hw);
 done:
-       clk_pm_runtime_put(core);
+       if (core->dev)
+               pm_runtime_put(core->dev);
 
        return ret;
 }
@@ -1564,6 +1565,9 @@ static void clk_change_rate(struct clk_core *core)
                best_parent_rate = core->parent->rate;
        }
 
+       if (clk_pm_runtime_get(core))
+               return;
+
        if (core->flags & CLK_SET_RATE_UNGATE) {
                unsigned long flags;
 
@@ -1634,6 +1638,8 @@ static void clk_change_rate(struct clk_core *core)
        /* handle the new child who might not be in core->children yet */
        if (core->new_child)
                clk_change_rate(core->new_child);
+
+       clk_pm_runtime_put(core);
 }
 
 static int clk_core_set_rate_nolock(struct clk_core *core,
index a1a6342..f00d875 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
        return 0;
 }
 
+static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev,
+                                unsigned long id)
+{
+       sun9i_mmc_reset_assert(rcdev, id);
+       udelay(10);
+       sun9i_mmc_reset_deassert(rcdev, id);
+
+       return 0;
+}
+
 static const struct reset_control_ops sun9i_mmc_reset_ops = {
        .assert         = sun9i_mmc_reset_assert,
        .deassert       = sun9i_mmc_reset_deassert,
+       .reset          = sun9i_mmc_reset_reset,
 };
 
 static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
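
With the .reset hook added to sun9i_mmc_reset_ops above, a consumer can trigger the whole assert/delay/deassert pulse through the generic reset API instead of open-coding it. A hedged sketch, with a hypothetical foo_reset_block() consumer and error handling trimmed:

    #include <linux/device.h>
    #include <linux/reset.h>

    /* Pulse the block's reset line via the controller's .reset callback. */
    static int foo_reset_block(struct device *dev)
    {
            struct reset_control *rst;

            rst = devm_reset_control_get_exclusive(dev, NULL);
            if (IS_ERR(rst))
                    return PTR_ERR(rst);

            return reset_control_reset(rst);
    }
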
index 4ebae43..d8addbc 100644 (file)
@@ -275,6 +275,7 @@ config BMIPS_CPUFREQ
 
 config LOONGSON2_CPUFREQ
        tristate "Loongson2 CPUFreq Driver"
+       depends on LEMOTE_MACH2F
        help
          This option adds a CPUFreq driver for loongson processors which
          support software configurable cpu frequency.
@@ -287,6 +288,7 @@ config LOONGSON2_CPUFREQ
 
 config LOONGSON1_CPUFREQ
        tristate "Loongson1 CPUFreq Driver"
+       depends on LOONGSON1_LS1B
        help
          This option adds a CPUFreq driver for loongson1 processors which
          support software configurable cpu frequency.
index bdce448..3a88e33 100644 (file)
@@ -2,6 +2,29 @@
 # ARM CPU Frequency scaling drivers
 #
 
+config ACPI_CPPC_CPUFREQ
+       tristate "CPUFreq driver based on the ACPI CPPC spec"
+       depends on ACPI_PROCESSOR
+       select ACPI_CPPC_LIB
+       help
+         This adds a CPUFreq driver which uses CPPC methods
+         as described in the ACPIv5.1 spec. CPPC stands for
+         Collaborative Processor Performance Controls. It
+         is based on an abstract continuous scale of CPU
+         performance values which allows the remote power
+         processor to flexibly optimize for power and
+         performance. CPPC relies on power management firmware
+         support for its operation.
+
+         If in doubt, say N.
+
+config ARM_ARMADA_37XX_CPUFREQ
+       tristate "Armada 37xx CPUFreq support"
+       depends on ARCH_MVEBU
+       help
+         This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
+         The Armada 37xx PMU supports 4 frequency and VDD levels.
+
 # big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
        tristate "Generic ARM big LITTLE CPUfreq driver"
@@ -12,6 +35,30 @@ config ARM_BIG_LITTLE_CPUFREQ
        help
          This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
 
+config ARM_DT_BL_CPUFREQ
+       tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+       depends on ARM_BIG_LITTLE_CPUFREQ && OF
+       help
+         This enables probing via DT for Generic CPUfreq driver for ARM
+         big.LITTLE platform. This gets frequency tables from DT.
+
+config ARM_SCPI_CPUFREQ
+       tristate "SCPI based CPUfreq driver"
+       depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
+       help
+         This adds the CPUfreq driver support for ARM big.LITTLE platforms
+         using SCPI protocol for CPU power management.
+
+         This driver uses SCPI Message Protocol driver to interact with the
+         firmware providing the CPU DVFS functionality.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+       tristate "Versatile Express SPC based CPUfreq driver"
+       depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+       help
+         This add the CPUfreq driver support for Versatile Express
+         big.LITTLE platforms using SPC for power management.
+
 config ARM_BRCMSTB_AVS_CPUFREQ
        tristate "Broadcom STB AVS CPUfreq driver"
        depends on ARCH_BRCMSTB || COMPILE_TEST
@@ -33,20 +80,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
 
          If in doubt, say N.
 
-config ARM_DT_BL_CPUFREQ
-       tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
-       depends on ARM_BIG_LITTLE_CPUFREQ && OF
-       help
-         This enables probing via DT for Generic CPUfreq driver for ARM
-         big.LITTLE platform. This gets frequency tables from DT.
-
-config ARM_VEXPRESS_SPC_CPUFREQ
-        tristate "Versatile Express SPC based CPUfreq driver"
-       depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
-        help
-          This add the CPUfreq driver support for Versatile Express
-         big.LITTLE platforms using SPC for power management.
-
 config ARM_EXYNOS5440_CPUFREQ
        tristate "SAMSUNG EXYNOS5440"
        depends on SOC_EXYNOS5440
@@ -205,16 +238,6 @@ config ARM_SA1100_CPUFREQ
 config ARM_SA1110_CPUFREQ
        bool
 
-config ARM_SCPI_CPUFREQ
-        tristate "SCPI based CPUfreq driver"
-       depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
-        help
-         This adds the CPUfreq driver support for ARM big.LITTLE platforms
-         using SCPI protocol for CPU power management.
-
-         This driver uses SCPI Message Protocol driver to interact with the
-         firmware providing the CPU DVFS functionality.
-
 config ARM_SPEAR_CPUFREQ
        bool "SPEAr CPUFreq support"
        depends on PLAT_SPEAR
@@ -275,20 +298,3 @@ config ARM_PXA2xx_CPUFREQ
          This add the CPUFreq driver support for Intel PXA2xx SOCs.
 
          If in doubt, say N.
-
-config ACPI_CPPC_CPUFREQ
-       tristate "CPUFreq driver based on the ACPI CPPC spec"
-       depends on ACPI_PROCESSOR
-       select ACPI_CPPC_LIB
-       default n
-       help
-         This adds a CPUFreq driver which uses CPPC methods
-         as described in the ACPIv5.1 spec. CPPC stands for
-         Collaborative Processor Performance Controls. It
-         is based on an abstract continuous scale of CPU
-         performance values which allows the remote power
-         processor to flexibly optimize for power and
-         performance. CPPC relies on power management firmware
-         support for its operation.
-
-         If in doubt, say N.
index 812f9e0..e07715c 100644 (file)
@@ -52,23 +52,26 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ)        += arm_big_little.o
 # LITTLE drivers, so that it is probed last.
 obj-$(CONFIG_ARM_DT_BL_CPUFREQ)                += arm_big_little_dt.o
 
+obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ)  += armada-37xx-cpufreq.o
 obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ)  += brcmstb-avs-cpufreq.o
+obj-$(CONFIG_ACPI_CPPC_CPUFREQ)                += cppc_cpufreq.o
 obj-$(CONFIG_ARCH_DAVINCI)             += davinci-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)   += exynos5440-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)     += highbank-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)                += imx6q-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)     += kirkwood-cpufreq.o
 obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ)     += mediatek-cpufreq.o
+obj-$(CONFIG_MACH_MVEBU_V7)            += mvebu-cpufreq.o
 obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)    += omap-cpufreq.o
 obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)       += pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)                   += pxa3xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ)      += s3c24xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
 obj-$(CONFIG_ARM_S3C2410_CPUFREQ)      += s3c2410-cpufreq.o
 obj-$(CONFIG_ARM_S3C2412_CPUFREQ)      += s3c2412-cpufreq.o
 obj-$(CONFIG_ARM_S3C2416_CPUFREQ)      += s3c2416-cpufreq.o
 obj-$(CONFIG_ARM_S3C2440_CPUFREQ)      += s3c2440-cpufreq.o
 obj-$(CONFIG_ARM_S3C64XX_CPUFREQ)      += s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ)      += s3c24xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
 obj-$(CONFIG_ARM_S5PV210_CPUFREQ)      += s5pv210-cpufreq.o
 obj-$(CONFIG_ARM_SA1100_CPUFREQ)       += sa1100-cpufreq.o
 obj-$(CONFIG_ARM_SA1110_CPUFREQ)       += sa1110-cpufreq.o
@@ -81,8 +84,6 @@ obj-$(CONFIG_ARM_TEGRA124_CPUFREQ)    += tegra124-cpufreq.o
 obj-$(CONFIG_ARM_TEGRA186_CPUFREQ)     += tegra186-cpufreq.o
 obj-$(CONFIG_ARM_TI_CPUFREQ)           += ti-cpufreq.o
 obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
-obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
-obj-$(CONFIG_MACH_MVEBU_V7)            += mvebu-cpufreq.o
 
 
 ##################################################################################
index 65ec5f0..c56b57d 100644 (file)
@@ -526,34 +526,13 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
 
 static void bL_cpufreq_ready(struct cpufreq_policy *policy)
 {
-       struct device *cpu_dev = get_cpu_device(policy->cpu);
        int cur_cluster = cpu_to_cluster(policy->cpu);
-       struct device_node *np;
 
        /* Do not register a cpu_cooling device if we are in IKS mode */
        if (cur_cluster >= MAX_CLUSTERS)
                return;
 
-       np = of_node_get(cpu_dev->of_node);
-       if (WARN_ON(!np))
-               return;
-
-       if (of_find_property(np, "#cooling-cells", NULL)) {
-               u32 power_coefficient = 0;
-
-               of_property_read_u32(np, "dynamic-power-coefficient",
-                                    &power_coefficient);
-
-               cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
-                               policy, power_coefficient, NULL);
-               if (IS_ERR(cdev[cur_cluster])) {
-                       dev_err(cpu_dev,
-                               "running cpufreq without cooling device: %ld\n",
-                               PTR_ERR(cdev[cur_cluster]));
-                       cdev[cur_cluster] = NULL;
-               }
-       }
-       of_node_put(np);
+       cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
 }
 
 static struct cpufreq_driver bL_cpufreq_driver = {
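
The hunk above is the first of several in this series that collapse a driver's open-coded cooling setup into a single call: of_cpufreq_cooling_register() now takes only the cpufreq policy and parses the CPU's device-tree node itself (the #cooling-cells check and the dynamic-power-coefficient read move into the cooling core). The same one-liner appears again below in the cpufreq-dt, mediatek, qoriq and scpi hunks. A rough sketch of the resulting driver pattern follows; the example_* names and the private structure are assumptions, only the two cooling helpers are the real APIs used in these hunks.

/*
 * Sketch of a driver's ->ready()/->exit() pair after this series.
 * example_drv_data is hypothetical; drivers typically stash the cooling
 * device in their existing per-policy private data.
 */
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>

struct example_drv_data {
	struct thermal_cooling_device *cdev;
};

static void example_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct example_drv_data *priv = policy->driver_data;

	/* The cooling core decides from DT whether a device gets created. */
	priv->cdev = of_cpufreq_cooling_register(policy);
}

static int example_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct example_drv_data *priv = policy->driver_data;

	/* Drop the cooling device registered in ->ready(), if any. */
	cpufreq_cooling_unregister(priv->cdev);
	return 0;
}
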
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
new file mode 100644 (file)
index 0000000..c6ebc88
--- /dev/null
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPU frequency scaling support for Armada 37xx platform.
+ *
+ * Copyright (C) 2017 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Power management in North Bridge register set */
+#define ARMADA_37XX_NB_L0L1    0x18
+#define ARMADA_37XX_NB_L2L3    0x1C
+#define  ARMADA_37XX_NB_TBG_DIV_OFF    13
+#define  ARMADA_37XX_NB_TBG_DIV_MASK   0x7
+#define  ARMADA_37XX_NB_CLK_SEL_OFF    11
+#define  ARMADA_37XX_NB_CLK_SEL_MASK   0x1
+#define  ARMADA_37XX_NB_CLK_SEL_TBG    0x1
+#define  ARMADA_37XX_NB_TBG_SEL_OFF    9
+#define  ARMADA_37XX_NB_TBG_SEL_MASK   0x3
+#define  ARMADA_37XX_NB_VDD_SEL_OFF    6
+#define  ARMADA_37XX_NB_VDD_SEL_MASK   0x3
+#define  ARMADA_37XX_NB_CONFIG_SHIFT   16
+#define ARMADA_37XX_NB_DYN_MOD 0x24
+#define  ARMADA_37XX_NB_CLK_SEL_EN     BIT(26)
+#define  ARMADA_37XX_NB_TBG_EN         BIT(28)
+#define  ARMADA_37XX_NB_DIV_EN         BIT(29)
+#define  ARMADA_37XX_NB_VDD_EN         BIT(30)
+#define  ARMADA_37XX_NB_DFS_EN         BIT(31)
+#define ARMADA_37XX_NB_CPU_LOAD 0x30
+#define  ARMADA_37XX_NB_CPU_LOAD_MASK  0x3
+#define  ARMADA_37XX_DVFS_LOAD_0       0
+#define  ARMADA_37XX_DVFS_LOAD_1       1
+#define  ARMADA_37XX_DVFS_LOAD_2       2
+#define  ARMADA_37XX_DVFS_LOAD_3       3
+
+/*
+ * On Armada 37xx the power management unit manages 4 levels of CPU
+ * load; each level can be associated with a CPU clock source, a CPU
+ * divider, a VDD level, etc.
+ */
+#define LOAD_LEVEL_NR  4
+
+struct armada_37xx_dvfs {
+       u32 cpu_freq_max;
+       u8 divider[LOAD_LEVEL_NR];
+};
+
+static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
+       {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+       {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
+       {.cpu_freq_max = 800*1000*1000,  .divider = {1, 2, 3, 4} },
+       {.cpu_freq_max = 600*1000*1000,  .divider = {2, 4, 5, 6} },
+};
+
+static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(armada_37xx_dvfs); i++) {
+               if (freq == armada_37xx_dvfs[i].cpu_freq_max)
+                       return &armada_37xx_dvfs[i];
+       }
+
+       pr_err("Unsupported CPU frequency %d MHz\n", freq/1000000);
+       return NULL;
+}
+
+/*
+ * Set up the four load levels managed by the hardware. Once the four
+ * levels are configured, DVFS can be enabled.
+ */
+static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+                                                struct clk *clk, u8 *divider)
+{
+       int load_lvl;
+       struct clk *parent;
+
+       for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
+               unsigned int reg, mask, val, offset = 0;
+
+               if (load_lvl <= ARMADA_37XX_DVFS_LOAD_1)
+                       reg = ARMADA_37XX_NB_L0L1;
+               else
+                       reg = ARMADA_37XX_NB_L2L3;
+
+               if (load_lvl == ARMADA_37XX_DVFS_LOAD_0 ||
+                   load_lvl == ARMADA_37XX_DVFS_LOAD_2)
+                       offset += ARMADA_37XX_NB_CONFIG_SHIFT;
+
+               /* Set the CPU clock source; for all levels we use the TBG */
+               val = ARMADA_37XX_NB_CLK_SEL_TBG << ARMADA_37XX_NB_CLK_SEL_OFF;
+               mask = (ARMADA_37XX_NB_CLK_SEL_MASK
+                       << ARMADA_37XX_NB_CLK_SEL_OFF);
+
+               /*
+                * Set the CPU divider based on the pre-computed array
+                * in order to have balanced steps.
+                */
+               val |= divider[load_lvl] << ARMADA_37XX_NB_TBG_DIV_OFF;
+               mask |= (ARMADA_37XX_NB_TBG_DIV_MASK
+                       << ARMADA_37XX_NB_TBG_DIV_OFF);
+
+               /* Set VDD divider which is actually the load level. */
+               val |= load_lvl << ARMADA_37XX_NB_VDD_SEL_OFF;
+               mask |= (ARMADA_37XX_NB_VDD_SEL_MASK
+                       << ARMADA_37XX_NB_VDD_SEL_OFF);
+
+               val <<= offset;
+               mask <<= offset;
+
+               regmap_update_bits(base, reg, mask, val);
+       }
+
+       /*
+        * Set the CPU clock source: for all levels we keep the clock
+        * source that is already configured. For this we need to go
+        * through the clock framework.
+        */
+       parent = clk_get_parent(clk);
+       clk_set_parent(clk, parent);
+}
+
+static void __init armada37xx_cpufreq_disable_dvfs(struct regmap *base)
+{
+       unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
+               mask = ARMADA_37XX_NB_DFS_EN;
+
+       regmap_update_bits(base, reg, mask, 0);
+}
+
+static void __init armada37xx_cpufreq_enable_dvfs(struct regmap *base)
+{
+       unsigned int val, reg = ARMADA_37XX_NB_CPU_LOAD,
+               mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+       /* Start with the highest load (0) */
+       val = ARMADA_37XX_DVFS_LOAD_0;
+       regmap_update_bits(base, reg, mask, val);
+
+       /* Now enable DVFS for the CPUs */
+       reg = ARMADA_37XX_NB_DYN_MOD;
+       mask =  ARMADA_37XX_NB_CLK_SEL_EN | ARMADA_37XX_NB_TBG_EN |
+               ARMADA_37XX_NB_DIV_EN | ARMADA_37XX_NB_VDD_EN |
+               ARMADA_37XX_NB_DFS_EN;
+
+       regmap_update_bits(base, reg, mask, mask);
+}
+
+static int __init armada37xx_cpufreq_driver_init(void)
+{
+       struct armada_37xx_dvfs *dvfs;
+       struct platform_device *pdev;
+       unsigned int cur_frequency;
+       struct regmap *nb_pm_base;
+       struct device *cpu_dev;
+       int load_lvl, ret;
+       struct clk *clk;
+
+       nb_pm_base =
+               syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
+
+       if (IS_ERR(nb_pm_base))
+               return -ENODEV;
+
+       /* Disable DVFS before doing any configuration on it */
+       armada37xx_cpufreq_disable_dvfs(nb_pm_base);
+
+       /*
+        * On CPU 0 register the operating points supported (which are
+        * the nominal CPU frequency and full integer divisions of
+        * it).
+        */
+       cpu_dev = get_cpu_device(0);
+       if (!cpu_dev) {
+               dev_err(cpu_dev, "Cannot get CPU\n");
+               return -ENODEV;
+       }
+
+       clk = clk_get(cpu_dev, 0);
+       if (IS_ERR(clk)) {
+               dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+               return PTR_ERR(clk);
+       }
+
+       /* Get nominal (current) CPU frequency */
+       cur_frequency = clk_get_rate(clk);
+       if (!cur_frequency) {
+               dev_err(cpu_dev, "Failed to get clock rate for CPU\n");
+               return -EINVAL;
+       }
+
+       dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
+       if (!dvfs)
+               return -EINVAL;
+
+       armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
+
+       for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
+            load_lvl++) {
+               unsigned long freq = cur_frequency / dvfs->divider[load_lvl];
+
+               ret = dev_pm_opp_add(cpu_dev, freq, 0);
+               if (ret) {
+                       /* clean up the OPPs already added before leaving */
+                       while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
+                               freq = cur_frequency / dvfs->divider[load_lvl];
+                               dev_pm_opp_remove(cpu_dev, freq);
+                       }
+                       return ret;
+               }
+       }
+
+       /* Now that everything is set up, enable DVFS at the hardware level */
+       armada37xx_cpufreq_enable_dvfs(nb_pm_base);
+
+       pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+       return PTR_ERR_OR_ZERO(pdev);
+}
+/* late_initcall, to guarantee the driver is loaded after the A37xx clock driver */
+late_initcall(armada37xx_cpufreq_driver_init);
+
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
+MODULE_LICENSE("GPL");
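
For a concrete feel of what the init loop above registers: the driver selects the divider[] row matching the nominal clock rate and adds one OPP per load level, i.e. the nominal rate divided by each entry of that row. Below is a standalone user-space illustration (not driver code) for the 1.2 GHz row of the table.

/*
 * Standalone illustration of the OPPs registered for the 1.2 GHz row:
 * nominal rate divided by each divider[] entry.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long cur_frequency = 1200UL * 1000 * 1000; /* 1.2 GHz nominal */
	const unsigned char divider[4] = { 1, 2, 4, 6 };          /* row for 1.2 GHz */

	for (int load_lvl = 0; load_lvl < 4; load_lvl++) {
		unsigned long freq = cur_frequency / divider[load_lvl];

		printf("load level %d -> %lu MHz\n", load_lvl, freq / 1000000);
	}
	return 0;	/* prints 1200, 600, 300 and 200 MHz */
}
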
index ecc56e2..3b585e4 100644 (file)
@@ -108,6 +108,14 @@ static const struct of_device_id blacklist[] __initconst = {
 
        { .compatible = "marvell,armadaxp", },
 
+       { .compatible = "mediatek,mt2701", },
+       { .compatible = "mediatek,mt2712", },
+       { .compatible = "mediatek,mt7622", },
+       { .compatible = "mediatek,mt7623", },
+       { .compatible = "mediatek,mt817x", },
+       { .compatible = "mediatek,mt8173", },
+       { .compatible = "mediatek,mt8176", },
+
        { .compatible = "nvidia,tegra124", },
 
        { .compatible = "st,stih407", },
index 545946a..de3d104 100644 (file)
@@ -319,33 +319,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
 static void cpufreq_ready(struct cpufreq_policy *policy)
 {
        struct private_data *priv = policy->driver_data;
-       struct device_node *np = of_node_get(priv->cpu_dev->of_node);
 
-       if (WARN_ON(!np))
-               return;
-
-       /*
-        * For now, just loading the cooling device;
-        * thermal DT code takes care of matching them.
-        */
-       if (of_find_property(np, "#cooling-cells", NULL)) {
-               u32 power_coefficient = 0;
-
-               of_property_read_u32(np, "dynamic-power-coefficient",
-                                    &power_coefficient);
-
-               priv->cdev = of_cpufreq_power_cooling_register(np,
-                               policy, power_coefficient, NULL);
-               if (IS_ERR(priv->cdev)) {
-                       dev_err(priv->cpu_dev,
-                               "running cpufreq without cooling device: %ld\n",
-                               PTR_ERR(priv->cdev));
-
-                       priv->cdev = NULL;
-               }
-       }
-
-       of_node_put(np);
+       priv->cdev = of_cpufreq_cooling_register(policy);
 }
 
 static struct cpufreq_driver dt_cpufreq_driver = {
index 41d148a..421f318 100644 (file)
@@ -601,19 +601,18 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
 /**
  * cpufreq_parse_governor - parse a governor string
  */
-static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
-                               struct cpufreq_governor **governor)
+static int cpufreq_parse_governor(char *str_governor,
+                                 struct cpufreq_policy *policy)
 {
-       int err = -EINVAL;
-
        if (cpufreq_driver->setpolicy) {
                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
-                       *policy = CPUFREQ_POLICY_PERFORMANCE;
-                       err = 0;
-               } else if (!strncasecmp(str_governor, "powersave",
-                                               CPUFREQ_NAME_LEN)) {
-                       *policy = CPUFREQ_POLICY_POWERSAVE;
-                       err = 0;
+                       policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+                       return 0;
+               }
+
+               if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+                       policy->policy = CPUFREQ_POLICY_POWERSAVE;
+                       return 0;
                }
        } else {
                struct cpufreq_governor *t;
@@ -621,26 +620,31 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                mutex_lock(&cpufreq_governor_mutex);
 
                t = find_governor(str_governor);
-
-               if (t == NULL) {
+               if (!t) {
                        int ret;
 
                        mutex_unlock(&cpufreq_governor_mutex);
+
                        ret = request_module("cpufreq_%s", str_governor);
-                       mutex_lock(&cpufreq_governor_mutex);
+                       if (ret)
+                               return -EINVAL;
 
-                       if (ret == 0)
-                               t = find_governor(str_governor);
-               }
+                       mutex_lock(&cpufreq_governor_mutex);
 
-               if (t != NULL) {
-                       *governor = t;
-                       err = 0;
+                       t = find_governor(str_governor);
                }
+               if (t && !try_module_get(t->owner))
+                       t = NULL;
 
                mutex_unlock(&cpufreq_governor_mutex);
+
+               if (t) {
+                       policy->governor = t;
+                       return 0;
+               }
        }
-       return err;
+
+       return -EINVAL;
 }
 
 /**
@@ -760,11 +764,14 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
        if (ret != 1)
                return -EINVAL;
 
-       if (cpufreq_parse_governor(str_governor, &new_policy.policy,
-                                               &new_policy.governor))
+       if (cpufreq_parse_governor(str_governor, &new_policy))
                return -EINVAL;
 
        ret = cpufreq_set_policy(policy, &new_policy);
+
+       if (new_policy.governor)
+               module_put(new_policy.governor->owner);
+
        return ret ? ret : count;
 }
 
@@ -1044,8 +1051,7 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
                if (policy->last_policy)
                        new_policy.policy = policy->last_policy;
                else
-                       cpufreq_parse_governor(gov->name, &new_policy.policy,
-                                              NULL);
+                       cpufreq_parse_governor(gov->name, &new_policy);
        }
        /* set default policy */
        return cpufreq_set_policy(policy, &new_policy);
@@ -2160,7 +2166,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
        mutex_lock(&cpufreq_governor_mutex);
        list_del(&governor->governor_list);
        mutex_unlock(&cpufreq_governor_mutex);
-       return;
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
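
The net effect of the cpufreq_parse_governor() rework above is that a successful parse now also pins the governor's owning module via try_module_get(), and the sysfs store path drops that reference once cpufreq_set_policy() has taken its own. A compressed sketch of that caller discipline, mirroring store_scaling_governor(), is shown below; it is illustrative only, since both helpers are static to cpufreq.c.

/*
 * Illustrative only: cpufreq_parse_governor() and cpufreq_set_policy() are
 * static to drivers/cpufreq/cpufreq.c, so this mirrors the in-file caller
 * rather than an external API.
 */
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/string.h>

static int example_set_governor(struct cpufreq_policy *policy, char *name)
{
	struct cpufreq_policy new_policy;
	int ret;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* On success this now holds a reference on the governor module. */
	ret = cpufreq_parse_governor(name, &new_policy);
	if (ret)
		return ret;

	/* cpufreq_set_policy() takes its own reference on the governor. */
	ret = cpufreq_set_policy(policy, &new_policy);

	/* Drop the parse-time reference (governor is NULL for setpolicy drivers). */
	if (new_policy.governor)
		module_put(new_policy.governor->owner);

	return ret;
}
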
 
index 58d4f4e..ca38229 100644 (file)
@@ -22,6 +22,8 @@
 
 #include "cpufreq_governor.h"
 
+#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL      (2 * TICK_NSEC / NSEC_PER_USEC)
+
 static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
 
 static DEFINE_MUTEX(gov_dbs_data_mutex);
@@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
 {
        struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct policy_dbs_info *policy_dbs;
+       unsigned int sampling_interval;
        int ret;
-       ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
-       if (ret != 1)
+
+       ret = sscanf(buf, "%u", &sampling_interval);
+       if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
                return -EINVAL;
 
+       dbs_data->sampling_rate = sampling_interval;
+
        /*
         * We are operating under dbs_data->mutex and so the list and its
         * entries can't be freed concurrently.
@@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
        if (ret)
                goto free_policy_dbs_info;
 
-       dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);
+       /*
+        * The sampling interval should not be less than the transition latency
+        * of the CPU and it also cannot be too small for dbs_update() to work
+        * correctly.
+        */
+       dbs_data->sampling_rate = max_t(unsigned int,
+                                       CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
+                                       cpufreq_policy_transition_delay_us(policy));
 
        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;
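
CPUFREQ_DBS_MIN_SAMPLING_INTERVAL above works out to two scheduler ticks expressed in microseconds, so the enforced floor depends on CONFIG_HZ: roughly 20000 us at HZ=100, 8000 us at HZ=250 and 2000 us at HZ=1000. A standalone illustration of the arithmetic (user space, with TICK_NSEC approximated as NSEC_PER_SEC/HZ):

/* Userspace illustration of the minimum dbs sampling interval per HZ. */
#include <stdio.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_USEC 1000ULL

int main(void)
{
	const unsigned int hz_values[] = { 100, 250, 300, 1000 };

	for (unsigned int i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
		unsigned int hz = hz_values[i];
		unsigned long long tick_nsec = NSEC_PER_SEC / hz; /* ~TICK_NSEC */
		unsigned long long min_us = 2 * tick_nsec / NSEC_PER_USEC;

		/* HZ=100 -> 20000 us, HZ=250 -> 8000 us, HZ=1000 -> 2000 us */
		printf("HZ=%u: min sampling interval = %llu us\n", hz, min_us);
	}
	return 0;
}
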
index 1e55b57..1572129 100644 (file)
@@ -27,7 +27,7 @@ struct cpufreq_stats {
        unsigned int *trans_table;
 };
 
-static int cpufreq_stats_update(struct cpufreq_stats *stats)
+static void cpufreq_stats_update(struct cpufreq_stats *stats)
 {
        unsigned long long cur_time = get_jiffies_64();
 
@@ -35,7 +35,6 @@ static int cpufreq_stats_update(struct cpufreq_stats *stats)
        stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
        stats->last_time = cur_time;
        spin_unlock(&cpufreq_stats_lock);
-       return 0;
 }
 
 static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
index 628fe89..741f22e 100644 (file)
@@ -25,15 +25,29 @@ static struct regulator *arm_reg;
 static struct regulator *pu_reg;
 static struct regulator *soc_reg;
 
-static struct clk *arm_clk;
-static struct clk *pll1_sys_clk;
-static struct clk *pll1_sw_clk;
-static struct clk *step_clk;
-static struct clk *pll2_pfd2_396m_clk;
-
-/* clk used by i.MX6UL */
-static struct clk *pll2_bus_clk;
-static struct clk *secondary_sel_clk;
+enum IMX6_CPUFREQ_CLKS {
+       ARM,
+       PLL1_SYS,
+       STEP,
+       PLL1_SW,
+       PLL2_PFD2_396M,
+       /* MX6UL requires two more clks */
+       PLL2_BUS,
+       SECONDARY_SEL,
+};
+#define IMX6Q_CPUFREQ_CLK_NUM          5
+#define IMX6UL_CPUFREQ_CLK_NUM         7
+
+static int num_clks;
+static struct clk_bulk_data clks[] = {
+       { .id = "arm" },
+       { .id = "pll1_sys" },
+       { .id = "step" },
+       { .id = "pll1_sw" },
+       { .id = "pll2_pfd2_396m" },
+       { .id = "pll2_bus" },
+       { .id = "secondary_sel" },
+};
 
 static struct device *cpu_dev;
 static bool free_opp;
@@ -53,7 +67,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 
        new_freq = freq_table[index].frequency;
        freq_hz = new_freq * 1000;
-       old_freq = clk_get_rate(arm_clk) / 1000;
+       old_freq = clk_get_rate(clks[ARM].clk) / 1000;
 
        opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
        if (IS_ERR(opp)) {
@@ -112,29 +126,35 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
                 * voltage of 528MHz, so lower the CPU frequency to one
                 * half before changing CPU frequency.
                 */
-               clk_set_rate(arm_clk, (old_freq >> 1) * 1000);
-               clk_set_parent(pll1_sw_clk, pll1_sys_clk);
-               if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk))
-                       clk_set_parent(secondary_sel_clk, pll2_bus_clk);
+               clk_set_rate(clks[ARM].clk, (old_freq >> 1) * 1000);
+               clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+               if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk))
+                       clk_set_parent(clks[SECONDARY_SEL].clk,
+                                      clks[PLL2_BUS].clk);
                else
-                       clk_set_parent(secondary_sel_clk, pll2_pfd2_396m_clk);
-               clk_set_parent(step_clk, secondary_sel_clk);
-               clk_set_parent(pll1_sw_clk, step_clk);
+                       clk_set_parent(clks[SECONDARY_SEL].clk,
+                                      clks[PLL2_PFD2_396M].clk);
+               clk_set_parent(clks[STEP].clk, clks[SECONDARY_SEL].clk);
+               clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+               if (freq_hz > clk_get_rate(clks[PLL2_BUS].clk)) {
+                       clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+                       clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+               }
        } else {
-               clk_set_parent(step_clk, pll2_pfd2_396m_clk);
-               clk_set_parent(pll1_sw_clk, step_clk);
-               if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
-                       clk_set_rate(pll1_sys_clk, new_freq * 1000);
-                       clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+               clk_set_parent(clks[STEP].clk, clks[PLL2_PFD2_396M].clk);
+               clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+               if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk)) {
+                       clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+                       clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
                } else {
                        /* pll1_sys needs to be enabled for divider rate change to work. */
                        pll1_sys_temp_enabled = true;
-                       clk_prepare_enable(pll1_sys_clk);
+                       clk_prepare_enable(clks[PLL1_SYS].clk);
                }
        }
 
        /* Ensure the arm clock divider is what we expect */
-       ret = clk_set_rate(arm_clk, new_freq * 1000);
+       ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
        if (ret) {
                dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
                regulator_set_voltage_tol(arm_reg, volt_old, 0);
@@ -143,7 +163,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 
        /* PLL1 is only needed until after ARM-PODF is set. */
        if (pll1_sys_temp_enabled)
-               clk_disable_unprepare(pll1_sys_clk);
+               clk_disable_unprepare(clks[PLL1_SYS].clk);
 
        /* scaling down?  scale voltage after frequency */
        if (new_freq < old_freq) {
@@ -174,7 +194,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret;
 
-       policy->clk = arm_clk;
+       policy->clk = clks[ARM].clk;
        ret = cpufreq_generic_init(policy, freq_table, transition_latency);
        policy->suspend_freq = policy->max;
 
@@ -226,23 +246,61 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
        val >>= OCOTP_CFG3_SPEED_SHIFT;
        val &= 0x3;
 
-       if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
-            of_machine_is_compatible("fsl,imx6q"))
-               if (dev_pm_opp_disable(dev, 1200000000))
-                       dev_warn(dev, "failed to disable 1.2GHz OPP\n");
        if (val < OCOTP_CFG3_SPEED_996MHZ)
                if (dev_pm_opp_disable(dev, 996000000))
                        dev_warn(dev, "failed to disable 996MHz OPP\n");
-       if (of_machine_is_compatible("fsl,imx6q")) {
+
+       if (of_machine_is_compatible("fsl,imx6q") ||
+           of_machine_is_compatible("fsl,imx6qp")) {
                if (val != OCOTP_CFG3_SPEED_852MHZ)
                        if (dev_pm_opp_disable(dev, 852000000))
                                dev_warn(dev, "failed to disable 852MHz OPP\n");
+               if (val != OCOTP_CFG3_SPEED_1P2GHZ)
+                       if (dev_pm_opp_disable(dev, 1200000000))
+                               dev_warn(dev, "failed to disable 1.2GHz OPP\n");
        }
        iounmap(base);
 put_node:
        of_node_put(np);
 }
 
+#define OCOTP_CFG3_6UL_SPEED_696MHZ    0x2
+
+static void imx6ul_opp_check_speed_grading(struct device *dev)
+{
+       struct device_node *np;
+       void __iomem *base;
+       u32 val;
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
+       if (!np)
+               return;
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               dev_err(dev, "failed to map ocotp\n");
+               goto put_node;
+       }
+
+       /*
+        * Speed GRADING[1:0] defines the max speed of ARM:
+        * 2b'00: Reserved;
+        * 2b'01: 528000000Hz;
+        * 2b'10: 696000000Hz;
+        * 2b'11: Reserved;
+        * We need to set the max speed of ARM according to the fuse map.
+        */
+       val = readl_relaxed(base + OCOTP_CFG3);
+       val >>= OCOTP_CFG3_SPEED_SHIFT;
+       val &= 0x3;
+       if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
+               if (dev_pm_opp_disable(dev, 696000000))
+                       dev_warn(dev, "failed to disable 696MHz OPP\n");
+       iounmap(base);
+put_node:
+       of_node_put(np);
+}
+
 static int imx6q_cpufreq_probe(struct platform_device *pdev)
 {
        struct device_node *np;
@@ -265,28 +323,15 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
                return -ENOENT;
        }
 
-       arm_clk = clk_get(cpu_dev, "arm");
-       pll1_sys_clk = clk_get(cpu_dev, "pll1_sys");
-       pll1_sw_clk = clk_get(cpu_dev, "pll1_sw");
-       step_clk = clk_get(cpu_dev, "step");
-       pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m");
-       if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
-           IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
-               dev_err(cpu_dev, "failed to get clocks\n");
-               ret = -ENOENT;
-               goto put_clk;
-       }
-
        if (of_machine_is_compatible("fsl,imx6ul") ||
-           of_machine_is_compatible("fsl,imx6ull")) {
-               pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
-               secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
-               if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
-                       dev_err(cpu_dev, "failed to get clocks specific to imx6ul\n");
-                       ret = -ENOENT;
-                       goto put_clk;
-               }
-       }
+           of_machine_is_compatible("fsl,imx6ull"))
+               num_clks = IMX6UL_CPUFREQ_CLK_NUM;
+       else
+               num_clks = IMX6Q_CPUFREQ_CLK_NUM;
+
+       ret = clk_bulk_get(cpu_dev, num_clks, clks);
+       if (ret)
+               goto put_node;
 
        arm_reg = regulator_get(cpu_dev, "arm");
        pu_reg = regulator_get_optional(cpu_dev, "pu");
@@ -310,7 +355,10 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
                goto put_reg;
        }
 
-       imx6q_opp_check_speed_grading(cpu_dev);
+       if (of_machine_is_compatible("fsl,imx6ul"))
+               imx6ul_opp_check_speed_grading(cpu_dev);
+       else
+               imx6q_opp_check_speed_grading(cpu_dev);
 
        /* Because we have added the OPPs here, we must free them */
        free_opp = true;
@@ -423,22 +471,11 @@ put_reg:
                regulator_put(pu_reg);
        if (!IS_ERR(soc_reg))
                regulator_put(soc_reg);
-put_clk:
-       if (!IS_ERR(arm_clk))
-               clk_put(arm_clk);
-       if (!IS_ERR(pll1_sys_clk))
-               clk_put(pll1_sys_clk);
-       if (!IS_ERR(pll1_sw_clk))
-               clk_put(pll1_sw_clk);
-       if (!IS_ERR(step_clk))
-               clk_put(step_clk);
-       if (!IS_ERR(pll2_pfd2_396m_clk))
-               clk_put(pll2_pfd2_396m_clk);
-       if (!IS_ERR(pll2_bus_clk))
-               clk_put(pll2_bus_clk);
-       if (!IS_ERR(secondary_sel_clk))
-               clk_put(secondary_sel_clk);
+
+       clk_bulk_put(num_clks, clks);
+put_node:
        of_node_put(np);
+
        return ret;
 }
 
@@ -452,13 +489,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
        if (!IS_ERR(pu_reg))
                regulator_put(pu_reg);
        regulator_put(soc_reg);
-       clk_put(arm_clk);
-       clk_put(pll1_sys_clk);
-       clk_put(pll1_sw_clk);
-       clk_put(step_clk);
-       clk_put(pll2_pfd2_396m_clk);
-       clk_put(pll2_bus_clk);
-       clk_put(secondary_sel_clk);
+
+       clk_bulk_put(num_clks, clks);
 
        return 0;
 }
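
The i.MX6 conversion above trades seven individual clk_get()/clk_put() calls for the clk_bulk_*() helpers: one clk_bulk_data array keyed by clock name, fetched and released as a unit, with individual clocks reached through the .clk member. A minimal sketch of the pattern follows; the clock names and the consumer function are made up, the helpers are the real API.

/*
 * Minimal clk_bulk sketch with assumed clock names; not taken from the
 * patch above.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

static struct clk_bulk_data example_clks[] = {
	{ .id = "arm" },
	{ .id = "pll1_sys" },
	{ .id = "step" },
};

static int example_get_clocks(struct device *dev)
{
	int ret;

	/* One call fetches them all, or fails as a unit (incl. -EPROBE_DEFER). */
	ret = clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
	if (ret)
		return ret;

	/* Individual clocks are reached through the .clk member by index. */
	clk_set_rate(example_clks[0].clk, 792000000);

	clk_bulk_put(ARRAY_SIZE(example_clks), example_clks);
	return 0;
}
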
index 93a0e88..7edf7a0 100644 (file)
@@ -1595,15 +1595,6 @@ static const struct pstate_funcs knl_funcs = {
        .get_val = core_get_val,
 };
 
-static const struct pstate_funcs bxt_funcs = {
-       .get_max = core_get_max_pstate,
-       .get_max_physical = core_get_max_pstate_physical,
-       .get_min = core_get_min_pstate,
-       .get_turbo = core_get_turbo_pstate,
-       .get_scaling = core_get_scaling,
-       .get_val = core_get_val,
-};
-
 #define ICPU(model, policy) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
                        (unsigned long)&policy }
@@ -1627,8 +1618,9 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(INTEL_FAM6_BROADWELL_XEON_D,       core_funcs),
        ICPU(INTEL_FAM6_XEON_PHI_KNL,           knl_funcs),
        ICPU(INTEL_FAM6_XEON_PHI_KNM,           knl_funcs),
-       ICPU(INTEL_FAM6_ATOM_GOLDMONT,          bxt_funcs),
-       ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE,       bxt_funcs),
+       ICPU(INTEL_FAM6_ATOM_GOLDMONT,          core_funcs),
+       ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE,       core_funcs),
+       ICPU(INTEL_FAM6_SKYLAKE_X,              core_funcs),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
index c46a12d..5faa37c 100644 (file)
@@ -894,7 +894,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
        if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
                longhaul_setup_voltagescaling();
 
-       policy->cpuinfo.transition_latency = 200000;    /* nsec */
+       policy->transition_delay_us = 200000;   /* usec */
 
        return cpufreq_table_validate_and_show(policy, longhaul_table);
 }
index 18c4bd9..8c04ddd 100644 (file)
@@ -310,28 +310,8 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
 static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
 {
        struct mtk_cpu_dvfs_info *info = policy->driver_data;
-       struct device_node *np = of_node_get(info->cpu_dev->of_node);
-       u32 capacitance = 0;
 
-       if (WARN_ON(!np))
-               return;
-
-       if (of_find_property(np, "#cooling-cells", NULL)) {
-               of_property_read_u32(np, DYNAMIC_POWER, &capacitance);
-
-               info->cdev = of_cpufreq_power_cooling_register(np,
-                                               policy, capacitance, NULL);
-
-               if (IS_ERR(info->cdev)) {
-                       dev_err(info->cpu_dev,
-                               "running cpufreq without cooling device: %ld\n",
-                               PTR_ERR(info->cdev));
-
-                       info->cdev = NULL;
-               }
-       }
-
-       of_node_put(np);
+       info->cdev = of_cpufreq_cooling_register(policy);
 }
 
 static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
@@ -574,6 +554,7 @@ static struct platform_driver mtk_cpufreq_platdrv = {
 /* List of machines supported by this driver */
 static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
        { .compatible = "mediatek,mt2701", },
+       { .compatible = "mediatek,mt2712", },
        { .compatible = "mediatek,mt7622", },
        { .compatible = "mediatek,mt7623", },
        { .compatible = "mediatek,mt817x", },
@@ -620,3 +601,7 @@ static int __init mtk_cpufreq_driver_init(void)
        return 0;
 }
 device_initcall(mtk_cpufreq_driver_init);
+
+MODULE_DESCRIPTION("MediaTek CPUFreq driver");
+MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
+MODULE_LICENSE("GPL v2");
index ed915ee..31513bd 100644 (file)
@@ -76,12 +76,6 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
                        return PTR_ERR(clk);
                }
 
-               /*
-                * In case of a failure of dev_pm_opp_add(), we don't
-                * bother with cleaning up the registered OPP (there's
-                * no function to do so), and simply cancel the
-                * registration of the cpufreq device.
-                */
                ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
                if (ret) {
                        clk_put(clk);
@@ -91,7 +85,8 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
                ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
                if (ret) {
                        clk_put(clk);
-                       return ret;
+                       dev_err(cpu_dev, "Failed to register OPPs\n");
+                       goto opp_register_failed;
                }
 
                ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
@@ -99,9 +94,16 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
                if (ret)
                        dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                                __func__, ret);
+               clk_put(clk);
        }
 
        platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
        return 0;
+
+opp_register_failed:
+       /* As registering has failed, remove all the OPPs for all CPUs */
+       dev_pm_opp_cpumask_remove_table(cpu_possible_mask);
+
+       return ret;
 }
 device_initcall(armada_xp_pmsu_cpufreq_init);
index b6d7c4c..29cdec1 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
+#include <linux/hashtable.h>
 #include <trace/events/power.h>
 
 #include <asm/cputhreads.h>
 #include <asm/opal.h>
 #include <linux/timer.h>
 
-#define POWERNV_MAX_PSTATES    256
+#define POWERNV_MAX_PSTATES_ORDER  8
+#define POWERNV_MAX_PSTATES    (1UL << (POWERNV_MAX_PSTATES_ORDER))
 #define PMSR_PSAFE_ENABLE      (1UL << 30)
 #define PMSR_SPR_EM_DISABLE    (1UL << 31)
-#define PMSR_MAX(x)            ((x >> 32) & 0xFF)
+#define MAX_PSTATE_SHIFT       32
 #define LPSTATE_SHIFT          48
 #define GPSTATE_SHIFT          56
-#define GET_LPSTATE(x)         (((x) >> LPSTATE_SHIFT) & 0xFF)
-#define GET_GPSTATE(x)         (((x) >> GPSTATE_SHIFT) & 0xFF)
 
 #define MAX_RAMP_DOWN_TIME                             5120
 /*
@@ -94,6 +94,27 @@ struct global_pstate_info {
 };
 
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+
+DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
+/**
+ * struct pstate_idx_revmap_data: Entry in the hashmap pstate_revmap
+ *                               indexed by a function of pstate id.
+ *
+ * @pstate_id: pstate id for this entry.
+ *
+ * @cpufreq_table_idx: Index into the powernv_freqs
+ *                    cpufreq_frequency_table for frequency
+ *                    corresponding to pstate_id.
+ *
+ * @hentry: hlist_node that hooks this entry into the pstate_revmap
+ *         hashtable
+ */
+struct pstate_idx_revmap_data {
+       u8 pstate_id;
+       unsigned int cpufreq_table_idx;
+       struct hlist_node hentry;
+};
+
 static bool rebooting, throttled, occ_reset;
 
 static const char * const throttle_reason[] = {
@@ -148,39 +169,56 @@ static struct powernv_pstate_info {
        bool wof_enabled;
 } powernv_pstate_info;
 
-/* Use following macros for conversions between pstate_id and index */
-static inline int idx_to_pstate(unsigned int i)
+static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
+{
+       return ((pmsr_val >> shift) & 0xFF);
+}
+
+#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
+#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
+#define extract_max_pstate(x)  extract_pstate(x, MAX_PSTATE_SHIFT)
+
+/* Use following functions for conversions between pstate_id and index */
+
+/**
+ * idx_to_pstate : Returns the pstate id corresponding to the
+ *                frequency in the cpufreq frequency table
+ *                powernv_freqs indexed by @i.
+ *
+ *                If @i is out of bounds, this will return the pstate
+ *                corresponding to the nominal frequency.
+ */
+static inline u8 idx_to_pstate(unsigned int i)
 {
        if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
-               pr_warn_once("index %u is out of bound\n", i);
+               pr_warn_once("idx_to_pstate: index %u is out of bound\n", i);
                return powernv_freqs[powernv_pstate_info.nominal].driver_data;
        }
 
        return powernv_freqs[i].driver_data;
 }
 
-static inline unsigned int pstate_to_idx(int pstate)
+/**
+ * pstate_to_idx : Returns the index in the cpufreq frequency table
+ *                powernv_freqs for the frequency whose corresponding
+ *                pstate id is @pstate.
+ *
+ *                If no frequency corresponding to @pstate is found,
+ *                this will return the index of the nominal
+ *                frequency.
+ */
+static unsigned int pstate_to_idx(u8 pstate)
 {
-       int min = powernv_freqs[powernv_pstate_info.min].driver_data;
-       int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+       unsigned int key = pstate % POWERNV_MAX_PSTATES;
+       struct pstate_idx_revmap_data *revmap_data;
 
-       if (min > 0) {
-               if (unlikely((pstate < max) || (pstate > min))) {
-                       pr_warn_once("pstate %d is out of bound\n", pstate);
-                       return powernv_pstate_info.nominal;
-               }
-       } else {
-               if (unlikely((pstate > max) || (pstate < min))) {
-                       pr_warn_once("pstate %d is out of bound\n", pstate);
-                       return powernv_pstate_info.nominal;
-               }
+       hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
+               if (revmap_data->pstate_id == pstate)
+                       return revmap_data->cpufreq_table_idx;
        }
-       /*
-        * abs() is deliberately used so that is works with
-        * both monotonically increasing and decreasing
-        * pstate values
-        */
-       return abs(pstate - idx_to_pstate(powernv_pstate_info.max));
+
+       pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate);
+       return powernv_pstate_info.nominal;
 }
 
 static inline void reset_gpstates(struct cpufreq_policy *policy)
@@ -247,7 +285,7 @@ static int init_powernv_pstates(void)
                powernv_pstate_info.wof_enabled = true;
 
 next:
-       pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
+       pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
                pstate_nominal, pstate_max);
        pr_info("Workload Optimized Frequency is %s in the platform\n",
                (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
@@ -278,19 +316,30 @@ next:
 
        powernv_pstate_info.nr_pstates = nr_pstates;
        pr_debug("NR PStates %d\n", nr_pstates);
+
        for (i = 0; i < nr_pstates; i++) {
                u32 id = be32_to_cpu(pstate_ids[i]);
                u32 freq = be32_to_cpu(pstate_freqs[i]);
+               struct pstate_idx_revmap_data *revmap_data;
+               unsigned int key;
 
                pr_debug("PState id %d freq %d MHz\n", id, freq);
                powernv_freqs[i].frequency = freq * 1000; /* kHz */
-               powernv_freqs[i].driver_data = id;
+               powernv_freqs[i].driver_data = id & 0xFF;
+
+               revmap_data = (struct pstate_idx_revmap_data *)
+                             kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+
+               revmap_data->pstate_id = id & 0xFF;
+               revmap_data->cpufreq_table_idx = i;
+               key = (revmap_data->pstate_id) % POWERNV_MAX_PSTATES;
+               hash_add(pstate_revmap, &revmap_data->hentry, key);
 
                if (id == pstate_max)
                        powernv_pstate_info.max = i;
-               else if (id == pstate_nominal)
+               if (id == pstate_nominal)
                        powernv_pstate_info.nominal = i;
-               else if (id == pstate_min)
+               if (id == pstate_min)
                        powernv_pstate_info.min = i;
 
                if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
@@ -307,14 +356,13 @@ next:
 }
 
 /* Returns the CPU frequency corresponding to the pstate_id. */
-static unsigned int pstate_id_to_freq(int pstate_id)
+static unsigned int pstate_id_to_freq(u8 pstate_id)
 {
        int i;
 
        i = pstate_to_idx(pstate_id);
        if (i >= powernv_pstate_info.nr_pstates || i < 0) {
-               pr_warn("PState id %d outside of PState table, "
-                       "reporting nominal id %d instead\n",
+               pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n",
                        pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
                i = powernv_pstate_info.nominal;
        }
@@ -420,8 +468,8 @@ static inline void set_pmspr(unsigned long sprn, unsigned long val)
  */
 struct powernv_smp_call_data {
        unsigned int freq;
-       int pstate_id;
-       int gpstate_id;
+       u8 pstate_id;
+       u8 gpstate_id;
 };
 
 /*
@@ -438,22 +486,15 @@ struct powernv_smp_call_data {
 static void powernv_read_cpu_freq(void *arg)
 {
        unsigned long pmspr_val;
-       s8 local_pstate_id;
        struct powernv_smp_call_data *freq_data = arg;
 
        pmspr_val = get_pmspr(SPRN_PMSR);
-
-       /*
-        * The local pstate id corresponds bits 48..55 in the PMSR.
-        * Note: Watch out for the sign!
-        */
-       local_pstate_id = (pmspr_val >> 48) & 0xFF;
-       freq_data->pstate_id = local_pstate_id;
+       freq_data->pstate_id = extract_local_pstate(pmspr_val);
        freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
 
-       pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
-               raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
-               freq_data->freq);
+       pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n",
+                raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
+                freq_data->freq);
 }
 
 /*
@@ -515,21 +556,21 @@ static void powernv_cpufreq_throttle_check(void *data)
        struct chip *chip;
        unsigned int cpu = smp_processor_id();
        unsigned long pmsr;
-       int pmsr_pmax;
+       u8 pmsr_pmax;
        unsigned int pmsr_pmax_idx;
 
        pmsr = get_pmspr(SPRN_PMSR);
        chip = this_cpu_read(chip_info);
 
        /* Check for Pmax Capping */
-       pmsr_pmax = (s8)PMSR_MAX(pmsr);
+       pmsr_pmax = extract_max_pstate(pmsr);
        pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
        if (pmsr_pmax_idx != powernv_pstate_info.max) {
                if (chip->throttled)
                        goto next;
                chip->throttled = true;
                if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
-                       pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n",
+                       pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below that of nominal frequency(0x%x)\n",
                                     cpu, chip->id, pmsr_pmax,
                                     idx_to_pstate(powernv_pstate_info.nominal));
                        chip->throttle_sub_turbo++;
@@ -645,8 +686,8 @@ void gpstate_timer_handler(struct timer_list *t)
         * value. Hence, read from PMCR to get correct data.
         */
        val = get_pmspr(SPRN_PMCR);
-       freq_data.gpstate_id = (s8)GET_GPSTATE(val);
-       freq_data.pstate_id = (s8)GET_LPSTATE(val);
+       freq_data.gpstate_id = extract_global_pstate(val);
+       freq_data.pstate_id = extract_local_pstate(val);
        if (freq_data.gpstate_id  == freq_data.pstate_id) {
                reset_gpstates(policy);
                spin_unlock(&gpstates->gpstate_lock);
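
The reverse map introduced above replaces the old arithmetic in pstate_to_idx() with a <linux/hashtable.h> lookup keyed on the 8-bit pstate id, which works whether the platform numbers pstates in increasing or decreasing order. A generic sketch of that kernel hashtable pattern is shown below; the names are illustrative rather than the powernv ones.

/*
 * Generic sketch of the kernel hashtable reverse-map pattern; names are
 * illustrative, not tied to powernv.
 */
#include <linux/errno.h>
#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/types.h>

struct revmap_entry {
	u8 key_id;                 /* what we look things up by */
	unsigned int value;        /* what we want back */
	struct hlist_node hentry;  /* links the entry into its bucket */
};

static DEFINE_HASHTABLE(example_revmap, 8);	/* 2^8 buckets */

static void example_insert(u8 key_id, unsigned int value)
{
	struct revmap_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return;
	e->key_id = key_id;
	e->value = value;
	hash_add(example_revmap, &e->hentry, key_id);
}

static int example_lookup(u8 key_id, unsigned int *value)
{
	struct revmap_entry *e;

	/* Only entries hashing to the same bucket are visited. */
	hash_for_each_possible(example_revmap, e, hentry, key_id) {
		if (e->key_id == key_id) {
			*value = e->value;
			return 0;
		}
	}
	return -ENOENT;
}
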
index 4ada55b..0562761 100644 (file)
@@ -275,20 +275,8 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
 static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
 {
        struct cpu_data *cpud = policy->driver_data;
-       struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
 
-       if (of_find_property(np, "#cooling-cells", NULL)) {
-               cpud->cdev = of_cpufreq_cooling_register(np, policy);
-
-               if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
-                       pr_err("cpu%d is not running as cooling device: %ld\n",
-                                       policy->cpu, PTR_ERR(cpud->cdev));
-
-                       cpud->cdev = NULL;
-               }
-       }
-
-       of_node_put(np);
+       cpud->cdev = of_cpufreq_cooling_register(policy);
 }
 
 static struct cpufreq_driver qoriq_cpufreq_driver = {
index 05d2990..247fcbf 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/clk.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/export.h>
 #include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
 #include <linux/pm_opp.h>
 #include <linux/scpi_protocol.h>
+#include <linux/slab.h>
 #include <linux/types.h>
 
-#include "arm_big_little.h"
+struct scpi_data {
+       struct clk *clk;
+       struct device *cpu_dev;
+       struct thermal_cooling_device *cdev;
+};
 
 static struct scpi_ops *scpi_ops;
 
-static int scpi_get_transition_latency(struct device *cpu_dev)
+static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
+{
+       struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+       struct scpi_data *priv = policy->driver_data;
+       unsigned long rate = clk_get_rate(priv->clk);
+
+       return rate / 1000;
+}
+
+static int
+scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+       struct scpi_data *priv = policy->driver_data;
+       u64 rate = policy->freq_table[index].frequency * 1000;
+       int ret;
+
+       ret = clk_set_rate(priv->clk, rate);
+       if (!ret && (clk_get_rate(priv->clk) != rate))
+               ret = -EIO;
+
+       return ret;
+}
+
+static int
+scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 {
-       return scpi_ops->get_transition_latency(cpu_dev);
+       int cpu, domain, tdomain;
+       struct device *tcpu_dev;
+
+       domain = scpi_ops->device_domain_id(cpu_dev);
+       if (domain < 0)
+               return domain;
+
+       for_each_possible_cpu(cpu) {
+               if (cpu == cpu_dev->id)
+                       continue;
+
+               tcpu_dev = get_cpu_device(cpu);
+               if (!tcpu_dev)
+                       continue;
+
+               tdomain = scpi_ops->device_domain_id(tcpu_dev);
+               if (tdomain == domain)
+                       cpumask_set_cpu(cpu, cpumask);
+       }
+
+       return 0;
 }
 
-static int scpi_init_opp_table(const struct cpumask *cpumask)
+static int scpi_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret;
-       struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+       unsigned int latency;
+       struct device *cpu_dev;
+       struct scpi_data *priv;
+       struct cpufreq_frequency_table *freq_table;
+
+       cpu_dev = get_cpu_device(policy->cpu);
+       if (!cpu_dev) {
+               pr_err("failed to get cpu%d device\n", policy->cpu);
+               return -ENODEV;
+       }
 
        ret = scpi_ops->add_opps_to_device(cpu_dev);
        if (ret) {
@@ -46,32 +108,133 @@ static int scpi_init_opp_table(const struct cpumask *cpumask)
                return ret;
        }
 
-       ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
-       if (ret)
+       ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);
+       if (ret) {
+               dev_warn(cpu_dev, "failed to get sharing cpumask\n");
+               return ret;
+       }
+
+       ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+       if (ret) {
                dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                        __func__, ret);
+               return ret;
+       }
+
+       ret = dev_pm_opp_get_opp_count(cpu_dev);
+       if (ret <= 0) {
+               dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
+               ret = -EPROBE_DEFER;
+               goto out_free_opp;
+       }
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               ret = -ENOMEM;
+               goto out_free_opp;
+       }
+
+       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+       if (ret) {
+               dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+               goto out_free_priv;
+       }
+
+       priv->cpu_dev = cpu_dev;
+       priv->clk = clk_get(cpu_dev, NULL);
+       if (IS_ERR(priv->clk)) {
+               dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n",
+                       __func__, cpu_dev->id);
+               goto out_free_cpufreq_table;
+       }
+
+       policy->driver_data = priv;
+
+       ret = cpufreq_table_validate_and_show(policy, freq_table);
+       if (ret) {
+               dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
+                       ret);
+               goto out_put_clk;
+       }
+
+       /* scpi allows DVFS request for any domain from any CPU */
+       policy->dvfs_possible_from_any_cpu = true;
+
+       latency = scpi_ops->get_transition_latency(cpu_dev);
+       if (!latency)
+               latency = CPUFREQ_ETERNAL;
+
+       policy->cpuinfo.transition_latency = latency;
+
+       policy->fast_switch_possible = false;
+       return 0;
+
+out_put_clk:
+       clk_put(priv->clk);
+out_free_cpufreq_table:
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+       kfree(priv);
+out_free_opp:
+       dev_pm_opp_cpumask_remove_table(policy->cpus);
+
        return ret;
 }
 
-static const struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
-       .name   = "scpi",
-       .get_transition_latency = scpi_get_transition_latency,
-       .init_opp_table = scpi_init_opp_table,
-       .free_opp_table = dev_pm_opp_cpumask_remove_table,
+static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
+{
+       struct scpi_data *priv = policy->driver_data;
+
+       cpufreq_cooling_unregister(priv->cdev);
+       clk_put(priv->clk);
+       dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+       kfree(priv);
+       dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+
+       return 0;
+}
+
+static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
+{
+       struct scpi_data *priv = policy->driver_data;
+       struct thermal_cooling_device *cdev;
+
+       cdev = of_cpufreq_cooling_register(policy);
+       if (!IS_ERR(cdev))
+               priv->cdev = cdev;
+}
+
+static struct cpufreq_driver scpi_cpufreq_driver = {
+       .name   = "scpi-cpufreq",
+       .flags  = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+                 CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+       .verify = cpufreq_generic_frequency_table_verify,
+       .attr   = cpufreq_generic_attr,
+       .get    = scpi_cpufreq_get_rate,
+       .init   = scpi_cpufreq_init,
+       .exit   = scpi_cpufreq_exit,
+       .ready  = scpi_cpufreq_ready,
+       .target_index   = scpi_cpufreq_set_target,
 };
 
 static int scpi_cpufreq_probe(struct platform_device *pdev)
 {
+       int ret;
+
        scpi_ops = get_scpi_ops();
        if (!scpi_ops)
                return -EIO;
 
-       return bL_cpufreq_register(&scpi_cpufreq_ops);
+       ret = cpufreq_register_driver(&scpi_cpufreq_driver);
+       if (ret)
+               dev_err(&pdev->dev, "%s: registering cpufreq failed, err: %d\n",
+                       __func__, ret);
+       return ret;
 }
 
 static int scpi_cpufreq_remove(struct platform_device *pdev)
 {
-       bL_cpufreq_unregister(&scpi_cpufreq_ops);
+       cpufreq_unregister_driver(&scpi_cpufreq_driver);
        scpi_ops = NULL;
        return 0;
 }
index 923317f..a099b7b 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/cpu.h>
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -50,6 +51,7 @@ struct ti_cpufreq_soc_data {
        unsigned long efuse_mask;
        unsigned long efuse_shift;
        unsigned long rev_offset;
+       bool multi_regulator;
 };
 
 struct ti_cpufreq_data {
@@ -57,6 +59,7 @@ struct ti_cpufreq_data {
        struct device_node *opp_node;
        struct regmap *syscon;
        const struct ti_cpufreq_soc_data *soc_data;
+       struct opp_table *opp_table;
 };
 
 static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
@@ -95,6 +98,7 @@ static struct ti_cpufreq_soc_data am3x_soc_data = {
        .efuse_offset = 0x07fc,
        .efuse_mask = 0x1fff,
        .rev_offset = 0x600,
+       .multi_regulator = false,
 };
 
 static struct ti_cpufreq_soc_data am4x_soc_data = {
@@ -103,6 +107,7 @@ static struct ti_cpufreq_soc_data am4x_soc_data = {
        .efuse_offset = 0x0610,
        .efuse_mask = 0x3f,
        .rev_offset = 0x600,
+       .multi_regulator = false,
 };
 
 static struct ti_cpufreq_soc_data dra7_soc_data = {
@@ -111,6 +116,7 @@ static struct ti_cpufreq_soc_data dra7_soc_data = {
        .efuse_mask = 0xf80000,
        .efuse_shift = 19,
        .rev_offset = 0x204,
+       .multi_regulator = true,
 };
 
 /**
@@ -195,12 +201,14 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
        {},
 };
 
-static int ti_cpufreq_init(void)
+static int ti_cpufreq_probe(struct platform_device *pdev)
 {
        u32 version[VERSION_COUNT];
        struct device_node *np;
        const struct of_device_id *match;
+       struct opp_table *ti_opp_table;
        struct ti_cpufreq_data *opp_data;
+       const char * const reg_names[] = {"vdd", "vbb"};
        int ret;
 
        np = of_find_node_by_path("/");
@@ -247,16 +255,29 @@ static int ti_cpufreq_init(void)
        if (ret)
                goto fail_put_node;
 
-       ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
-                                                         version, VERSION_COUNT));
-       if (ret) {
+       ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
+                                                  version, VERSION_COUNT);
+       if (IS_ERR(ti_opp_table)) {
                dev_err(opp_data->cpu_dev,
                        "Failed to set supported hardware\n");
+               ret = PTR_ERR(ti_opp_table);
                goto fail_put_node;
        }
 
-       of_node_put(opp_data->opp_node);
+       opp_data->opp_table = ti_opp_table;
+
+       if (opp_data->soc_data->multi_regulator) {
+               ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
+                                                        reg_names,
+                                                        ARRAY_SIZE(reg_names));
+               if (IS_ERR(ti_opp_table)) {
+                       dev_pm_opp_put_supported_hw(opp_data->opp_table);
+                       ret = PTR_ERR(ti_opp_table);
+                       goto fail_put_node;
+               }
+       }
 
+       of_node_put(opp_data->opp_node);
 register_cpufreq_dt:
        platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
 
@@ -269,4 +290,22 @@ free_opp_data:
 
        return ret;
 }
-device_initcall(ti_cpufreq_init);
+
+static int ti_cpufreq_init(void)
+{
+       platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
+       return 0;
+}
+module_init(ti_cpufreq_init);
+
+static struct platform_driver ti_cpufreq_driver = {
+       .probe = ti_cpufreq_probe,
+       .driver = {
+               .name = "ti-cpufreq",
+       },
+};
+module_platform_driver(ti_cpufreq_driver);
+
+MODULE_DESCRIPTION("TI CPUFreq/OPP hw-supported driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
+MODULE_LICENSE("GPL v2");
index 4e78263..5d359af 100644 (file)
@@ -36,14 +36,15 @@ static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
 /**
  * cpuidle_switch_governor - changes the governor
  * @gov: the new target governor
- *
- * NOTE: "gov" can be NULL to specify disabled
  * Must be called with cpuidle_lock acquired.
  */
 int cpuidle_switch_governor(struct cpuidle_governor *gov)
 {
        struct cpuidle_device *dev;
 
+       if (!gov)
+               return -EINVAL;
+
        if (gov == cpuidle_curr_governor)
                return 0;
 
index 3e104f5..b56b3f7 100644 (file)
@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
        select CRYPTO_SHA256
        select CRYPTO_SHA512
        select CRYPTO_AUTHENC
+       select CRYPTO_GF128MUL
        ---help---
          The Chelsio Crypto Co-processor driver for T6 adapters.
 
index 89ba9e8..4bcef78 100644 (file)
@@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
                ndesc = ctx->handle_result(priv, ring, sreq->req,
                                           &should_complete, &ret);
                if (ndesc < 0) {
+                       kfree(sreq);
                        dev_err(priv->dev, "failed to handle result (%d)", ndesc);
                        return;
                }
index 5438552..fcc0a60 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <crypto/aes.h>
 #include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
 
 #include "safexcel.h"
 
@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
        unsigned int key_len;
 };
 
+struct safexcel_cipher_req {
+       bool needs_inv;
+};
+
 static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
                                  struct crypto_async_request *async,
                                  struct safexcel_command_desc *cdesc,
@@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
        return 0;
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
-                                 struct crypto_async_request *async,
-                                 bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+                                     struct crypto_async_request *async,
+                                     bool *should_complete, int *ret)
 {
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_result_desc *rdesc;
@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
        spin_unlock_bh(&priv->ring[ring].egress_lock);
 
        request->req = &req->base;
-       ctx->base.handle_result = safexcel_handle_result;
 
        *commands = n_cdesc;
        *results = n_rdesc;
@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;
-       ctx->base.needs_inv = false;
-       ctx->base.send = safexcel_aes_send;
 
        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
        return ndesc;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+                                 struct crypto_async_request *async,
+                                 bool *should_complete, int *ret)
+{
+       struct skcipher_request *req = skcipher_request_cast(async);
+       struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+       int err;
+
+       if (sreq->needs_inv) {
+               sreq->needs_inv = false;
+               err = safexcel_handle_inv_result(priv, ring, async,
+                                                should_complete, ret);
+       } else {
+               err = safexcel_handle_req_result(priv, ring, async,
+                                                should_complete, ret);
+       }
+
+       return err;
+}
+
 static int safexcel_cipher_send_inv(struct crypto_async_request *async,
                                    int ring, struct safexcel_request *request,
                                    int *commands, int *results)
@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;
 
-       ctx->base.handle_result = safexcel_handle_inv_result;
-
        ret = safexcel_invalidate_cache(async, &ctx->base, priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
@@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
        return 0;
 }
 
+static int safexcel_send(struct crypto_async_request *async,
+                        int ring, struct safexcel_request *request,
+                        int *commands, int *results)
+{
+       struct skcipher_request *req = skcipher_request_cast(async);
+       struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+       int ret;
+
+       if (sreq->needs_inv)
+               ret = safexcel_cipher_send_inv(async, ring, request,
+                                              commands, results);
+       else
+               ret = safexcel_aes_send(async, ring, request,
+                                       commands, results);
+       return ret;
+}
+
 static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 {
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
-       struct skcipher_request req;
+       SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+       struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;
 
-       memset(&req, 0, sizeof(struct skcipher_request));
+       memset(req, 0, sizeof(struct skcipher_request));
 
        /* create invalidation request */
        init_completion(&result.completion);
-       skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                       safexcel_inv_complete, &result);
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                     safexcel_inv_complete, &result);
 
-       skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-       ctx = crypto_tfm_ctx(req.base.tfm);
+       skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+       ctx = crypto_tfm_ctx(req->base.tfm);
        ctx->base.exit_inv = true;
-       ctx->base.send = safexcel_cipher_send_inv;
+       sreq->needs_inv = true;
 
        spin_lock_bh(&priv->ring[ring].queue_lock);
-       crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+       crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);
 
        if (!priv->ring[ring].need_dequeue)
@@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req,
                        enum safexcel_cipher_direction dir, u32 mode)
 {
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+       struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;
 
+       sreq->needs_inv = false;
        ctx->direction = dir;
        ctx->mode = mode;
 
        if (ctx->base.ctxr) {
-               if (ctx->base.needs_inv)
-                       ctx->base.send = safexcel_cipher_send_inv;
+               if (ctx->base.needs_inv) {
+                       sreq->needs_inv = true;
+                       ctx->base.needs_inv = false;
+               }
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
-               ctx->base.send = safexcel_aes_send;
-
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(req->base),
                                                 &ctx->base.ctxr_dma);
@@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
                             alg.skcipher.base);
 
        ctx->priv = tmpl->priv;
+       ctx->base.send = safexcel_send;
+       ctx->base.handle_result = safexcel_handle_result;
+
+       crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+                                   sizeof(struct safexcel_cipher_req));
 
        return 0;
 }
index 74feb62..0c5a582 100644 (file)
@@ -32,9 +32,10 @@ struct safexcel_ahash_req {
        bool last_req;
        bool finish;
        bool hmac;
+       bool needs_inv;
 
        u8 state_sz;    /* expected state size, only set once */
-       u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+       u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
        u64 len;
        u64 processed;
@@ -119,15 +120,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
        }
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
-                                 struct crypto_async_request *async,
-                                 bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+                                     struct crypto_async_request *async,
+                                     bool *should_complete, int *ret)
 {
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
-       int cache_len, result_sz = sreq->state_sz;
+       int cache_len;
 
        *ret = 0;
 
@@ -148,8 +149,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
        spin_unlock_bh(&priv->ring[ring].egress_lock);
 
        if (sreq->finish)
-               result_sz = crypto_ahash_digestsize(ahash);
-       memcpy(sreq->state, areq->result, result_sz);
+               memcpy(areq->result, sreq->state,
+                      crypto_ahash_digestsize(ahash));
 
        dma_unmap_sg(priv->dev, areq->src,
                     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
@@ -165,9 +166,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
        return 1;
 }
 
-static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
-                              struct safexcel_request *request, int *commands,
-                              int *results)
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+                                  struct safexcel_request *request,
+                                  int *commands, int *results)
 {
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -273,7 +274,7 @@ send_command:
        /* Add the token */
        safexcel_hash_token(first_cdesc, len, req->state_sz);
 
-       ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+       ctx->base.result_dma = dma_map_single(priv->dev, req->state,
                                              req->state_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
                ret = -EINVAL;
@@ -292,7 +293,6 @@ send_command:
 
        req->processed += len;
        request->req = &areq->base;
-       ctx->base.handle_result = safexcel_handle_result;
 
        *commands = n_cdesc;
        *results = 1;
@@ -374,8 +374,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;
-       ctx->base.needs_inv = false;
-       ctx->base.send = safexcel_ahash_send;
 
        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -392,6 +390,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
        return 1;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+                                 struct crypto_async_request *async,
+                                 bool *should_complete, int *ret)
+{
+       struct ahash_request *areq = ahash_request_cast(async);
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+       int err;
+
+       if (req->needs_inv) {
+               req->needs_inv = false;
+               err = safexcel_handle_inv_result(priv, ring, async,
+                                                should_complete, ret);
+       } else {
+               err = safexcel_handle_req_result(priv, ring, async,
+                                                should_complete, ret);
+       }
+
+       return err;
+}
+
 static int safexcel_ahash_send_inv(struct crypto_async_request *async,
                                   int ring, struct safexcel_request *request,
                                   int *commands, int *results)
@@ -400,7 +418,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        int ret;
 
-       ctx->base.handle_result = safexcel_handle_inv_result;
        ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
@@ -412,28 +429,46 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
        return 0;
 }
 
+static int safexcel_ahash_send(struct crypto_async_request *async,
+                              int ring, struct safexcel_request *request,
+                              int *commands, int *results)
+{
+       struct ahash_request *areq = ahash_request_cast(async);
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+       int ret;
+
+       if (req->needs_inv)
+               ret = safexcel_ahash_send_inv(async, ring, request,
+                                             commands, results);
+       else
+               ret = safexcel_ahash_send_req(async, ring, request,
+                                             commands, results);
+       return ret;
+}
+
 static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 {
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
-       struct ahash_request req;
+       AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+       struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;
 
-       memset(&req, 0, sizeof(struct ahash_request));
+       memset(req, 0, sizeof(struct ahash_request));
 
        /* create invalidation request */
        init_completion(&result.completion);
-       ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_inv_complete, &result);
 
-       ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
-       ctx = crypto_tfm_ctx(req.base.tfm);
+       ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+       ctx = crypto_tfm_ctx(req->base.tfm);
        ctx->base.exit_inv = true;
-       ctx->base.send = safexcel_ahash_send_inv;
+       rctx->needs_inv = true;
 
        spin_lock_bh(&priv->ring[ring].queue_lock);
-       crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+       crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);
 
        if (!priv->ring[ring].need_dequeue)
@@ -481,14 +516,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;
 
-       ctx->base.send = safexcel_ahash_send;
+       req->needs_inv = false;
 
        if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
                ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
 
        if (ctx->base.ctxr) {
-               if (ctx->base.needs_inv)
-                       ctx->base.send = safexcel_ahash_send_inv;
+               if (ctx->base.needs_inv) {
+                       ctx->base.needs_inv = false;
+                       req->needs_inv = true;
+               }
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
@@ -622,6 +659,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
                             struct safexcel_alg_template, alg.ahash);
 
        ctx->priv = tmpl->priv;
+       ctx->base.send = safexcel_ahash_send;
+       ctx->base.handle_result = safexcel_handle_result;
 
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct safexcel_ahash_req));
index 48de52c..662e709 100644 (file)
@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
                                          CWQ_ENTRY_SIZE, 0, NULL);
        if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
                kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+               queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
                return -ENOMEM;
        }
        return 0;
@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
 {
        kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
        kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+       queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+       queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
 }
 
 static long spu_queue_register_workfn(void *arg)
index 6833ada..7b0bf82 100644 (file)
@@ -428,9 +428,21 @@ static int dev_dax_fault(struct vm_fault *vmf)
        return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
 }
 
+static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
+{
+       struct file *filp = vma->vm_file;
+       struct dev_dax *dev_dax = filp->private_data;
+       struct dax_region *dax_region = dev_dax->region;
+
+       if (!IS_ALIGNED(addr, dax_region->align))
+               return -EINVAL;
+       return 0;
+}
+
 static const struct vm_operations_struct dax_vm_ops = {
        .fault = dev_dax_fault,
        .huge_fault = dev_dax_huge_fault,
+       .split = dev_dax_split,
 };
 
 static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
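The new ->split() hook simply rejects VMA split addresses that are not aligned to the dax region's alignment. For power-of-two alignments that check is a mask of the low bits; the sketch below spells out the kernel's IS_ALIGNED() definition (it relies on the GCC/Clang typeof extension) against a made-up 2MiB alignment:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the kernel's IS_ALIGNED(); only meaningful for power-of-two a. */
#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)

int main(void)
{
	uint64_t align = 2ULL * 1024 * 1024;	/* e.g. a 2MiB dax region alignment */
	uint64_t addrs[] = { 0x40000000, 0x40100000, 0x40200000 };

	for (unsigned int i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("0x%llx -> %s\n", (unsigned long long)addrs[i],
		       IS_ALIGNED(addrs[i], align) ? "ok" : "-EINVAL");
	return 0;
}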
index 78fb496..fe2af6a 100644 (file)
@@ -737,7 +737,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
        devfreq = devfreq_add_device(dev, profile, governor_name, data);
        if (IS_ERR(devfreq)) {
                devres_free(ptr);
-               return ERR_PTR(-ENOMEM);
+               return devfreq;
        }
 
        *ptr = devfreq;
@@ -996,7 +996,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
        if (df->governor == governor) {
                ret = 0;
                goto out;
-       } else if (df->governor->immutable || governor->immutable) {
+       } else if ((df->governor && df->governor->immutable) ||
+                                       governor->immutable) {
                ret = -EINVAL;
                goto out;
        }
index fbab271..a861b5b 100644 (file)
@@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
                         unsigned long flags)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
-       struct data_chunk       *first = xt->sgl;
+       struct data_chunk       *first;
        struct at_desc          *desc = NULL;
        size_t                  xfer_count;
        unsigned int            dwidth;
@@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
        if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
                return NULL;
 
+       first = xt->sgl;
+
        dev_info(chan2dev(chan),
                 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
                __func__, &xt->src_start, &xt->dst_start, xt->numf,
index d50273f..afd5e10 100644 (file)
@@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
        ret = dma_async_device_register(dd);
        if (ret)
-               return ret;
+               goto err_clk;
 
        irq = platform_get_irq(pdev, 0);
        ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
 err_unregister:
        dma_async_device_unregister(dd);
+err_clk:
+       clk_disable_unprepare(dmadev->clk);
        return ret;
 }
 
index 47edc7f..ec5f9d2 100644 (file)
@@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
 #define PATTERN_COUNT_MASK     0x1f
 #define PATTERN_MEMSET_IDX     0x01
 
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+       bool                    done;
+       wait_queue_head_t       *wait;
+};
+
 struct dmatest_thread {
        struct list_head        node;
        struct dmatest_info     *info;
@@ -165,6 +171,8 @@ struct dmatest_thread {
        u8                      **dsts;
        u8                      **udsts;
        enum dma_transaction_type type;
+       wait_queue_head_t done_wait;
+       struct dmatest_done test_done;
        bool                    done;
 };
 
@@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
        return error_count;
 }
 
-/* poor man's completion - we want to use wait_event_freezable() on it */
-struct dmatest_done {
-       bool                    done;
-       wait_queue_head_t       *wait;
-};
 
 static void dmatest_callback(void *arg)
 {
        struct dmatest_done *done = arg;
-
-       done->done = true;
-       wake_up_all(done->wait);
+       struct dmatest_thread *thread =
+               container_of(done, struct dmatest_thread, test_done);
+       if (!thread->done) {
+               done->done = true;
+               wake_up_all(done->wait);
+       } else {
+               /*
+                * If thread->done, it means that this callback occurred
+                * after the parent thread has cleaned up. This can
+                * happen in the case that the driver doesn't implement
+                * the terminate_all() functionality and a DMA operation
+                * did not complete within the timeout period.
+                */
+               WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
+       }
 }
 
 static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
  */
 static int dmatest_func(void *data)
 {
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
        struct dmatest_thread   *thread = data;
-       struct dmatest_done     done = { .wait = &done_wait };
+       struct dmatest_done     *done = &thread->test_done;
        struct dmatest_info     *info;
        struct dmatest_params   *params;
        struct dma_chan         *chan;
@@ -673,9 +687,9 @@ static int dmatest_func(void *data)
                        continue;
                }
 
-               done.done = false;
+               done->done = false;
                tx->callback = dmatest_callback;
-               tx->callback_param = &done;
+               tx->callback_param = done;
                cookie = tx->tx_submit(tx);
 
                if (dma_submit_error(cookie)) {
@@ -688,21 +702,12 @@ static int dmatest_func(void *data)
                }
                dma_async_issue_pending(chan);
 
-               wait_event_freezable_timeout(done_wait, done.done,
+               wait_event_freezable_timeout(thread->done_wait, done->done,
                                             msecs_to_jiffies(params->timeout));
 
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-               if (!done.done) {
-                       /*
-                        * We're leaving the timed out dma operation with
-                        * dangling pointer to done_wait.  To make this
-                        * correct, we'll need to allocate wait_done for
-                        * each test iteration and perform "who's gonna
-                        * free it this time?" dancing.  For now, just
-                        * leave it dangling.
-                        */
-                       WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+               if (!done->done) {
                        dmaengine_unmap_put(um);
                        result("test timed out", total_tests, src_off, dst_off,
                               len, 0);
@@ -789,7 +794,7 @@ err_thread_type:
                dmatest_KBs(runtime, total_len), ret);
 
        /* terminate all transfers on specified channels */
-       if (ret)
+       if (ret || failed_tests)
                dmaengine_terminate_all(chan);
 
        thread->done = true;
@@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
                thread->info = info;
                thread->chan = dtc->chan;
                thread->type = type;
+               thread->test_done.wait = &thread->done_wait;
+               init_waitqueue_head(&thread->done_wait);
                smp_wmb();
                thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
                                dma_chan_name(chan), op, i);
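The reworked callback recovers the owning dmatest_thread from the embedded dmatest_done with container_of(), so the wait queue can live in thread context instead of on the test function's stack. A small userspace sketch of that idiom, with simplified stand-in structures rather than the driver's real ones:

#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_done {
	bool done;
};

struct fake_thread {
	int id;
	struct fake_done test_done;
	bool done;
};

/* The callback only receives a pointer to the embedded member... */
static void callback(void *arg)
{
	struct fake_done *done = arg;
	/* ...and walks back to the containing structure. */
	struct fake_thread *thread =
		container_of(done, struct fake_thread, test_done);

	if (!thread->done)
		done->done = true;
}

int main(void)
{
	struct fake_thread t = { .id = 42 };

	callback(&t.test_done);
	printf("thread %d, done=%d\n", t.id, t.test_done.done);
	return 0;
}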
index 6775f2c..c756886 100644 (file)
@@ -863,11 +863,11 @@ static void fsl_edma_irq_exit(
        }
 }
 
-static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
 {
        int i;
 
-       for (i = 0; i < DMAMUX_NR; i++)
+       for (i = 0; i < nr_clocks; i++)
                clk_disable_unprepare(fsl_edma->muxclk[i]);
 }
 
@@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
 
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
                fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
-               if (IS_ERR(fsl_edma->muxbase[i]))
+               if (IS_ERR(fsl_edma->muxbase[i])) {
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
                        return PTR_ERR(fsl_edma->muxbase[i]);
+               }
 
                sprintf(clkname, "dmamux%d", i);
                fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
                if (IS_ERR(fsl_edma->muxclk[i])) {
                        dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
                        return PTR_ERR(fsl_edma->muxclk[i]);
                }
 
                ret = clk_prepare_enable(fsl_edma->muxclk[i]);
-               if (ret) {
-                       /* disable only clks which were enabled on error */
-                       for (; i >= 0; i--)
-                               clk_disable_unprepare(fsl_edma->muxclk[i]);
-
-                       dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
-                       return ret;
-               }
+               if (ret)
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
 
        }
 
@@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev,
                        "Can't register Freescale eDMA engine. (%d)\n", ret);
-               fsl_disable_clocks(fsl_edma);
+               fsl_disable_clocks(fsl_edma, DMAMUX_NR);
                return ret;
        }
 
@@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
                dev_err(&pdev->dev,
                        "Can't register Freescale eDMA of_dma. (%d)\n", ret);
                dma_async_device_unregister(&fsl_edma->dma_dev);
-               fsl_disable_clocks(fsl_edma);
+               fsl_disable_clocks(fsl_edma, DMAMUX_NR);
                return ret;
        }
 
@@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
        fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
        of_dma_controller_free(np);
        dma_async_device_unregister(&fsl_edma->dma_dev);
-       fsl_disable_clocks(fsl_edma);
+       fsl_disable_clocks(fsl_edma, DMAMUX_NR);
 
        return 0;
 }
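fsl_disable_clocks() now takes the number of clocks to undo, so the error paths inside the probe loop pass the current index (only entries 0..i-1 are known to be enabled) while the later teardown paths pass DMAMUX_NR. A hedged standalone sketch of the same count-based unwind, with made-up enable/disable helpers:

#include <stdio.h>

#define NR_ITEMS 4

static int enable_item(int i, int fail_at)
{
	if (i == fail_at)
		return -1;
	printf("enabled %d\n", i);
	return 0;
}

/* Disable only the first nr items -- the ones known to be enabled. */
static void disable_items(int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		printf("disabled %d\n", i);
}

static int probe(int fail_at)
{
	int i;

	for (i = 0; i < NR_ITEMS; i++) {
		if (enable_item(i, fail_at)) {
			disable_items(i);	/* undo 0..i-1 only */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	probe(2);	/* fails on the third item, unwinds items 0 and 1 */
	return 0;
}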
index 2f31d3d..7792a91 100644 (file)
@@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(dev, "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
-               goto free_resources;
+               goto unmap_dma;
        }
 
 unmap_dma:
index 2b2c7db..35c3936 100644 (file)
@@ -1615,22 +1615,6 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
  * Power management
  */
 
-#ifdef CONFIG_PM_SLEEP
-static int rcar_dmac_sleep_suspend(struct device *dev)
-{
-       /*
-        * TODO: Wait for the current transfer to complete and stop the device.
-        */
-       return 0;
-}
-
-static int rcar_dmac_sleep_resume(struct device *dev)
-{
-       /* TODO: Resume transfers, if any. */
-       return 0;
-}
-#endif
-
 #ifdef CONFIG_PM
 static int rcar_dmac_runtime_suspend(struct device *dev)
 {
@@ -1646,7 +1630,13 @@ static int rcar_dmac_runtime_resume(struct device *dev)
 #endif
 
 static const struct dev_pm_ops rcar_dmac_pm = {
-       SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
+       /*
+        * TODO for system sleep/resume:
+        *   - Wait for the current transfer to complete and stop the device,
+        *   - Resume transfers, if any.
+        */
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                                    pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
                           NULL)
 };
index 981fba5..1621f2f 100644 (file)
@@ -24,8 +24,6 @@
 #include <linux/notifier.h>
 #include <linux/extcon-provider.h>
 #include <linux/regmap.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
 #include <linux/mfd/axp20x.h>
 
 /* Power source status register */
@@ -79,11 +77,6 @@ enum axp288_extcon_reg {
        AXP288_BC_DET_STAT_REG          = 0x2f,
 };
 
-enum axp288_mux_select {
-       EXTCON_GPIO_MUX_SEL_PMIC = 0,
-       EXTCON_GPIO_MUX_SEL_SOC,
-};
-
 enum axp288_extcon_irq {
        VBUS_FALLING_IRQ = 0,
        VBUS_RISING_IRQ,
@@ -104,10 +97,8 @@ struct axp288_extcon_info {
        struct device *dev;
        struct regmap *regmap;
        struct regmap_irq_chip_data *regmap_irqc;
-       struct gpio_desc *gpio_mux_cntl;
        int irq[EXTCON_IRQ_END];
        struct extcon_dev *edev;
-       struct notifier_block extcon_nb;
        unsigned int previous_cable;
 };
 
@@ -197,15 +188,6 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
        }
 
 no_vbus:
-       /*
-        * If VBUS is absent Connect D+/D- lines to PMIC for BC
-        * detection. Else connect them to SOC for USB communication.
-        */
-       if (info->gpio_mux_cntl)
-               gpiod_set_value(info->gpio_mux_cntl,
-                       vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
-                                       : EXTCON_GPIO_MUX_SEL_PMIC);
-
        extcon_set_state_sync(info->edev, info->previous_cable, false);
        if (info->previous_cable == EXTCON_CHG_USB_SDP)
                extcon_set_state_sync(info->edev, EXTCON_USB, false);
@@ -253,8 +235,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
 {
        struct axp288_extcon_info *info;
        struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
-       struct axp288_extcon_pdata *pdata = pdev->dev.platform_data;
-       int ret, i, pirq, gpio;
+       int ret, i, pirq;
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
@@ -264,8 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev)
        info->regmap = axp20x->regmap;
        info->regmap_irqc = axp20x->regmap_irqc;
        info->previous_cable = EXTCON_NONE;
-       if (pdata)
-               info->gpio_mux_cntl = pdata->gpio_mux_cntl;
 
        platform_set_drvdata(pdev, info);
 
@@ -286,21 +265,11 @@ static int axp288_extcon_probe(struct platform_device *pdev)
                return ret;
        }
 
-       /* Set up gpio control for USB Mux */
-       if (info->gpio_mux_cntl) {
-               gpio = desc_to_gpio(info->gpio_mux_cntl);
-               ret = devm_gpio_request(&pdev->dev, gpio, "USB_MUX");
-               if (ret < 0) {
-                       dev_err(&pdev->dev,
-                               "failed to request the gpio=%d\n", gpio);
-                       return ret;
-               }
-               gpiod_direction_output(info->gpio_mux_cntl,
-                                               EXTCON_GPIO_MUX_SEL_PMIC);
-       }
-
        for (i = 0; i < EXTCON_IRQ_END; i++) {
                pirq = platform_get_irq(pdev, i);
+               if (pirq < 0)
+                       return pirq;
+
                info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
                if (info->irq[i] < 0) {
                        dev_err(&pdev->dev,
index 6187f73..6721ab0 100644 (file)
@@ -34,16 +34,26 @@ struct cros_ec_extcon_info {
 
        struct notifier_block notifier;
 
+       unsigned int dr; /* data role */
+       bool pr; /* power role (true if VBUS enabled) */
        bool dp; /* DisplayPort enabled */
        bool mux; /* SuperSpeed (usb3) enabled */
        unsigned int power_type;
 };
 
 static const unsigned int usb_type_c_cable[] = {
+       EXTCON_USB,
+       EXTCON_USB_HOST,
        EXTCON_DISP_DP,
        EXTCON_NONE,
 };
 
+enum usb_data_roles {
+       DR_NONE,
+       DR_HOST,
+       DR_DEVICE,
+};
+
 /**
  * cros_ec_pd_command() - Send a command to the EC.
  * @info: pointer to struct cros_ec_extcon_info
@@ -150,6 +160,7 @@ static int cros_ec_usb_get_role(struct cros_ec_extcon_info *info,
        pd_control.port = info->port_id;
        pd_control.role = USB_PD_CTRL_ROLE_NO_CHANGE;
        pd_control.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+       pd_control.swap = USB_PD_CTRL_SWAP_NONE;
        ret = cros_ec_pd_command(info, EC_CMD_USB_PD_CONTROL, 1,
                                 &pd_control, sizeof(pd_control),
                                 &resp, sizeof(resp));
@@ -183,11 +194,72 @@ static int cros_ec_pd_get_num_ports(struct cros_ec_extcon_info *info)
        return resp.num_ports;
 }
 
+static const char *cros_ec_usb_role_string(unsigned int role)
+{
+       return role == DR_NONE ? "DISCONNECTED" :
+               (role == DR_HOST ? "DFP" : "UFP");
+}
+
+static const char *cros_ec_usb_power_type_string(unsigned int type)
+{
+       switch (type) {
+       case USB_CHG_TYPE_NONE:
+               return "USB_CHG_TYPE_NONE";
+       case USB_CHG_TYPE_PD:
+               return "USB_CHG_TYPE_PD";
+       case USB_CHG_TYPE_PROPRIETARY:
+               return "USB_CHG_TYPE_PROPRIETARY";
+       case USB_CHG_TYPE_C:
+               return "USB_CHG_TYPE_C";
+       case USB_CHG_TYPE_BC12_DCP:
+               return "USB_CHG_TYPE_BC12_DCP";
+       case USB_CHG_TYPE_BC12_CDP:
+               return "USB_CHG_TYPE_BC12_CDP";
+       case USB_CHG_TYPE_BC12_SDP:
+               return "USB_CHG_TYPE_BC12_SDP";
+       case USB_CHG_TYPE_OTHER:
+               return "USB_CHG_TYPE_OTHER";
+       case USB_CHG_TYPE_VBUS:
+               return "USB_CHG_TYPE_VBUS";
+       case USB_CHG_TYPE_UNKNOWN:
+               return "USB_CHG_TYPE_UNKNOWN";
+       default:
+               return "USB_CHG_TYPE_UNKNOWN";
+       }
+}
+
+static bool cros_ec_usb_power_type_is_wall_wart(unsigned int type,
+                                               unsigned int role)
+{
+       switch (type) {
+       /* FIXME: Guppy, Donnettes, and other chargers will be miscategorized
+        * because they identify with USB_CHG_TYPE_C, but we can't return true
+        * here for that type because doing so breaks Suzy-Q and other kinds of
+        * USB Type-C cables and peripherals.
+        */
+       case USB_CHG_TYPE_PROPRIETARY:
+       case USB_CHG_TYPE_BC12_DCP:
+               return true;
+       case USB_CHG_TYPE_PD:
+       case USB_CHG_TYPE_C:
+       case USB_CHG_TYPE_BC12_CDP:
+       case USB_CHG_TYPE_BC12_SDP:
+       case USB_CHG_TYPE_OTHER:
+       case USB_CHG_TYPE_VBUS:
+       case USB_CHG_TYPE_UNKNOWN:
+       case USB_CHG_TYPE_NONE:
+       default:
+               return false;
+       }
+}
+
 static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
                                       bool force)
 {
        struct device *dev = info->dev;
        int role, power_type;
+       unsigned int dr = DR_NONE;
+       bool pr = false;
        bool polarity = false;
        bool dp = false;
        bool mux = false;
@@ -206,9 +278,12 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
                        dev_err(dev, "failed getting role err = %d\n", role);
                        return role;
                }
+               dev_dbg(dev, "disconnected\n");
        } else {
                int pd_mux_state;
 
+               dr = (role & PD_CTRL_RESP_ROLE_DATA) ? DR_HOST : DR_DEVICE;
+               pr = (role & PD_CTRL_RESP_ROLE_POWER);
                pd_mux_state = cros_ec_usb_get_pd_mux_state(info);
                if (pd_mux_state < 0)
                        pd_mux_state = USB_PD_MUX_USB_ENABLED;
@@ -216,20 +291,62 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
                dp = pd_mux_state & USB_PD_MUX_DP_ENABLED;
                mux = pd_mux_state & USB_PD_MUX_USB_ENABLED;
                hpd = pd_mux_state & USB_PD_MUX_HPD_IRQ;
-       }
 
-       if (force || info->dp != dp || info->mux != mux ||
-               info->power_type != power_type) {
+               dev_dbg(dev,
+                       "connected role 0x%x pwr type %d dr %d pr %d pol %d mux %d dp %d hpd %d\n",
+                       role, power_type, dr, pr, polarity, mux, dp, hpd);
+       }
 
+       /*
+        * When there is no USB host (e.g. USB PD charger),
+        * we are not really a UFP for the AP.
+        */
+       if (dr == DR_DEVICE &&
+           cros_ec_usb_power_type_is_wall_wart(power_type, role))
+               dr = DR_NONE;
+
+       if (force || info->dr != dr || info->pr != pr || info->dp != dp ||
+           info->mux != mux || info->power_type != power_type) {
+               bool host_connected = false, device_connected = false;
+
+               dev_dbg(dev, "Type/Role switch! type = %s role = %s\n",
+                       cros_ec_usb_power_type_string(power_type),
+                       cros_ec_usb_role_string(dr));
+               info->dr = dr;
+               info->pr = pr;
                info->dp = dp;
                info->mux = mux;
                info->power_type = power_type;
 
-               extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+               if (dr == DR_DEVICE)
+                       device_connected = true;
+               else if (dr == DR_HOST)
+                       host_connected = true;
 
+               extcon_set_state(info->edev, EXTCON_USB, device_connected);
+               extcon_set_state(info->edev, EXTCON_USB_HOST, host_connected);
+               extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+               extcon_set_property(info->edev, EXTCON_USB,
+                                   EXTCON_PROP_USB_VBUS,
+                                   (union extcon_property_value)(int)pr);
+               extcon_set_property(info->edev, EXTCON_USB_HOST,
+                                   EXTCON_PROP_USB_VBUS,
+                                   (union extcon_property_value)(int)pr);
+               extcon_set_property(info->edev, EXTCON_USB,
+                                   EXTCON_PROP_USB_TYPEC_POLARITY,
+                                   (union extcon_property_value)(int)polarity);
+               extcon_set_property(info->edev, EXTCON_USB_HOST,
+                                   EXTCON_PROP_USB_TYPEC_POLARITY,
+                                   (union extcon_property_value)(int)polarity);
                extcon_set_property(info->edev, EXTCON_DISP_DP,
                                    EXTCON_PROP_USB_TYPEC_POLARITY,
                                    (union extcon_property_value)(int)polarity);
+               extcon_set_property(info->edev, EXTCON_USB,
+                                   EXTCON_PROP_USB_SS,
+                                   (union extcon_property_value)(int)mux);
+               extcon_set_property(info->edev, EXTCON_USB_HOST,
+                                   EXTCON_PROP_USB_SS,
+                                   (union extcon_property_value)(int)mux);
                extcon_set_property(info->edev, EXTCON_DISP_DP,
                                    EXTCON_PROP_USB_SS,
                                    (union extcon_property_value)(int)mux);
@@ -237,6 +354,8 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
                                    EXTCON_PROP_DISP_HPD,
                                    (union extcon_property_value)(int)hpd);
 
+               extcon_sync(info->edev, EXTCON_USB);
+               extcon_sync(info->edev, EXTCON_USB_HOST);
                extcon_sync(info->edev, EXTCON_DISP_DP);
 
        } else if (hpd) {
@@ -322,13 +441,28 @@ static int extcon_cros_ec_probe(struct platform_device *pdev)
                return ret;
        }
 
+       extcon_set_property_capability(info->edev, EXTCON_USB,
+                                      EXTCON_PROP_USB_VBUS);
+       extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+                                      EXTCON_PROP_USB_VBUS);
+       extcon_set_property_capability(info->edev, EXTCON_USB,
+                                      EXTCON_PROP_USB_TYPEC_POLARITY);
+       extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+                                      EXTCON_PROP_USB_TYPEC_POLARITY);
        extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
                                       EXTCON_PROP_USB_TYPEC_POLARITY);
+       extcon_set_property_capability(info->edev, EXTCON_USB,
+                                      EXTCON_PROP_USB_SS);
+       extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+                                      EXTCON_PROP_USB_SS);
        extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
                                       EXTCON_PROP_USB_SS);
        extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
                                       EXTCON_PROP_DISP_HPD);
 
+       info->dr = DR_NONE;
+       info->pr = false;
+
        platform_set_drvdata(pdev, info);
 
        /* Get PD events from the EC */
index dfb373c..7da9f1b 100644 (file)
@@ -28,7 +28,6 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/bitmap.h>
-#include <linux/bitfield.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/export.h>
 
 #define MAX_DVFS_DOMAINS       8
 #define MAX_DVFS_OPPS          16
-
-#define PROTO_REV_MAJOR_MASK   GENMASK(31, 16)
-#define PROTO_REV_MINOR_MASK   GENMASK(15, 0)
-
-#define FW_REV_MAJOR_MASK      GENMASK(31, 24)
-#define FW_REV_MINOR_MASK      GENMASK(23, 16)
-#define FW_REV_PATCH_MASK      GENMASK(15, 0)
+#define DVFS_LATENCY(hdr)      (le32_to_cpu(hdr) >> 16)
+#define DVFS_OPP_COUNT(hdr)    ((le32_to_cpu(hdr) >> 8) & 0xff)
+
+#define PROTOCOL_REV_MINOR_BITS        16
+#define PROTOCOL_REV_MINOR_MASK        ((1U << PROTOCOL_REV_MINOR_BITS) - 1)
+#define PROTOCOL_REV_MAJOR(x)  ((x) >> PROTOCOL_REV_MINOR_BITS)
+#define PROTOCOL_REV_MINOR(x)  ((x) & PROTOCOL_REV_MINOR_MASK)
+
+#define FW_REV_MAJOR_BITS      24
+#define FW_REV_MINOR_BITS      16
+#define FW_REV_PATCH_MASK      ((1U << FW_REV_MINOR_BITS) - 1)
+#define FW_REV_MINOR_MASK      ((1U << FW_REV_MAJOR_BITS) - 1)
+#define FW_REV_MAJOR(x)                ((x) >> FW_REV_MAJOR_BITS)
+#define FW_REV_MINOR(x)                (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
+#define FW_REV_PATCH(x)                ((x) & FW_REV_PATCH_MASK)
 
 #define MAX_RX_TIMEOUT         (msecs_to_jiffies(30))
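The restored macros unpack the major/minor/patch fields from a single 32-bit version word by shifting and masking. A standalone sketch of the same arithmetic on a made-up firmware value (0x01020003 decodes as 1.2.3):

#include <stdio.h>
#include <stdint.h>

#define FW_REV_MAJOR_BITS	24
#define FW_REV_MINOR_BITS	16
#define FW_REV_PATCH_MASK	((1U << FW_REV_MINOR_BITS) - 1)
#define FW_REV_MINOR_MASK	((1U << FW_REV_MAJOR_BITS) - 1)
#define FW_REV_MAJOR(x)		((x) >> FW_REV_MAJOR_BITS)
#define FW_REV_MINOR(x)		(((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
#define FW_REV_PATCH(x)		((x) & FW_REV_PATCH_MASK)

int main(void)
{
	uint32_t fw = 0x01020003;	/* made-up example value */

	/* prints "firmware 1.2.3" */
	printf("firmware %u.%u.%u\n",
	       FW_REV_MAJOR(fw), FW_REV_MINOR(fw), FW_REV_PATCH(fw));
	return 0;
}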
 
@@ -304,6 +311,10 @@ struct clk_get_info {
        u8 name[20];
 } __packed;
 
+struct clk_get_value {
+       __le32 rate;
+} __packed;
+
 struct clk_set_value {
        __le16 id;
        __le16 reserved;
@@ -317,9 +328,7 @@ struct legacy_clk_set_value {
 } __packed;
 
 struct dvfs_info {
-       u8 domain;
-       u8 opp_count;
-       __le16 latency;
+       __le32 header;
        struct {
                __le32 freq;
                __le32 m_volt;
@@ -342,6 +351,11 @@ struct _scpi_sensor_info {
        char name[20];
 };
 
+struct sensor_value {
+       __le32 lo_val;
+       __le32 hi_val;
+} __packed;
+
 struct dev_pstate_set {
        __le16 dev_id;
        u8 pstate;
@@ -405,20 +419,19 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
                unsigned int len;
 
                if (scpi_info->is_legacy) {
-                       struct legacy_scpi_shared_mem __iomem *mem =
-                                                       ch->rx_payload;
+                       struct legacy_scpi_shared_mem *mem = ch->rx_payload;
 
                        /* RX Length is not replied by the legacy Firmware */
                        len = match->rx_len;
 
-                       match->status = ioread32(&mem->status);
+                       match->status = le32_to_cpu(mem->status);
                        memcpy_fromio(match->rx_buf, mem->payload, len);
                } else {
-                       struct scpi_shared_mem __iomem *mem = ch->rx_payload;
+                       struct scpi_shared_mem *mem = ch->rx_payload;
 
                        len = min(match->rx_len, CMD_SIZE(cmd));
 
-                       match->status = ioread32(&mem->status);
+                       match->status = le32_to_cpu(mem->status);
                        memcpy_fromio(match->rx_buf, mem->payload, len);
                }
 
@@ -432,11 +445,11 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
 static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
 {
        struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
-       struct scpi_shared_mem __iomem *mem = ch->rx_payload;
+       struct scpi_shared_mem *mem = ch->rx_payload;
        u32 cmd = 0;
 
        if (!scpi_info->is_legacy)
-               cmd = ioread32(&mem->command);
+               cmd = le32_to_cpu(mem->command);
 
        scpi_process_cmd(ch, cmd);
 }
@@ -446,7 +459,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
        unsigned long flags;
        struct scpi_xfer *t = msg;
        struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
-       struct scpi_shared_mem __iomem *mem = ch->tx_payload;
+       struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
 
        if (t->tx_buf) {
                if (scpi_info->is_legacy)
@@ -465,7 +478,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
        }
 
        if (!scpi_info->is_legacy)
-               iowrite32(t->cmd, &mem->command);
+               mem->command = cpu_to_le32(t->cmd);
 }
 
 static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
@@ -570,13 +583,13 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
 static unsigned long scpi_clk_get_val(u16 clk_id)
 {
        int ret;
-       __le32 rate;
+       struct clk_get_value clk;
        __le16 le_clk_id = cpu_to_le16(clk_id);
 
        ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
-                               sizeof(le_clk_id), &rate, sizeof(rate));
+                               sizeof(le_clk_id), &clk, sizeof(clk));
 
-       return ret ? ret : le32_to_cpu(rate);
+       return ret ? ret : le32_to_cpu(clk.rate);
 }
 
 static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
@@ -631,35 +644,35 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
 }
 
 static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
-{
-       if (domain >= MAX_DVFS_DOMAINS)
-               return ERR_PTR(-EINVAL);
-
-       return scpi_info->dvfs[domain] ?: ERR_PTR(-EINVAL);
-}
-
-static int scpi_dvfs_populate_info(struct device *dev, u8 domain)
 {
        struct scpi_dvfs_info *info;
        struct scpi_opp *opp;
        struct dvfs_info buf;
        int ret, i;
 
+       if (domain >= MAX_DVFS_DOMAINS)
+               return ERR_PTR(-EINVAL);
+
+       if (scpi_info->dvfs[domain])    /* data already populated */
+               return scpi_info->dvfs[domain];
+
        ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
                                &buf, sizeof(buf));
        if (ret)
-               return ret;
+               return ERR_PTR(ret);
 
-       info = devm_kmalloc(dev, sizeof(*info), GFP_KERNEL);
+       info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
-       info->count = buf.opp_count;
-       info->latency = le16_to_cpu(buf.latency) * 1000; /* uS to nS */
+       info->count = DVFS_OPP_COUNT(buf.header);
+       info->latency = DVFS_LATENCY(buf.header) * 1000; /* uS to nS */
 
-       info->opps = devm_kcalloc(dev, info->count, sizeof(*opp), GFP_KERNEL);
-       if (!info->opps)
-               return -ENOMEM;
+       info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
+       if (!info->opps) {
+               kfree(info);
+               return ERR_PTR(-ENOMEM);
+       }
 
        for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
                opp->freq = le32_to_cpu(buf.opps[i].freq);
@@ -669,15 +682,7 @@ static int scpi_dvfs_populate_info(struct device *dev, u8 domain)
        sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);
 
        scpi_info->dvfs[domain] = info;
-       return 0;
-}
-
-static void scpi_dvfs_populate(struct device *dev)
-{
-       int domain;
-
-       for (domain = 0; domain < MAX_DVFS_DOMAINS; domain++)
-               scpi_dvfs_populate_info(dev, domain);
+       return info;
 }
 
 static int scpi_dev_domain_id(struct device *dev)
@@ -708,6 +713,9 @@ static int scpi_dvfs_get_transition_latency(struct device *dev)
        if (IS_ERR(info))
                return PTR_ERR(info);
 
+       if (!info->latency)
+               return 0;
+
        return info->latency;
 }
 
@@ -768,19 +776,20 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
 static int scpi_sensor_get_value(u16 sensor, u64 *val)
 {
        __le16 id = cpu_to_le16(sensor);
-       __le64 value;
+       struct sensor_value buf;
        int ret;
 
        ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
-                               &value, sizeof(value));
+                               &buf, sizeof(buf));
        if (ret)
                return ret;
 
        if (scpi_info->is_legacy)
-               /* only 32-bits supported, upper 32 bits can be junk */
-               *val = le32_to_cpup((__le32 *)&value);
+               /* only 32-bits supported, hi_val can be junk */
+               *val = le32_to_cpu(buf.lo_val);
        else
-               *val = le64_to_cpu(value);
+               *val = (u64)le32_to_cpu(buf.hi_val) << 32 |
+                       le32_to_cpu(buf.lo_val);
 
        return 0;
 }
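The non-legacy path rebuilds the 64-bit sensor reading from two little-endian 32-bit halves, while legacy firmware only guarantees the low word. A small sketch of the same combination step on host-endian values (the le32_to_cpu() conversion is omitted and the names are illustrative):

#include <stdio.h>
#include <stdint.h>

struct sensor_value {
	uint32_t lo_val;
	uint32_t hi_val;
};

static uint64_t sensor_to_u64(const struct sensor_value *v, int legacy)
{
	if (legacy)			/* only the low 32 bits are valid */
		return v->lo_val;
	return (uint64_t)v->hi_val << 32 | v->lo_val;
}

int main(void)
{
	struct sensor_value v = { .lo_val = 0x89abcdef, .hi_val = 0x01234567 };

	/* prints 0x0123456789abcdef, then 0x0000000089abcdef */
	printf("full:   0x%016llx\n", (unsigned long long)sensor_to_u64(&v, 0));
	printf("legacy: 0x%016llx\n", (unsigned long long)sensor_to_u64(&v, 1));
	return 0;
}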
@@ -853,19 +862,23 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
 static ssize_t protocol_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
-       return sprintf(buf, "%lu.%lu\n",
-               FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version),
-               FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version));
+       struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%d.%d\n",
+                      PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
+                      PROTOCOL_REV_MINOR(scpi_info->protocol_version));
 }
 static DEVICE_ATTR_RO(protocol_version);
 
 static ssize_t firmware_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
-       return sprintf(buf, "%lu.%lu.%lu\n",
-                    FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version),
-                    FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version),
-                    FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
+       struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%d.%d.%d\n",
+                      FW_REV_MAJOR(scpi_info->firmware_version),
+                      FW_REV_MINOR(scpi_info->firmware_version),
+                      FW_REV_PATCH(scpi_info->firmware_version));
 }
 static DEVICE_ATTR_RO(firmware_version);
 
@@ -876,13 +889,39 @@ static struct attribute *versions_attrs[] = {
 };
 ATTRIBUTE_GROUPS(versions);
 
-static void scpi_free_channels(void *data)
+static void
+scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count)
 {
-       struct scpi_drvinfo *info = data;
        int i;
 
-       for (i = 0; i < info->num_chans; i++)
-               mbox_free_channel(info->channels[i].chan);
+       for (i = 0; i < count && pchan->chan; i++, pchan++) {
+               mbox_free_channel(pchan->chan);
+               devm_kfree(dev, pchan->xfers);
+               devm_iounmap(dev, pchan->rx_payload);
+       }
+}
+
+static int scpi_remove(struct platform_device *pdev)
+{
+       int i;
+       struct device *dev = &pdev->dev;
+       struct scpi_drvinfo *info = platform_get_drvdata(pdev);
+
+       scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */
+
+       of_platform_depopulate(dev);
+       sysfs_remove_groups(&dev->kobj, versions_groups);
+       scpi_free_channels(dev, info->channels, info->num_chans);
+       platform_set_drvdata(pdev, NULL);
+
+       for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
+               kfree(info->dvfs[i]->opps);
+               kfree(info->dvfs[i]);
+       }
+       devm_kfree(dev, info->channels);
+       devm_kfree(dev, info);
+
+       return 0;
 }
 
 #define MAX_SCPI_XFERS         10
@@ -913,6 +952,7 @@ static int scpi_probe(struct platform_device *pdev)
 {
        int count, idx, ret;
        struct resource res;
+       struct scpi_chan *scpi_chan;
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
 
@@ -929,19 +969,13 @@ static int scpi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
-                                          GFP_KERNEL);
-       if (!scpi_info->channels)
+       scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL);
+       if (!scpi_chan)
                return -ENOMEM;
 
-       ret = devm_add_action(dev, scpi_free_channels, scpi_info);
-       if (ret)
-               return ret;
-
-       for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
+       for (idx = 0; idx < count; idx++) {
                resource_size_t size;
-               int idx = scpi_info->num_chans;
-               struct scpi_chan *pchan = scpi_info->channels + idx;
+               struct scpi_chan *pchan = scpi_chan + idx;
                struct mbox_client *cl = &pchan->cl;
                struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
 
@@ -949,14 +983,15 @@ static int scpi_probe(struct platform_device *pdev)
                of_node_put(shmem);
                if (ret) {
                        dev_err(dev, "failed to get SCPI payload mem resource\n");
-                       return ret;
+                       goto err;
                }
 
                size = resource_size(&res);
                pchan->rx_payload = devm_ioremap(dev, res.start, size);
                if (!pchan->rx_payload) {
                        dev_err(dev, "failed to ioremap SCPI payload\n");
-                       return -EADDRNOTAVAIL;
+                       ret = -EADDRNOTAVAIL;
+                       goto err;
                }
                pchan->tx_payload = pchan->rx_payload + (size >> 1);
 
@@ -982,11 +1017,17 @@ static int scpi_probe(struct platform_device *pdev)
                                dev_err(dev, "failed to get channel%d err %d\n",
                                        idx, ret);
                }
+err:
+               scpi_free_channels(dev, scpi_chan, idx);
+               scpi_info = NULL;
                return ret;
        }
 
+       scpi_info->channels = scpi_chan;
+       scpi_info->num_chans = count;
        scpi_info->commands = scpi_std_commands;
-       scpi_info->scpi_ops = &scpi_ops;
+
+       platform_set_drvdata(pdev, scpi_info);
 
        if (scpi_info->is_legacy) {
                /* Replace with legacy variants */
@@ -1002,23 +1043,23 @@ static int scpi_probe(struct platform_device *pdev)
        ret = scpi_init_versions(scpi_info);
        if (ret) {
                dev_err(dev, "incorrect or no SCP firmware found\n");
+               scpi_remove(pdev);
                return ret;
        }
 
-       scpi_dvfs_populate(dev);
-
-       _dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
-                 FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version),
-                 FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version),
-                 FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version),
-                 FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version),
-                 FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
+       _dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n",
+                 PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
+                 PROTOCOL_REV_MINOR(scpi_info->protocol_version),
+                 FW_REV_MAJOR(scpi_info->firmware_version),
+                 FW_REV_MINOR(scpi_info->firmware_version),
+                 FW_REV_PATCH(scpi_info->firmware_version));
+       scpi_info->scpi_ops = &scpi_ops;
 
-       ret = devm_device_add_groups(dev, versions_groups);
+       ret = sysfs_create_groups(&dev->kobj, versions_groups);
        if (ret)
                dev_err(dev, "unable to create sysfs version group\n");
 
-       return devm_of_platform_populate(dev);
+       return of_platform_populate(dev->of_node, NULL, NULL, dev);
 }
 
 static const struct of_device_id scpi_of_match[] = {
@@ -1035,6 +1076,7 @@ static struct platform_driver scpi_driver = {
                .of_match_table = scpi_of_match,
        },
        .probe = scpi_probe,
+       .remove = scpi_remove,
 };
 module_platform_driver(scpi_driver);
 
index ec8ac5c..055e2e8 100644 (file)
 
 #define NO_FURTHER_WRITE_ACTION -1
 
-#ifndef phys_to_page
-#define phys_to_page(x)                pfn_to_page((x) >> PAGE_SHIFT)
-#endif
-
 /**
  * efi_free_all_buff_pages - free all previous allocated buffer pages
  * @cap_info: pointer to current instance of capsule_info structure
@@ -35,7 +31,7 @@
 static void efi_free_all_buff_pages(struct capsule_info *cap_info)
 {
        while (cap_info->index > 0)
-               __free_page(phys_to_page(cap_info->pages[--cap_info->index]));
+               __free_page(cap_info->pages[--cap_info->index]);
 
        cap_info->index = NO_FURTHER_WRITE_ACTION;
 }
@@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
 
        cap_info->pages = temp_page;
 
+       temp_page = krealloc(cap_info->phys,
+                            pages_needed * sizeof(phys_addr_t *),
+                            GFP_KERNEL | __GFP_ZERO);
+       if (!temp_page)
+               return -ENOMEM;
+
+       cap_info->phys = temp_page;
+
        return 0;
 }
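
[Editor's sketch] The hunk grows a second array (cap_info->phys) in lockstep with cap_info->pages using krealloc with __GFP_ZERO. A minimal userspace sketch of the same grow-two-parallel-arrays pattern, assuming plain realloc plus an explicit memset since realloc has no zeroing flag; all names are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow two parallel arrays to 'needed' entries, zeroing any new tail. */
static int grow_arrays(void ***pages, unsigned long **phys,
		       size_t have, size_t needed)
{
	void **np;
	unsigned long *nphys;

	np = realloc(*pages, needed * sizeof(*np));
	if (!np)
		return -1;
	*pages = np;

	nphys = realloc(*phys, needed * sizeof(*nphys));
	if (!nphys)
		return -1;	/* *pages already grown; caller frees both */
	*phys = nphys;

	if (needed > have) {
		memset(*pages + have, 0, (needed - have) * sizeof(**pages));
		memset(*phys + have, 0, (needed - have) * sizeof(**phys));
	}
	return 0;
}

int main(void)
{
	void **pages = NULL;
	unsigned long *phys = NULL;

	if (grow_arrays(&pages, &phys, 0, 4) == 0)
		printf("grown to 4 entries\n");
	free(pages);
	free(phys);
	return 0;
}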
 
@@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
  **/
 static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
 {
+       bool do_vunmap = false;
        int ret;
 
-       ret = efi_capsule_update(&cap_info->header, cap_info->pages);
+       /*
+        * cap_info->capsule may have been assigned already by a quirk
+        * handler, so only overwrite it if it is NULL
+        */
+       if (!cap_info->capsule) {
+               cap_info->capsule = vmap(cap_info->pages, cap_info->index,
+                                        VM_MAP, PAGE_KERNEL);
+               if (!cap_info->capsule)
+                       return -ENOMEM;
+               do_vunmap = true;
+       }
+
+       ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
+       if (do_vunmap)
+               vunmap(cap_info->capsule);
        if (ret) {
                pr_err("capsule update failed\n");
                return ret;
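
[Editor's sketch] The change above maps the page array only when a quirk handler has not already supplied a mapping, and remembers whether it did the mapping so it only unmaps what it created. A minimal standalone C sketch of that "map if needed, tear down only your own mapping" bookkeeping; acquire()/release()/do_update() are illustrative stand-ins, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *acquire(void)        { return malloc(1); }  /* stands in for vmap()   */
static void release(void *p)      { free(p); }           /* stands in for vunmap() */
static int do_update(void *m)     { (void)m; return 0; }

static int submit(void **mapping)
{
	bool mapped_here = false;
	int ret;

	if (!*mapping) {		/* only map if nobody did it for us */
		*mapping = acquire();
		if (!*mapping)
			return -1;
		mapped_here = true;
	}

	ret = do_update(*mapping);
	if (mapped_here)		/* unmap only what we mapped */
		release(*mapping);

	return ret;
}

int main(void)
{
	void *mapping = NULL;		/* no pre-existing mapping */

	printf("submit: %d\n", submit(&mapping));
	return 0;
}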
@@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
                        goto failed;
                }
 
-               cap_info->pages[cap_info->index++] = page_to_phys(page);
+               cap_info->pages[cap_info->index] = page;
+               cap_info->phys[cap_info->index] = page_to_phys(page);
                cap_info->page_bytes_remain = PAGE_SIZE;
+               cap_info->index++;
        } else {
-               page = phys_to_page(cap_info->pages[cap_info->index - 1]);
+               page = cap_info->pages[cap_info->index - 1];
        }
 
        kbuff = kmap(page);
@@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
        struct capsule_info *cap_info = file->private_data;
 
        kfree(cap_info->pages);
+       kfree(cap_info->phys);
        kfree(file->private_data);
        file->private_data = NULL;
        return 0;
@@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
                return -ENOMEM;
        }
 
+       cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+       if (!cap_info->phys) {
+               kfree(cap_info->pages);
+               kfree(cap_info);
+               return -ENOMEM;
+       }
+
        file->private_data = cap_info;
 
        return 0;
index f70febf..557a478 100644 (file)
@@ -109,6 +109,8 @@ struct kobject *efi_kobj;
 /*
  * Let's not leave out systab information that snuck into
  * the efivars driver
+ * Note, do not add more fields in systab sysfs file as it breaks sysfs
+ * one value per file rule!
  */
 static ssize_t systab_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
@@ -143,8 +145,7 @@ static ssize_t systab_show(struct kobject *kobj,
        return str - buf;
 }
 
-static struct kobj_attribute efi_attr_systab =
-                       __ATTR(systab, 0400, systab_show, NULL);
+static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
 
 #define EFI_FIELD(var) efi.var
 
index bd7ed3c..c47e0c6 100644 (file)
@@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = {
 };
 
 /* Generic ESRT Entry ("ESRE") support. */
-static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
+static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
 {
        char *str = buf;
 
@@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
        return str - buf;
 }
 
-static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400,
-       esre_fw_class_show, NULL);
+static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
 
 #define esre_attr_decl(name, size, fmt) \
-static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
+static ssize_t name##_show(struct esre_entry *entry, char *buf) \
 { \
        return sprintf(buf, fmt "\n", \
                       le##size##_to_cpu(entry->esre.esre1->name)); \
 } \
 \
-static struct esre_attribute esre_##name = __ATTR(name, 0400, \
-       esre_##name##_show, NULL)
+static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
 
 esre_attr_decl(fw_type, 32, "%u");
 esre_attr_decl(fw_version, 32, "%u");
@@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
 
 /* support for displaying ESRT fields at the top level */
 #define esrt_attr_decl(name, size, fmt) \
-static ssize_t esrt_##name##_show(struct kobject *kobj, \
+static ssize_t name##_show(struct kobject *kobj, \
                                  struct kobj_attribute *attr, char *buf)\
 { \
        return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
 } \
 \
-static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \
-       esrt_##name##_show, NULL)
+static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
 
 esrt_attr_decl(fw_resource_count, 32, "%u");
 esrt_attr_decl(fw_resource_count_max, 32, "%u");
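
[Editor's sketch] The hunks above drop the esre_/esrt_ prefixes from the show() handlers so a single __ATTR_RO_MODE()-style macro can paste the attribute name into both the string and the handler symbol. A minimal standalone C sketch of that token-pasting declaration macro; the struct, macro name and handler are illustrative only:

#include <stdio.h>

struct attr {
	const char *name;
	unsigned int mode;
	int (*show)(char *buf, size_t len);
};

/* Declare a read-only attribute whose handler is <name>_show(), pasted by
 * the preprocessor, in the spirit of __ATTR_RO_MODE(). */
#define ATTR_RO_MODE(_name, _mode) \
	{ .name = #_name, .mode = (_mode), .show = _name##_show }

static int fw_version_show(char *buf, size_t len)
{
	return snprintf(buf, len, "%u\n", 42u);
}

static struct attr fw_version_attr = ATTR_RO_MODE(fw_version, 0400);

int main(void)
{
	char buf[16];

	fw_version_attr.show(buf, sizeof(buf));
	printf("%s (mode %o): %s", fw_version_attr.name,
	       fw_version_attr.mode, buf);
	return 0;
}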
@@ -431,7 +428,7 @@ err_remove_group:
 err_remove_esrt:
        kobject_put(esrt_kobj);
 err:
-       kfree(esrt);
+       memunmap(esrt);
        esrt = NULL;
        return error;
 }
index 8e64b77..f377609 100644 (file)
@@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
        return map_attr->show(entry, buf);
 }
 
-static struct map_attribute map_type_attr = __ATTR_RO(type);
-static struct map_attribute map_phys_addr_attr   = __ATTR_RO(phys_addr);
-static struct map_attribute map_virt_addr_attr  = __ATTR_RO(virt_addr);
-static struct map_attribute map_num_pages_attr  = __ATTR_RO(num_pages);
-static struct map_attribute map_attribute_attr  = __ATTR_RO(attribute);
+static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
+static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
+static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
+static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
+static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
 
 /*
  * These are default attributes that are added for every memmap entry.
index 35e553b..e4b40f2 100644 (file)
@@ -295,38 +295,60 @@ static int vpd_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       return vpd_sections_init(entry.cbmem_addr);
+       vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
+       if (!vpd_kobj)
+               return -ENOMEM;
+
+       ret = vpd_sections_init(entry.cbmem_addr);
+       if (ret) {
+               kobject_put(vpd_kobj);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int vpd_remove(struct platform_device *pdev)
+{
+       vpd_section_destroy(&ro_vpd);
+       vpd_section_destroy(&rw_vpd);
+
+       kobject_put(vpd_kobj);
+
+       return 0;
 }
 
 static struct platform_driver vpd_driver = {
        .probe = vpd_probe,
+       .remove = vpd_remove,
        .driver = {
                .name = "vpd",
        },
 };
 
+static struct platform_device *vpd_pdev;
+
 static int __init vpd_platform_init(void)
 {
-       struct platform_device *pdev;
-
-       pdev = platform_device_register_simple("vpd", -1, NULL, 0);
-       if (IS_ERR(pdev))
-               return PTR_ERR(pdev);
+       int ret;
 
-       vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
-       if (!vpd_kobj)
-               return -ENOMEM;
+       ret = platform_driver_register(&vpd_driver);
+       if (ret)
+               return ret;
 
-       platform_driver_register(&vpd_driver);
+       vpd_pdev = platform_device_register_simple("vpd", -1, NULL, 0);
+       if (IS_ERR(vpd_pdev)) {
+               platform_driver_unregister(&vpd_driver);
+               return PTR_ERR(vpd_pdev);
+       }
 
        return 0;
 }
 
 static void __exit vpd_platform_exit(void)
 {
-       vpd_section_destroy(&ro_vpd);
-       vpd_section_destroy(&rw_vpd);
-       kobject_put(vpd_kobj);
+       platform_device_unregister(vpd_pdev);
+       platform_driver_unregister(&vpd_driver);
 }
 
 module_init(vpd_platform_init);
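
[Editor's sketch] The vpd rework registers the platform driver before creating the device and unwinds strictly in reverse on failure and at module exit. A minimal standalone C sketch of that register-then-unwind ordering; the functions are illustrative stand-ins for the platform_driver/platform_device calls:

#include <stdio.h>

static int  register_driver(void)   { puts("driver registered");   return 0; }
static void unregister_driver(void) { puts("driver unregistered"); }
static int  register_device(void)   { puts("device registered");   return 0; }
static void unregister_device(void) { puts("device unregistered"); }

/* Register the driver first so probe() can run as soon as the device
 * appears; undo in reverse order on any later failure. */
static int module_init_sketch(void)
{
	int ret = register_driver();

	if (ret)
		return ret;

	ret = register_device();
	if (ret) {
		unregister_driver();
		return ret;
	}
	return 0;
}

static void module_exit_sketch(void)
{
	unregister_device();
	unregister_driver();
}

int main(void)
{
	if (!module_init_sketch())
		module_exit_sketch();
	return 0;
}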
index f3f4f81..bb1c068 100644 (file)
@@ -77,8 +77,8 @@ static int psci_ops_check(void)
        return 0;
 }
 
-static int find_clusters(const struct cpumask *cpus,
-                        const struct cpumask **clusters)
+static int find_cpu_groups(const struct cpumask *cpus,
+                          const struct cpumask **cpu_groups)
 {
        unsigned int nb = 0;
        cpumask_var_t tmp;
@@ -88,11 +88,11 @@ static int find_clusters(const struct cpumask *cpus,
        cpumask_copy(tmp, cpus);
 
        while (!cpumask_empty(tmp)) {
-               const struct cpumask *cluster =
+               const struct cpumask *cpu_group =
                        topology_core_cpumask(cpumask_any(tmp));
 
-               clusters[nb++] = cluster;
-               cpumask_andnot(tmp, tmp, cluster);
+               cpu_groups[nb++] = cpu_group;
+               cpumask_andnot(tmp, tmp, cpu_group);
        }
 
        free_cpumask_var(tmp);
@@ -170,24 +170,24 @@ static int hotplug_tests(void)
 {
        int err;
        cpumask_var_t offlined_cpus;
-       int i, nb_cluster;
-       const struct cpumask **clusters;
+       int i, nb_cpu_group;
+       const struct cpumask **cpu_groups;
        char *page_buf;
 
        err = -ENOMEM;
        if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
                return err;
-       /* We may have up to nb_available_cpus clusters. */
-       clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
-                                GFP_KERNEL);
-       if (!clusters)
+       /* We may have up to nb_available_cpus cpu_groups. */
+       cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
+                                  GFP_KERNEL);
+       if (!cpu_groups)
                goto out_free_cpus;
        page_buf = (char *)__get_free_page(GFP_KERNEL);
        if (!page_buf)
-               goto out_free_clusters;
+               goto out_free_cpu_groups;
 
        err = 0;
-       nb_cluster = find_clusters(cpu_online_mask, clusters);
+       nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
 
        /*
         * Of course the last CPU cannot be powered down and cpu_down() should
@@ -197,24 +197,22 @@ static int hotplug_tests(void)
        err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
 
        /*
-        * Take down CPUs by cluster this time. When the last CPU is turned
-        * off, the cluster itself should shut down.
+        * Take down CPUs by cpu group this time. When the last CPU is turned
+        * off, the cpu group itself should shut down.
         */
-       for (i = 0; i < nb_cluster; ++i) {
-               int cluster_id =
-                       topology_physical_package_id(cpumask_any(clusters[i]));
+       for (i = 0; i < nb_cpu_group; ++i) {
                ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
-                                                     clusters[i]);
+                                                     cpu_groups[i]);
                /* Remove trailing newline. */
                page_buf[len - 1] = '\0';
-               pr_info("Trying to turn off and on again cluster %d "
-                       "(CPUs %s)\n", cluster_id, page_buf);
-               err += down_and_up_cpus(clusters[i], offlined_cpus);
+               pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
+                       i, page_buf);
+               err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
        }
 
        free_page((unsigned long)page_buf);
-out_free_clusters:
-       kfree(clusters);
+out_free_cpu_groups:
+       kfree(cpu_groups);
 out_free_cpus:
        free_cpumask_var(offlined_cpus);
        return err;
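
[Editor's sketch] find_cpu_groups() above partitions the online mask by repeatedly picking any remaining CPU, recording the group it belongs to, and stripping that group from the working mask. A minimal standalone C sketch of the same loop on a single 32-bit mask; the group_of() rule and the masks are purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for topology_core_cpumask(): CPUs sharing cpu/4 form a group. */
static uint32_t group_of(int cpu)
{
	return 0xfu << (cpu / 4 * 4);
}

static int find_groups(uint32_t online, uint32_t *groups, int max)
{
	uint32_t tmp = online;
	int nb = 0;

	while (tmp && nb < max) {
		int cpu = __builtin_ctz(tmp);		/* "cpumask_any"     */
		uint32_t grp = group_of(cpu) & online;

		groups[nb++] = grp;
		tmp &= ~grp;				/* "cpumask_andnot"  */
	}
	return nb;
}

int main(void)
{
	uint32_t groups[8];
	int n = find_groups(0x0000f3ffu, groups, 8);

	for (int i = 0; i < n; i++)
		printf("group %d: 0x%08x\n", i, groups[i]);
	return 0;
}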
index 5cfe39f..deb4830 100644 (file)
@@ -582,9 +582,10 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev)
 {
        pr_debug("fw_cfg: unloading.\n");
        fw_cfg_sysfs_cache_cleanup();
+       sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+       fw_cfg_io_cleanup();
        fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
        fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
-       fw_cfg_io_cleanup();
        return 0;
 }
 
index 6b535ec..15a1f4b 100644 (file)
@@ -23,6 +23,7 @@
 struct gen_74x164_chip {
        struct gpio_chip        gpio_chip;
        struct mutex            lock;
+       struct gpio_desc        *gpiod_oe;
        u32                     registers;
        /*
         * Since the registers are chained, every byte sent will make
@@ -31,8 +32,7 @@ struct gen_74x164_chip {
         * register at the end of the transfer. So, to have a logical
         * numbering, store the bytes in reverse order.
         */
-       u8                      buffer[0];
-       struct gpio_desc        *gpiod_oe;
+       u8                      buffer[];
 };
 
 static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
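
[Editor's sketch] The gpio-74x164 fix matters because a flexible array member must be the last field: with the old layout, writes into buffer[] landed on gpiod_oe. A minimal standalone C sketch of the corrected layout and its allocation; the struct and sizes are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct shift_chain {
	unsigned int registers;
	void *oe_desc;			/* must come before the array */
	unsigned char buffer[];		/* C99 flexible array member, last */
};

static struct shift_chain *chain_alloc(unsigned int registers)
{
	struct shift_chain *c = malloc(sizeof(*c) + registers);

	if (!c)
		return NULL;
	c->registers = registers;
	c->oe_desc = NULL;
	memset(c->buffer, 0, registers);
	return c;
}

int main(void)
{
	struct shift_chain *c = chain_alloc(4);

	if (!c)
		return 1;
	c->buffer[3] = 0xff;		/* safely inside the allocation */
	printf("registers=%u last=0x%02x\n", c->registers, c->buffer[3]);
	free(c);
	return 0;
}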
index dfcf56e..76861a0 100644 (file)
@@ -522,6 +522,7 @@ static struct of_device_id const bcm_kona_gpio_of_match[] = {
  * category than their parents, so it won't report false recursion.
  */
 static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
 
 static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
                                 irq_hw_number_t hwirq)
@@ -531,7 +532,7 @@ static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
        ret = irq_set_chip_data(irq, d->host_data);
        if (ret < 0)
                return ret;
-       irq_set_lockdep_class(irq, &gpio_lock_class);
+       irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
        irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
        irq_set_noprobe(irq);
 
index 545d43a..bb4f8cf 100644 (file)
@@ -327,6 +327,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
  * category than their parents, so it won't report false recursion.
  */
 static struct lock_class_key brcmstb_gpio_irq_lock_class;
+static struct lock_class_key brcmstb_gpio_irq_request_class;
 
 
 static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
@@ -346,7 +347,8 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
        ret = irq_set_chip_data(irq, &bank->gc);
        if (ret < 0)
                return ret;
-       irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class);
+       irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
+                             &brcmstb_gpio_irq_request_class);
        irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq);
        irq_set_noprobe(irq);
        return 0;
index f75d844..e4b3d7d 100644 (file)
@@ -383,7 +383,7 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
        u32 mask;
 
        d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
-       g = (struct davinci_gpio_regs __iomem *)d->regs;
+       g = (struct davinci_gpio_regs __iomem *)d->regs[0];
        mask = __gpio_mask(data->irq - d->base_irq);
 
        if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
index dd67a31..c38624e 100644 (file)
@@ -9,6 +9,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/acpi.h>
 #include <linux/bitops.h>
 #include <linux/gpio/driver.h>
 #include <linux/init.h>
@@ -380,9 +381,16 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
        }
 }
 
+static const char *mrfld_gpio_get_pinctrl_dev_name(void)
+{
+       const char *dev_name = acpi_dev_get_first_match_name("INTC1002", NULL, -1);
+       return dev_name ? dev_name : "pinctrl-merrifield";
+}
+
 static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        const struct mrfld_gpio_pinrange *range;
+       const char *pinctrl_dev_name;
        struct mrfld_gpio *priv;
        u32 gpio_base, irq_base;
        void __iomem *base;
@@ -439,10 +447,11 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
                return retval;
        }
 
+       pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name();
        for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
                range = &mrfld_gpio_ranges[i];
                retval = gpiochip_add_pin_range(&priv->chip,
-                                               "pinctrl-merrifield",
+                                               pinctrl_dev_name,
                                                range->gpio_base,
                                                range->pin_base,
                                                range->npins);
index f9042bc..7b14d62 100644 (file)
@@ -152,14 +152,13 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
 {
        unsigned long get_mask = 0;
        unsigned long set_mask = 0;
-       int bit = 0;
 
-       while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio) {
-               if (gc->bgpio_dir & BIT(bit))
-                       set_mask |= BIT(bit);
-               else
-                       get_mask |= BIT(bit);
-       }
+       /* Make sure we first clear any bits that are zero when we read the register */
+       *bits &= ~*mask;
+
+       /* Exploit the fact that we know which directions are set */
+       set_mask = *mask & gc->bgpio_dir;
+       get_mask = *mask & ~gc->bgpio_dir;
 
        if (set_mask)
                *bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -176,13 +175,13 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
 
 /*
  * This only works if the bits in the GPIO register are in native endianness.
- * It is dirt simple and fast in this case. (Also the most common case.)
  */
 static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
                              unsigned long *bits)
 {
-
-       *bits = gc->read_reg(gc->reg_dat) & *mask;
+       /* Make sure we first clear any bits that are zero when we read the register */
+       *bits &= ~*mask;
+       *bits |= gc->read_reg(gc->reg_dat) & *mask;
        return 0;
 }
 
@@ -196,9 +195,12 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
        unsigned long val;
        int bit;
 
+       /* Make sure we first clear any bits that are zero when we read the register */
+       *bits &= ~*mask;
+
        /* Create a mirrored mask */
-       bit = 0;
-       while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio)
+       bit = -1;
+       while ((bit = find_next_bit(mask, gc->ngpio, bit + 1)) < gc->ngpio)
                readmask |= bgpio_line2mask(gc, bit);
 
        /* Read the register */
@@ -208,8 +210,8 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
         * Mirror the result into the "bits" result, this will give line 0
         * in bit 0 ... line 31 in bit 31 for a 32bit register.
         */
-       bit = 0;
-       while ((bit = find_next_bit(&val, gc->ngpio, bit)) != gc->ngpio)
+       bit = -1;
+       while ((bit = find_next_bit(&val, gc->ngpio, bit + 1)) < gc->ngpio)
                *bits |= bgpio_line2mask(gc, bit);
 
        return 0;
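
[Editor's sketch] The gpio-mmio hunks fix two things: the find_next_bit() loops must restart at bit + 1 (restarting at bit spins forever on the first hit), and the requested lines in *bits must be cleared before fresh values are OR-ed in. A minimal standalone C sketch of both corrections on a single 32-bit word; the helper and masks are illustrative only:

#include <stdio.h>

#define NBITS 32

/* Minimal stand-in for find_next_bit(): first set bit at or above 'start',
 * or NBITS when there is none. */
static int find_next_bit32(unsigned long mask, int start)
{
	for (int bit = start; bit < NBITS; bit++)
		if (mask & (1ul << bit))
			return bit;
	return NBITS;
}

int main(void)
{
	unsigned long mask = 0x00000112ul;	/* lines 1, 4 and 8 requested */
	unsigned long bits = 0xfffffffful;	/* stale data from the caller */
	int bit;

	/* Restart each search at bit + 1 and compare with '<'. */
	bit = -1;
	while ((bit = find_next_bit32(mask, bit + 1)) < NBITS)
		printf("line %d is requested\n", bit);

	/* Clear the requested lines first, then OR in fresh values, as the
	 * "*bits &= ~*mask" lines above do. */
	bits &= ~mask;
	bits |= 0x00000010ul & mask;		/* pretend only line 4 is high */
	printf("bits = 0x%08lx\n", bits);
	return 0;
}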
index babb7bd..a0a5f97 100644 (file)
@@ -947,7 +947,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
        { .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
        { .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
 
-       { .compatible = "onsemi,pca9654", .data = OF_953X( 8, PCA_INT), },
+       { .compatible = "onnn,pca9654", .data = OF_953X( 8, PCA_INT), },
 
        { .compatible = "exar,xra1202", .data = OF_953X( 8, 0), },
        { }
index 23e771d..e85903e 100644 (file)
@@ -103,8 +103,8 @@ static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
        struct gpio_reg *r = to_gpio_reg(gc);
        int irq = r->irqs[offset];
 
-       if (irq >= 0 && r->irq.domain)
-               irq = irq_find_mapping(r->irq.domain, irq);
+       if (irq >= 0 && r->irqdomain)
+               irq = irq_find_mapping(r->irqdomain, irq);
 
        return irq;
 }
index 8db47f6..02fa8fe 100644 (file)
@@ -565,6 +565,7 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
  * than their parents, so it won't report false recursion.
  */
 static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
 
 static int tegra_gpio_probe(struct platform_device *pdev)
 {
@@ -670,7 +671,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
 
                bank = &tgi->bank_info[GPIO_BANK(gpio)];
 
-               irq_set_lockdep_class(irq, &gpio_lock_class);
+               irq_set_lockdep_class(irq, &gpio_lock_class,
+                                     &gpio_request_class);
                irq_set_chip_data(irq, bank);
                irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
        }
index 2313af8..acd5911 100644 (file)
@@ -139,7 +139,7 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
 
 static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
                                         struct irq_data *irq_data,
-                                        bool early)
+                                        bool reserve)
 {
        struct xgene_gpio_sb *priv = d->host_data;
        u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
index eb4528c..d6f3d9e 100644 (file)
@@ -1074,7 +1074,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
        }
 
        if (!chip->names)
-               devprop_gpiochip_set_names(chip);
+               devprop_gpiochip_set_names(chip, dev_fwnode(chip->parent));
 
        acpi_gpiochip_request_regions(acpi_gpio);
        acpi_gpiochip_scan_gpios(acpi_gpio);
index 27f383b..f748aa3 100644 (file)
 /**
  * devprop_gpiochip_set_names - Set GPIO line names using device properties
  * @chip: GPIO chip whose lines should be named, if possible
+ * @fwnode: Property Node containing the gpio-line-names property
  *
  * Looks for device property "gpio-line-names" and if it exists assigns
  * GPIO line names for the chip. The memory allocated for the assigned
  * names belong to the underlying firmware node and should not be released
  * by the caller.
  */
-void devprop_gpiochip_set_names(struct gpio_chip *chip)
+void devprop_gpiochip_set_names(struct gpio_chip *chip,
+                               const struct fwnode_handle *fwnode)
 {
        struct gpio_device *gdev = chip->gpiodev;
        const char **names;
        int ret, i;
 
-       if (!chip->parent) {
-               dev_warn(&gdev->dev, "GPIO chip parent is NULL\n");
-               return;
-       }
-
-       ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+       ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
                                                NULL, 0);
        if (ret < 0)
                return;
 
        if (ret != gdev->ngpio) {
-               dev_warn(chip->parent,
+               dev_warn(&gdev->dev,
                         "names %d do not match number of GPIOs %d\n", ret,
                         gdev->ngpio);
                return;
@@ -52,10 +49,10 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip)
        if (!names)
                return;
 
-       ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+       ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
                                                names, gdev->ngpio);
        if (ret < 0) {
-               dev_warn(chip->parent, "failed to read GPIO line names\n");
+               dev_warn(&gdev->dev, "failed to read GPIO line names\n");
                kfree(names);
                return;
        }
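
[Editor's sketch] devprop_gpiochip_set_names() uses a two-pass read: call the property helper with a NULL buffer to learn the count, validate it against ngpio, allocate, then read for real. A minimal standalone C sketch of that pattern; read_string_array() is a made-up stub that merely imitates the count-then-fill contract, not the fwnode API:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stub: NULL buffer returns the count, otherwise it fills. */
static int read_string_array(const char **out, int maxlen)
{
	static const char *names[] = { "reset", "wakeup", "led" };
	const int count = 3;

	if (!out)
		return count;
	for (int i = 0; i < count && i < maxlen; i++)
		out[i] = names[i];
	return count < maxlen ? count : maxlen;
}

static int set_line_names(int ngpio)
{
	int ret = read_string_array(NULL, 0);		/* pass 1: count */

	if (ret < 0)
		return ret;
	if (ret != ngpio) {
		fprintf(stderr, "names %d do not match %d GPIOs\n", ret, ngpio);
		return -1;
	}

	const char **names = calloc(ngpio, sizeof(*names));
	if (!names)
		return -1;

	ret = read_string_array(names, ngpio);		/* pass 2: fill */
	if (ret < 0) {
		free(names);
		return ret;
	}

	for (int i = 0; i < ngpio; i++)
		printf("line %d -> %s\n", i, names[i]);
	free(names);
	return 0;
}

int main(void)
{
	return set_line_names(3);
}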
index e0d59e6..72a0695 100644 (file)
@@ -493,7 +493,8 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
        /* If the chip defines names itself, these take precedence */
        if (!chip->names)
-               devprop_gpiochip_set_names(chip);
+               devprop_gpiochip_set_names(chip,
+                                          of_fwnode_handle(chip->of_node));
 
        of_node_get(chip->of_node);
 
index aad84a6..14532d9 100644 (file)
@@ -73,7 +73,8 @@ LIST_HEAD(gpio_devices);
 
 static void gpiochip_free_hogs(struct gpio_chip *chip);
 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-                               struct lock_class_key *key);
+                               struct lock_class_key *lock_key,
+                               struct lock_class_key *request_key);
 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
 static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
 static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@@ -1100,7 +1101,8 @@ static void gpiochip_setup_devs(void)
 }
 
 int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
-                              struct lock_class_key *key)
+                              struct lock_class_key *lock_key,
+                              struct lock_class_key *request_key)
 {
        unsigned long   flags;
        int             status = 0;
@@ -1246,7 +1248,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
        if (status)
                goto err_remove_from_list;
 
-       status = gpiochip_add_irqchip(chip, key);
+       status = gpiochip_add_irqchip(chip, lock_key, request_key);
        if (status)
                goto err_remove_chip;
 
@@ -1632,7 +1634,7 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
         * This lock class tells lockdep that GPIO irqs are in a different
         * category than their parents, so it won't report false recursion.
         */
-       irq_set_lockdep_class(irq, chip->irq.lock_key);
+       irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key);
        irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
        /* Chips that use nested thread handlers have them marked */
        if (chip->irq.threaded)
@@ -1712,10 +1714,12 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
 /**
  * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
  * @gpiochip: the GPIO chip to add the IRQ chip to
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
  */
 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-                               struct lock_class_key *lock_key)
+                               struct lock_class_key *lock_key,
+                               struct lock_class_key *request_key)
 {
        struct irq_chip *irqchip = gpiochip->irq.chip;
        const struct irq_domain_ops *ops;
@@ -1753,6 +1757,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
        gpiochip->to_irq = gpiochip_to_irq;
        gpiochip->irq.default_type = type;
        gpiochip->irq.lock_key = lock_key;
+       gpiochip->irq.request_key = request_key;
 
        if (gpiochip->irq.domain_ops)
                ops = gpiochip->irq.domain_ops;
@@ -1850,7 +1855,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
  * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
  * to have the core avoid setting up any default type in the hardware.
  * @threaded: whether this irqchip uses a nested thread handler
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
  *
  * This function closely associates a certain irqchip with a certain
  * gpiochip, providing an irq domain to translate the local IRQs to
@@ -1872,7 +1878,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
                             irq_flow_handler_t handler,
                             unsigned int type,
                             bool threaded,
-                            struct lock_class_key *lock_key)
+                            struct lock_class_key *lock_key,
+                            struct lock_class_key *request_key)
 {
        struct device_node *of_node;
 
@@ -1913,6 +1920,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
        gpiochip->irq.default_type = type;
        gpiochip->to_irq = gpiochip_to_irq;
        gpiochip->irq.lock_key = lock_key;
+       gpiochip->irq.request_key = request_key;
        gpiochip->irq.domain = irq_domain_add_simple(of_node,
                                        gpiochip->ngpio, first_irq,
                                        &gpiochip_domain_ops, gpiochip);
@@ -1940,7 +1948,8 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
 static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-                                      struct lock_class_key *key)
+                                      struct lock_class_key *lock_key,
+                                      struct lock_class_key *request_key)
 {
        return 0;
 }
@@ -2883,6 +2892,27 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
 }
 EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
 
+/**
+ * gpiod_set_value_nocheck() - set a GPIO line value without checking
+ * @desc: the descriptor to set the value on
+ * @value: value to set
+ *
+ * This sets the value of a GPIO line backing a descriptor, applying
+ * different semantic quirks like active low and open drain/source
+ * handling.
+ */
+static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
+{
+       if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+               value = !value;
+       if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+               gpio_set_open_drain_value_commit(desc, value);
+       else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+               gpio_set_open_source_value_commit(desc, value);
+       else
+               gpiod_set_raw_value_commit(desc, value);
+}
+
 /**
  * gpiod_set_value() - assign a gpio's value
  * @desc: gpio whose value will be assigned
@@ -2897,16 +2927,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
 void gpiod_set_value(struct gpio_desc *desc, int value)
 {
        VALIDATE_DESC_VOID(desc);
-       /* Should be using gpiod_set_value_cansleep() */
        WARN_ON(desc->gdev->chip->can_sleep);
-       if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
-               value = !value;
-       if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
-               gpio_set_open_drain_value_commit(desc, value);
-       else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
-               gpio_set_open_source_value_commit(desc, value);
-       else
-               gpiod_set_raw_value_commit(desc, value);
+       gpiod_set_value_nocheck(desc, value);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_value);
 
@@ -3234,9 +3256,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
 {
        might_sleep_if(extra_checks);
        VALIDATE_DESC_VOID(desc);
-       if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
-               value = !value;
-       gpiod_set_raw_value_commit(desc, value);
+       gpiod_set_value_nocheck(desc, value);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
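
[Editor's sketch] The gpiolib refactor pulls the active-low inversion and the open-drain/open-source routing into one helper shared by the sleeping and non-sleeping setters. A minimal standalone C sketch of that dispatch shape; the flags and commit functions are illustrative only:

#include <stdio.h>

enum {
	FLAG_ACTIVE_LOW  = 1 << 0,
	FLAG_OPEN_DRAIN  = 1 << 1,
	FLAG_OPEN_SOURCE = 1 << 2,
};

struct line { unsigned int flags; };

static void commit_raw(int v)         { printf("raw:         %d\n", v); }
static void commit_open_drain(int v)  { printf("open-drain:  %d\n", v); }
static void commit_open_source(int v) { printf("open-source: %d\n", v); }

/* One helper applies the quirks so both setter paths share it. */
static void set_value_nocheck(struct line *l, int value)
{
	if (l->flags & FLAG_ACTIVE_LOW)
		value = !value;
	if (l->flags & FLAG_OPEN_DRAIN)
		commit_open_drain(value);
	else if (l->flags & FLAG_OPEN_SOURCE)
		commit_open_source(value);
	else
		commit_raw(value);
}

int main(void)
{
	struct line l = { .flags = FLAG_ACTIVE_LOW | FLAG_OPEN_DRAIN };

	set_value_nocheck(&l, 1);	/* inverted, then open-drain commit */
	return 0;
}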
 
index af48322..6c44d16 100644 (file)
@@ -228,7 +228,8 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
        return desc - &desc->gdev->descs[0];
 }
 
-void devprop_gpiochip_set_names(struct gpio_chip *chip);
+void devprop_gpiochip_set_names(struct gpio_chip *chip,
+                               const struct fwnode_handle *fwnode);
 
 /* With descriptor prefix */
 
index 8a08e81..d4176a3 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the ACP, which is a sub-component
 # of AMDSOC/AMDGPU drm driver.
 # It provides the HW control for ACP related functionalities.
index 78d6091..90202cf 100644 (file)
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
 #
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
index 5afaf60..0b14b53 100644 (file)
@@ -717,7 +717,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
                          struct amdgpu_queue_mgr *mgr);
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
                         struct amdgpu_queue_mgr *mgr,
-                        int hw_ip, int instance, int ring,
+                        u32 hw_ip, u32 instance, u32 ring,
                         struct amdgpu_ring **out_ring);
 
 /*
@@ -1572,18 +1572,14 @@ struct amdgpu_device {
        /* sdma */
        struct amdgpu_sdma              sdma;
 
-       union {
-               struct {
-                       /* uvd */
-                       struct amdgpu_uvd               uvd;
+       /* uvd */
+       struct amdgpu_uvd               uvd;
 
-                       /* vce */
-                       struct amdgpu_vce               vce;
-               };
+       /* vce */
+       struct amdgpu_vce               vce;
 
-               /* vcn */
-               struct amdgpu_vcn               vcn;
-       };
+       /* vcn */
+       struct amdgpu_vcn               vcn;
 
        /* firmwares */
        struct amdgpu_firmware          firmware;
index 47d1c13..1e3e9be 100644 (file)
@@ -379,29 +379,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
+       unsigned long end_jiffies;
        uint32_t sdma_base_addr;
+       uint32_t data;
 
        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);
 
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
-                       m->sdma_rlc_virtual_addr);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+               m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
 
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
-                       m->sdma_rlc_rb_base);
+       end_jiffies = msecs_to_jiffies(2000) + jiffies;
+       while (true) {
+               data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+               if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+                       break;
+               if (time_after(jiffies, end_jiffies))
+                       return -ETIME;
+               usleep_range(500, 1000);
+       }
+       if (m->sdma_engine_id) {
+               data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+               data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+                               RESUME_CTX, 0);
+               WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+       } else {
+               data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+               data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+                               RESUME_CTX, 0);
+               WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+       }
 
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+                               m->sdma_rlc_doorbell);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+                               m->sdma_rlc_virtual_addr);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
                        m->sdma_rlc_rb_base_hi);
-
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
                        m->sdma_rlc_rb_rptr_addr_lo);
-
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
                        m->sdma_rlc_rb_rptr_addr_hi);
-
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
-                       m->sdma_rlc_doorbell);
-
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
                        m->sdma_rlc_rb_cntl);
 
@@ -574,9 +595,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
        }
 
        WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+               RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+               SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
 
        return 0;
 }
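
[Editor's sketch] The SDMA load sequence above disables the ring buffer and then polls the context-status register until the engine reports idle, bounded by a two-second deadline. A minimal standalone C sketch of a deadline-bounded polling loop of the same shape; the status function and timings are illustrative only:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Illustrative stand-in for reading the status register; reports idle after
 * a few polls so the example terminates. */
static int read_status_idle(void)
{
	static int polls;

	return ++polls >= 3;
}

static int wait_for_idle(int timeout_secs)
{
	time_t deadline = time(NULL) + timeout_secs;

	for (;;) {
		if (read_status_idle())
			return 0;
		if (time(NULL) > deadline)
			return -1;	/* -ETIME in the driver */
		usleep(500);		/* back off between reads */
	}
}

int main(void)
{
	printf("wait_for_idle: %d\n", wait_for_idle(2));
	return 0;
}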
index a57cec7..57abf7a 100644 (file)
@@ -409,6 +409,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
                if (candidate->robj == validated)
                        break;
 
+               /* We can't move pinned BOs here */
+               if (bo->pin_count)
+                       continue;
+
                other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 
                /* Check if this BO is in one of the domains we need space for */
index 2c57437..3573ecd 100644 (file)
@@ -1837,9 +1837,6 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                adev->ip_blocks[i].status.hw = false;
        }
 
-       if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
-               amdgpu_ucode_fini_bo(adev);
-
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
index ec96bb1..c2f414f 100644 (file)
@@ -536,7 +536,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        /* Raven */
-       {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 
        {0, 0, 0}
 };
index 6c570d4..f8edf54 100644 (file)
@@ -1,4 +1,6 @@
 /*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
index 033fba2..5f5aa5f 100644 (file)
@@ -164,6 +164,9 @@ static int amdgpu_pp_hw_fini(void *handle)
                ret = adev->powerplay.ip_funcs->hw_fini(
                                        adev->powerplay.pp_handle);
 
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+               amdgpu_ucode_fini_bo(adev);
+
        return ret;
 }
 
index 7714f4a..447d446 100644 (file)
@@ -442,6 +442,8 @@ static int psp_hw_fini(void *handle)
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;
 
+       amdgpu_ucode_fini_bo(adev);
+
        psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
        amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
index 190e28c..93d8661 100644 (file)
@@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
 
 static int amdgpu_identity_map(struct amdgpu_device *adev,
                               struct amdgpu_queue_mapper *mapper,
-                              int ring,
+                              u32 ring,
                               struct amdgpu_ring **out_ring)
 {
        switch (mapper->hw_ip) {
@@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
 
 static int amdgpu_lru_map(struct amdgpu_device *adev,
                          struct amdgpu_queue_mapper *mapper,
-                         int user_ring, bool lru_pipe_order,
+                         u32 user_ring, bool lru_pipe_order,
                          struct amdgpu_ring **out_ring)
 {
        int r, i, j;
@@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
  */
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
                         struct amdgpu_queue_mgr *mgr,
-                        int hw_ip, int instance, int ring,
+                        u32 hw_ip, u32 instance, u32 ring,
                         struct amdgpu_ring **out_ring)
 {
        int r, ip_num_rings;
index f337c31..06525f2 100644 (file)
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _AMDGPU_TRACE_H_
 
index 793b147..a296f7b 100644 (file)
@@ -1023,22 +1023,101 @@ static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] =
        {mmPA_SC_RASTER_CONFIG_1, true},
 };
 
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
-                                         u32 se_num, u32 sh_num,
-                                         u32 reg_offset)
+
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+                                      bool indexed, u32 se_num,
+                                      u32 sh_num, u32 reg_offset)
 {
-       uint32_t val;
+       if (indexed) {
+               uint32_t val;
+               unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+               unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+               switch (reg_offset) {
+               case mmCC_RB_BACKEND_DISABLE:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+               case mmGC_USER_RB_BACKEND_DISABLE:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+               case mmPA_SC_RASTER_CONFIG:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+               case mmPA_SC_RASTER_CONFIG_1:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+               }
 
-       mutex_lock(&adev->grbm_idx_mutex);
-       if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+               mutex_lock(&adev->grbm_idx_mutex);
+               if (se_num != 0xffffffff || sh_num != 0xffffffff)
+                       amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
-       val = RREG32(reg_offset);
+               val = RREG32(reg_offset);
 
-       if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-       mutex_unlock(&adev->grbm_idx_mutex);
-       return val;
+               if (se_num != 0xffffffff || sh_num != 0xffffffff)
+                       amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+               mutex_unlock(&adev->grbm_idx_mutex);
+               return val;
+       } else {
+               unsigned idx;
+
+               switch (reg_offset) {
+               case mmGB_ADDR_CONFIG:
+                       return adev->gfx.config.gb_addr_config;
+               case mmMC_ARB_RAMCFG:
+                       return adev->gfx.config.mc_arb_ramcfg;
+               case mmGB_TILE_MODE0:
+               case mmGB_TILE_MODE1:
+               case mmGB_TILE_MODE2:
+               case mmGB_TILE_MODE3:
+               case mmGB_TILE_MODE4:
+               case mmGB_TILE_MODE5:
+               case mmGB_TILE_MODE6:
+               case mmGB_TILE_MODE7:
+               case mmGB_TILE_MODE8:
+               case mmGB_TILE_MODE9:
+               case mmGB_TILE_MODE10:
+               case mmGB_TILE_MODE11:
+               case mmGB_TILE_MODE12:
+               case mmGB_TILE_MODE13:
+               case mmGB_TILE_MODE14:
+               case mmGB_TILE_MODE15:
+               case mmGB_TILE_MODE16:
+               case mmGB_TILE_MODE17:
+               case mmGB_TILE_MODE18:
+               case mmGB_TILE_MODE19:
+               case mmGB_TILE_MODE20:
+               case mmGB_TILE_MODE21:
+               case mmGB_TILE_MODE22:
+               case mmGB_TILE_MODE23:
+               case mmGB_TILE_MODE24:
+               case mmGB_TILE_MODE25:
+               case mmGB_TILE_MODE26:
+               case mmGB_TILE_MODE27:
+               case mmGB_TILE_MODE28:
+               case mmGB_TILE_MODE29:
+               case mmGB_TILE_MODE30:
+               case mmGB_TILE_MODE31:
+                       idx = (reg_offset - mmGB_TILE_MODE0);
+                       return adev->gfx.config.tile_mode_array[idx];
+               case mmGB_MACROTILE_MODE0:
+               case mmGB_MACROTILE_MODE1:
+               case mmGB_MACROTILE_MODE2:
+               case mmGB_MACROTILE_MODE3:
+               case mmGB_MACROTILE_MODE4:
+               case mmGB_MACROTILE_MODE5:
+               case mmGB_MACROTILE_MODE6:
+               case mmGB_MACROTILE_MODE7:
+               case mmGB_MACROTILE_MODE8:
+               case mmGB_MACROTILE_MODE9:
+               case mmGB_MACROTILE_MODE10:
+               case mmGB_MACROTILE_MODE11:
+               case mmGB_MACROTILE_MODE12:
+               case mmGB_MACROTILE_MODE13:
+               case mmGB_MACROTILE_MODE14:
+               case mmGB_MACROTILE_MODE15:
+                       idx = (reg_offset - mmGB_MACROTILE_MODE0);
+                       return adev->gfx.config.macrotile_mode_array[idx];
+               default:
+                       return RREG32(reg_offset);
+               }
+       }
 }
 
 static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -1048,13 +1127,13 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
 
        *value = 0;
        for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+               bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
                if (reg_offset != cik_allowed_read_registers[i].reg_offset)
                        continue;
 
-               *value = cik_allowed_read_registers[i].grbm_indexed ?
-                        cik_read_indexed_register(adev, se_num,
-                                                  sh_num, reg_offset) :
-                        RREG32(reg_offset);
+               *value = cik_get_register_value(adev, indexed, se_num, sh_num,
+                                               reg_offset);
                return 0;
        }
        return -EINVAL;
index 5c8a7a4..419ba0c 100644 (file)
@@ -1819,6 +1819,22 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
                                                        adev->gfx.config.backend_enable_mask,
                                                        num_rb_pipes);
        }
+
+       /* cache the values for userspace */
+       for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+               for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+                       gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+                       adev->gfx.config.rb_config[i][j].rb_backend_disable =
+                               RREG32(mmCC_RB_BACKEND_DISABLE);
+                       adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+                               RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+                       adev->gfx.config.rb_config[i][j].raster_config =
+                               RREG32(mmPA_SC_RASTER_CONFIG);
+                       adev->gfx.config.rb_config[i][j].raster_config_1 =
+                               RREG32(mmPA_SC_RASTER_CONFIG_1);
+               }
+       }
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 }
 
index da43813..5aeb5f8 100644 (file)
@@ -2467,7 +2467,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
                                  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
                                  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
                                  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
-                                 PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
+                                 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
                                  PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
                                  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
                amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
index 1eb4d79..0450ac5 100644 (file)
@@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
 
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
+       adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
        adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
 }
 
index 7bb0bc0..342c2d9 100644 (file)
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
 #
 # Makefile for Heterogenous System Architecture support for AMD GPU devices
 #
index 6c5a9ca..f744cae 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/device.h>
+#include <linux/printk.h>
 #include "kfd_priv.h"
 
 #define KFD_DRIVER_AUTHOR      "AMD Inc. and others"
@@ -132,7 +133,7 @@ static void __exit kfd_module_exit(void)
        kfd_process_destroy_wq();
        kfd_topology_shutdown();
        kfd_chardev_exit();
-       dev_info(kfd_device, "Removed module\n");
+       pr_info("amdkfd: Removed module\n");
 }
 
 module_init(kfd_module_init);
index 4859d26..4728fad 100644 (file)
@@ -202,8 +202,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
        struct cik_sdma_rlc_registers *m;
 
        m = get_sdma_mqd(mqd);
-       m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
-                       SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+       m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+                       << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
                        q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
                        1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
                        6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
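
The SDMA fix above encodes the ring size as log2 of the queue size in dwords: ffs() returns the 1-based position of the lowest set bit, so for a power-of-two size the field wants ffs(x) - 1, not ffs(x). A tiny worked example of that encoding (the queue size is just an illustrative value):

    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    int main(void)
    {
            unsigned int queue_size = 4096;                          /* bytes, example */
            unsigned int dwords = queue_size / sizeof(unsigned int); /* 1024 dwords */

            /* ffs(1024) == 11, but the RB_SIZE field wants log2(1024) == 10 */
            printf("ffs(%u) = %d, RB_SIZE = %d\n",
                   dwords, ffs(dwords), ffs(dwords) - 1);
            return 0;
    }
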
index 2bec902..a3f1e62 100644 (file)
@@ -191,6 +191,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 
        switch (type) {
        case KFD_QUEUE_TYPE_SDMA:
+               if (dev->dqm->queue_count >=
+                       CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+                       pr_err("Over-subscription is not allowed for SDMA.\n");
+                       retval = -EPERM;
+                       goto err_create_queue;
+               }
+
+               retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+               if (retval != 0)
+                       goto err_create_queue;
+               pqn->q = q;
+               pqn->kq = NULL;
+               retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
+                                               &q->properties.vmid);
+               pr_debug("DQM returned %d for create_queue\n", retval);
+               print_queue(q);
+               break;
+
        case KFD_QUEUE_TYPE_COMPUTE:
                /* check if there is over subscription */
                if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
index 8ba37dd..c27c81c 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the DAL (Display Abstract Layer), which is a  sub-component
 # of the AMDGPU drm driver.
 # It provides the HW control for display related functionalities.
index 4699e47..2b72009 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'dm' sub-component of DAL.
 # It provides the control and status of dm blocks.
 
index 889ed24..bb5fa89 100644 (file)
@@ -520,7 +520,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
 
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                aconnector = to_amdgpu_dm_connector(connector);
-               if (aconnector->dc_link->type == dc_connection_mst_branch) {
+               if (aconnector->dc_link->type == dc_connection_mst_branch &&
+                   aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                        aconnector, aconnector->base.base.id);
 
@@ -677,6 +678,10 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 
                mutex_lock(&aconnector->hpd_lock);
                dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+
+               if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+                       aconnector->fake_enable = false;
+
                aconnector->dc_sink = NULL;
                amdgpu_dm_update_connector_after_detect(aconnector);
                mutex_unlock(&aconnector->hpd_lock);
@@ -711,7 +716,6 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 
        ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
 
-       drm_atomic_state_put(adev->dm.cached_state);
        adev->dm.cached_state = NULL;
 
        amdgpu_dm_irq_resume_late(adev);
@@ -2332,7 +2336,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                       const struct dm_connector_state *dm_state)
 {
        struct drm_display_mode *preferred_mode = NULL;
-       const struct drm_connector *drm_connector;
+       struct drm_connector *drm_connector;
        struct dc_stream_state *stream = NULL;
        struct drm_display_mode mode = *drm_mode;
        bool native_mode_found = false;
@@ -2351,11 +2355,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        if (!aconnector->dc_sink) {
                /*
-                * Exclude MST from creating fake_sink
-                * TODO: need to enable MST into fake_sink feature
+                * Create a dc_sink when necessary for MST;
+                * don't apply fake_sink to MST.
                 */
-               if (aconnector->mst_port)
-                       goto stream_create_fail;
+               if (aconnector->mst_port) {
+                       dm_dp_mst_dc_sink_create(drm_connector);
+                       goto mst_dc_sink_create_done;
+               }
 
                if (create_fake_sink(aconnector))
                        goto stream_create_fail;
@@ -2406,6 +2412,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 stream_create_fail:
 dm_state_null:
 drm_connector_null:
+mst_dc_sink_create_done:
        return stream;
 }
 
@@ -2704,7 +2711,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
-       struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+       struct edid *edid;
 
        if (!aconnector->base.edid_blob_ptr ||
                !aconnector->base.edid_blob_ptr->data) {
@@ -2716,6 +2723,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
                return;
        }
 
+       edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
        aconnector->edid = edid;
 
        aconnector->dc_em_sink = dc_link_add_remote_sink(
@@ -4193,13 +4202,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
                                dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
 
+               if (!dm_new_crtc_state->stream)
+                       continue;
+
                status = dc_stream_get_status(dm_new_crtc_state->stream);
                WARN_ON(!status);
                WARN_ON(!status->plane_count);
 
-               if (!dm_new_crtc_state->stream)
-                       continue;
-
                /*TODO How it works with MPO ?*/
                if (!dc_commit_planes_to_stream(
                                dm->dc,
@@ -4253,7 +4262,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        drm_atomic_helper_commit_hw_done(state);
 
        if (wait_for_vblank)
-               drm_atomic_helper_wait_for_vblanks(dev, state);
+               drm_atomic_helper_wait_for_flip_done(dev, state);
 
        drm_atomic_helper_cleanup_planes(dev, state);
 }
@@ -4332,9 +4341,11 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
                return;
 
        disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
-       acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+       if (!disconnected_acrtc)
+               return;
 
-       if (!disconnected_acrtc || !acrtc_state->stream)
+       acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+       if (!acrtc_state->stream)
                return;
 
        /*
@@ -4455,7 +4466,7 @@ static int dm_update_crtcs_state(struct dc *dc,
                        }
                }
 
-               if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+               if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                                dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
 
                        new_crtc_state->mode_changed = false;
@@ -4709,7 +4720,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                }
        } else {
                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-                       if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+                       if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+                                       !new_crtc_state->color_mgmt_changed)
                                continue;
 
                        if (!new_crtc_state->enable)
index 117521c..0230250 100644 (file)
@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
        struct mutex hpd_lock;
 
        bool fake_enable;
+
+       bool mst_connected;
 };
 
 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
index f8efb98..638c2c2 100644 (file)
@@ -185,6 +185,42 @@ static int dm_connector_update_modes(struct drm_connector *connector,
        return ret;
 }
 
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+{
+       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+       struct edid *edid;
+       struct dc_sink *dc_sink;
+       struct dc_sink_init_data init_params = {
+                       .link = aconnector->dc_link,
+                       .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+       edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+       if (!edid) {
+               drm_mode_connector_update_edid_property(
+                       &aconnector->base,
+                       NULL);
+               return;
+       }
+
+       aconnector->edid = edid;
+
+       dc_sink = dc_link_add_remote_sink(
+               aconnector->dc_link,
+               (uint8_t *)aconnector->edid,
+               (aconnector->edid->extensions + 1) * EDID_LENGTH,
+               &init_params);
+
+       dc_sink->priv = aconnector;
+       aconnector->dc_sink = dc_sink;
+
+       amdgpu_dm_add_sink_to_freesync_module(
+                       connector, aconnector->edid);
+
+       drm_mode_connector_update_edid_property(
+                                       &aconnector->base, aconnector->edid);
+}
+
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -311,6 +347,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                        drm_mode_connector_set_path_property(connector, pathprop);
 
                        drm_connector_list_iter_end(&conn_iter);
+                       aconnector->mst_connected = true;
                        return &aconnector->base;
                }
        }
@@ -363,6 +400,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
         */
        amdgpu_dm_connector_funcs_reset(connector);
 
+       aconnector->mst_connected = true;
+
        DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
                        aconnector, connector->base.id, aconnector->mst_port);
 
@@ -394,6 +433,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        drm_mode_connector_update_edid_property(
                        &aconnector->base,
                        NULL);
+
+       aconnector->mst_connected = false;
 }
 
 static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -404,10 +445,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
        drm_kms_helper_hotplug_event(dev);
 }
 
+static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
+{
+       mutex_lock(&connector->dev->mode_config.mutex);
+       drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+       mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
 static void dm_dp_mst_register_connector(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
        if (adev->mode_info.rfbdev)
                drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -416,6 +465,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
 
        drm_connector_register(connector);
 
+       if (aconnector->mst_connected)
+               dm_dp_mst_link_status_reset(connector);
 }
 
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
index 2da851b..8cf51da 100644 (file)
@@ -31,5 +31,6 @@ struct amdgpu_dm_connector;
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
                                       struct amdgpu_dm_connector *aconnector);
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
 
 #endif
index 4f83e30..aed538a 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for Display Core (dc) component.
 #
 
index 43c5ccd..6af8c8a 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'utils' sub-component of DAL.
 # It provides the general basic services required by other DAL
 # subcomponents.
index 785b943..6e43168 100644 (file)
@@ -75,6 +75,9 @@ void dc_conn_log(struct dc_context *ctx,
                if (signal == signal_type_info_tbl[i].type)
                        break;
 
+       if (i == NUM_ELEMENTS(signal_type_info_tbl))
+               goto fail;
+
        dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
                        signal_type_info_tbl[i].name,
                        link->link_index);
@@ -96,6 +99,8 @@ void dc_conn_log(struct dc_context *ctx,
 
        dm_logger_append(&entry, "^\n");
        dm_helpers_dc_conn_log(ctx, &entry, event);
+
+fail:
        dm_logger_close(&entry);
 
        va_end(args);
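
The dc_conn_log() change above is the classic linear-search-with-sentinel guard: if the loop falls off the end of signal_type_info_tbl, nothing past the table may be dereferenced. A generic sketch of the pattern with a made-up table:

    #include <stddef.h>
    #include <stdio.h>

    struct sig_info { int type; const char *name; };

    static const struct sig_info tbl[] = {
            { 1, "HDMI" }, { 2, "DP" }, { 3, "eDP" },
    };

    static const char *signal_name(int type)
    {
            size_t i;

            for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                    if (tbl[i].type == type)
                            break;

            /* Without this check, tbl[i] would read past the array. */
            if (i == sizeof(tbl) / sizeof(tbl[0]))
                    return "unknown";

            return tbl[i].name;
    }

    int main(void)
    {
            printf("%s %s\n", signal_name(2), signal_name(9));
            return 0;
    }
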
index 6ec815d..239e86b 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'bios' sub-component of DAL.
 # It provides the parsing and executing controls for atom bios image.
 
index aaaebd0..86e6438 100644 (file)
@@ -249,7 +249,7 @@ static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
        struct graphics_object_id *dest_object_id)
 {
        uint32_t number;
-       uint16_t *id;
+       uint16_t *id = NULL;
        ATOM_OBJECT *object;
        struct bios_parser *bp = BP_FROM_DCB(dcb);
 
@@ -260,7 +260,7 @@ static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
 
        number = get_dest_obj_list(bp, object, &id);
 
-       if (number <= index)
+       if (number <= index || !id)
                return BP_RESULT_BADINPUT;
 
        *dest_object_id = object_id_from_bios_object_id(id[index]);
index 41ef359..7959e38 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'calcs' sub-component of DAL.
 # It calculates Bandwidth and Watermarks values for HW programming
 #
index 3dce35e..b142629 100644 (file)
@@ -900,6 +900,15 @@ bool dcn_validate_bandwidth(
                        v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
                        v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
                        v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+                       /*
+                        * The spreadsheet model doesn't handle taps_c == 1
+                        * properly; force chroma to always be scaled so that
+                        * bandwidth validation passes.
+                        */
+                       if (v->override_hta_pschroma[input_idx] == 1)
+                               v->override_hta_pschroma[input_idx] = 2;
+                       if (v->override_vta_pschroma[input_idx] == 1)
+                               v->override_vta_pschroma[input_idx] = 2;
                        v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
                }
                if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
index fe63f58..7240db2 100644 (file)
@@ -121,6 +121,10 @@ static bool create_links(
                        goto failed_alloc;
                }
 
+               link->link_index = dc->link_count;
+               dc->links[dc->link_count] = link;
+               dc->link_count++;
+
                link->ctx = dc->ctx;
                link->dc = dc;
                link->connector_signal = SIGNAL_TYPE_VIRTUAL;
@@ -129,6 +133,13 @@ static bool create_links(
                link->link_id.enum_id = ENUM_ID_1;
                link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
 
+               if (!link->link_enc) {
+                       BREAK_TO_DEBUGGER();
+                       goto failed_alloc;
+               }
+
+               link->link_status.dpcd_caps = &link->dpcd_caps;
+
                enc_init.ctx = dc->ctx;
                enc_init.channel = CHANNEL_ID_UNKNOWN;
                enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
@@ -138,10 +149,6 @@ static bool create_links(
                enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
                enc_init.encoder.enum_id = ENUM_ID_1;
                virtual_link_encoder_construct(link->link_enc, &enc_init);
-
-               link->link_index = dc->link_count;
-               dc->links[dc->link_count] = link;
-               dc->link_count++;
        }
 
        return true;
index 6acee54..43c7a7f 100644 (file)
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 /*
  * dc_debug.c
  *
index 0602610..42a111b 100644 (file)
@@ -480,22 +480,6 @@ static void detect_dp(
                sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
                detect_dp_sink_caps(link);
 
-               /* DP active dongles */
-               if (is_dp_active_dongle(link)) {
-                       link->type = dc_connection_active_dongle;
-                       if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
-                               /*
-                                * active dongle unplug processing for short irq
-                                */
-                               link_disconnect_sink(link);
-                               return;
-                       }
-
-                       if (link->dpcd_caps.dongle_type !=
-                       DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
-                               *converter_disable_audio = true;
-                       }
-               }
                if (is_mst_supported(link)) {
                        sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
                        link->type = dc_connection_mst_branch;
@@ -535,6 +519,22 @@ static void detect_dp(
                                sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
                        }
                }
+
+               if (link->type != dc_connection_mst_branch &&
+                       is_dp_active_dongle(link)) {
+                       /* DP active dongles */
+                       link->type = dc_connection_active_dongle;
+                       if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+                               /*
+                                * handle active dongle unplug for the short-pulse HPD irq
+                                */
+                               link_disconnect_sink(link);
+                               return;
+                       }
+
+                       if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+                               *converter_disable_audio = true;
+               }
        } else {
                /* DP passive dongles */
                sink_caps->signal = dp_passive_dongle_detection(link->ddc,
@@ -1801,12 +1801,77 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
                link->link_enc->funcs->disable_output(link->link_enc, signal, link);
 }
 
+static bool dp_active_dongle_validate_timing(
+               const struct dc_crtc_timing *timing,
+               const struct dc_dongle_caps *dongle_caps)
+{
+       unsigned int required_pix_clk = timing->pix_clk_khz;
+
+       if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+               dongle_caps->extendedCapValid == false)
+               return true;
+
+       /* Check Pixel Encoding */
+       switch (timing->pixel_encoding) {
+       case PIXEL_ENCODING_RGB:
+       case PIXEL_ENCODING_YCBCR444:
+               break;
+       case PIXEL_ENCODING_YCBCR422:
+               if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+                       return false;
+               break;
+       case PIXEL_ENCODING_YCBCR420:
+               if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+                       return false;
+               break;
+       default:
+               /* Invalid pixel encoding */
+               return false;
+       }
+
+
+       /* Check Color Depth and Pixel Clock */
+       if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+               required_pix_clk /= 2;
+       else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+               required_pix_clk = required_pix_clk * 2 / 3;
+
+       switch (timing->display_color_depth) {
+       case COLOR_DEPTH_666:
+       case COLOR_DEPTH_888:
+               /* 888 and 666 should always be supported */
+               break;
+       case COLOR_DEPTH_101010:
+               if (dongle_caps->dp_hdmi_max_bpc < 10)
+                       return false;
+               required_pix_clk = required_pix_clk * 10 / 8;
+               break;
+       case COLOR_DEPTH_121212:
+               if (dongle_caps->dp_hdmi_max_bpc < 12)
+                       return false;
+               required_pix_clk = required_pix_clk * 12 / 8;
+               break;
+
+       case COLOR_DEPTH_141414:
+       case COLOR_DEPTH_161616:
+       default:
+               /* These color depths are currently not supported */
+               return false;
+       }
+
+       if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk)
+               return false;
+
+       return true;
+}
+
 enum dc_status dc_link_validate_mode_timing(
                const struct dc_stream_state *stream,
                struct dc_link *link,
                const struct dc_crtc_timing *timing)
 {
        uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+       struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps;
 
        /* A hack to avoid failing any modes for EDID override feature on
         * topology change such as lower quality cable for DP or different dongle
@@ -1814,8 +1879,13 @@ enum dc_status dc_link_validate_mode_timing(
        if (link->remote_sinks[0])
                return DC_OK;
 
+       /* Passive Dongle */
        if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
-               return DC_EXCEED_DONGLE_MAX_CLK;
+               return DC_EXCEED_DONGLE_CAP;
+
+       /* Active Dongle*/
+       if (!dp_active_dongle_validate_timing(timing, dongle_caps))
+               return DC_EXCEED_DONGLE_CAP;
 
        switch (stream->signal) {
        case SIGNAL_TYPE_EDP:
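
dp_active_dongle_validate_timing() above scales the requested pixel clock by the pixel-encoding and color-depth overhead before comparing it against the dongle's dp_hdmi_max_pixel_clk. A worked sketch of that arithmetic with illustrative numbers only (a 594 MHz 4K60 mode, YCbCr 4:2:0 at 10 bpc, against an assumed 600 MHz dongle limit):

    #include <stdio.h>

    int main(void)
    {
            unsigned int pix_clk_khz = 594000;    /* 4K60, example mode   */
            unsigned int dongle_max_khz = 600000; /* assumed dongle limit */

            pix_clk_khz /= 2;                     /* YCbCr 4:2:0 halves the rate */
            pix_clk_khz = pix_clk_khz * 10 / 8;   /* 10 bpc costs 10/8           */

            printf("required %u kHz vs limit %u kHz -> %s\n",
                   pix_clk_khz, dongle_max_khz,
                   pix_clk_khz > dongle_max_khz ? "reject" : "ok");
            return 0;
    }
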
index ced4248..e6bf05d 100644 (file)
@@ -1512,7 +1512,7 @@ static bool hpd_rx_irq_check_link_loss_status(
        struct dc_link *link,
        union hpd_irq_data *hpd_irq_dpcd_data)
 {
-       uint8_t irq_reg_rx_power_state;
+       uint8_t irq_reg_rx_power_state = 0;
        enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
        union lane_status lane_status;
        uint32_t lane;
@@ -1524,60 +1524,55 @@ static bool hpd_rx_irq_check_link_loss_status(
 
        if (link->cur_link_settings.lane_count == 0)
                return return_code;
-       /*1. Check that we can handle interrupt: Not in FS DOS,
-        *  Not in "Display Timeout" state, Link is trained.
-        */
 
-       dpcd_result = core_link_read_dpcd(link,
-               DP_SET_POWER,
-               &irq_reg_rx_power_state,
-               sizeof(irq_reg_rx_power_state));
+       /*1. Check that Link Status changed, before re-training.*/
 
-       if (dpcd_result != DC_OK) {
-               irq_reg_rx_power_state = DP_SET_POWER_D0;
-               dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
-                       "%s: DPCD read failed to obtain power state.\n",
-                       __func__);
+       /*parse lane status*/
+       for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+               /* check status of lanes 0,1
+                * changed DpcdAddress_Lane01Status (0x202)
+                */
+               lane_status.raw = get_nibble_at_index(
+                       &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+                       lane);
+
+               if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+                       !lane_status.bits.CR_DONE_0 ||
+                       !lane_status.bits.SYMBOL_LOCKED_0) {
+                       /* if channel equalization, clock recovery or
+                        * symbol lock is dropped on any lane, treat the
+                        * link as dropped: the DP sink status has changed
+                        */
+                       sink_status_changed = true;
+                       break;
+               }
        }
 
-       if (irq_reg_rx_power_state == DP_SET_POWER_D0) {
-
-               /*2. Check that Link Status changed, before re-training.*/
-
-               /*parse lane status*/
-               for (lane = 0;
-                       lane < link->cur_link_settings.lane_count;
-                       lane++) {
+       /* Check interlane align.*/
+       if (sink_status_changed ||
+               !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
 
-                       /* check status of lanes 0,1
-                        * changed DpcdAddress_Lane01Status (0x202)*/
-                       lane_status.raw = get_nibble_at_index(
-                               &hpd_irq_dpcd_data->bytes.lane01_status.raw,
-                               lane);
-
-                       if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
-                               !lane_status.bits.CR_DONE_0 ||
-                               !lane_status.bits.SYMBOL_LOCKED_0) {
-                               /* if one of the channel equalization, clock
-                                * recovery or symbol lock is dropped
-                                * consider it as (link has been
-                                * dropped) dp sink status has changed*/
-                               sink_status_changed = true;
-                               break;
-                       }
+               dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+                       "%s: Link Status changed.\n", __func__);
 
-               }
+               return_code = true;
 
-               /* Check interlane align.*/
-               if (sink_status_changed ||
-                       !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
-                       INTERLANE_ALIGN_DONE) {
+               /*2. Check that we can handle interrupt: Not in FS DOS,
+                *  Not in "Display Timeout" state, Link is trained.
+                */
+               dpcd_result = core_link_read_dpcd(link,
+                       DP_SET_POWER,
+                       &irq_reg_rx_power_state,
+                       sizeof(irq_reg_rx_power_state));
 
+               if (dpcd_result != DC_OK) {
                        dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
-                               "%s: Link Status changed.\n",
+                               "%s: DPCD read failed to obtain power state.\n",
                                __func__);
-
-                       return_code = true;
+               } else {
+                       if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+                               return_code = false;
                }
        }
 
@@ -2062,6 +2057,24 @@ bool is_dp_active_dongle(const struct dc_link *link)
                        (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
 }
 
+static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
+{
+       switch (bpc) {
+       case DOWN_STREAM_MAX_8BPC:
+               return 8;
+       case DOWN_STREAM_MAX_10BPC:
+               return 10;
+       case DOWN_STREAM_MAX_12BPC:
+               return 12;
+       case DOWN_STREAM_MAX_16BPC:
+               return 16;
+       default:
+               break;
+       }
+
+       return -1;
+}
+
 static void get_active_converter_info(
        uint8_t data, struct dc_link *link)
 {
@@ -2131,7 +2144,8 @@ static void get_active_converter_info(
                                        hdmi_caps.bits.YCrCr420_CONVERSION;
 
                                link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
-                                       hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT;
+                                       translate_dpcd_max_bpc(
+                                               hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
 
                                link->dpcd_caps.dongle_caps.extendedCapValid = true;
                        }
index d1cdf9f..9288958 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -516,13 +516,11 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
                        right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
 
                if (right_view) {
-                       data->viewport.width /= 2;
-                       data->viewport_c.width /= 2;
-                       data->viewport.x +=  data->viewport.width;
-                       data->viewport_c.x +=  data->viewport_c.width;
+                       data->viewport.x +=  data->viewport.width / 2;
+                       data->viewport_c.x +=  data->viewport_c.width / 2;
                        /* Ceil offset pipe */
-                       data->viewport.width += data->viewport.width % 2;
-                       data->viewport_c.width += data->viewport_c.width % 2;
+                       data->viewport.width = (data->viewport.width + 1) / 2;
+                       data->viewport_c.width = (data->viewport_c.width + 1) / 2;
                } else {
                        data->viewport.width /= 2;
                        data->viewport_c.width /= 2;
@@ -580,14 +578,12 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
        if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
                pipe_ctx->plane_state) {
                if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
-                       pipe_ctx->plane_res.scl_data.recout.height /= 2;
-                       pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
+                       pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2;
                        /* Floor primary pipe, ceil 2ndary pipe */
-                       pipe_ctx->plane_res.scl_data.recout.height += pipe_ctx->plane_res.scl_data.recout.height % 2;
+                       pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
                } else {
-                       pipe_ctx->plane_res.scl_data.recout.width /= 2;
-                       pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
-                       pipe_ctx->plane_res.scl_data.recout.width += pipe_ctx->plane_res.scl_data.recout.width % 2;
+                       pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2;
+                       pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
                }
        } else if (pipe_ctx->bottom_pipe &&
                        pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
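
The viewport/recout change above fixes how an odd-width (or odd-height) surface is split across two pipes: instead of halving the width twice, the secondary pipe is offset by floor(width/2) and sized to ceil(width/2), so the two halves tile the full surface with no gap or overlap. A worked sketch with an odd width:

    #include <stdio.h>

    int main(void)
    {
            unsigned int x = 0, width = 1921;        /* odd width, example    */

            unsigned int left_w  = width / 2;        /* 960, floor: primary   */
            unsigned int right_x = x + width / 2;    /* 960                   */
            unsigned int right_w = (width + 1) / 2;  /* 961, ceil: secondary  */

            printf("left  [%u, %u)\nright [%u, %u)\n",
                   x, x + left_w, right_x, right_x + right_w);
            return 0;
    }
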
@@ -856,6 +852,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
        pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
 
+
        /* Taps calculations */
        if (pipe_ctx->plane_res.xfm != NULL)
                res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
@@ -864,16 +861,21 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        if (pipe_ctx->plane_res.dpp != NULL)
                res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
                                pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
-
        if (!res) {
                /* Try 24 bpp linebuffer */
                pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
 
-               res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
-                       pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+               if (pipe_ctx->plane_res.xfm != NULL)
+                       res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+                                       pipe_ctx->plane_res.xfm,
+                                       &pipe_ctx->plane_res.scl_data,
+                                       &plane_state->scaling_quality);
 
-               res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
-                       pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+               if (pipe_ctx->plane_res.dpp != NULL)
+                       res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+                                       pipe_ctx->plane_res.dpp,
+                                       &pipe_ctx->plane_res.scl_data,
+                                       &plane_state->scaling_quality);
        }
 
        if (res)
@@ -991,8 +993,10 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
 
        head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
 
-       if (!head_pipe)
+       if (!head_pipe) {
                ASSERT(0);
+               return NULL;
+       }
 
        if (!head_pipe->plane_state)
                return head_pipe;
@@ -1447,11 +1451,16 @@ static struct stream_encoder *find_first_free_match_stream_enc_for_link(
 
 static struct audio *find_first_free_audio(
                struct resource_context *res_ctx,
-               const struct resource_pool *pool)
+               const struct resource_pool *pool,
+               enum engine_id id)
 {
        int i;
        for (i = 0; i < pool->audio_count; i++) {
                if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
+                       /* we have enough audio endpoints, find the matching instance */
+                       if (id != i)
+                               continue;
+
                        return pool->audios[i];
                }
        }
@@ -1700,7 +1709,7 @@ enum dc_status resource_map_pool_resources(
            dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
            stream->audio_info.mode_count) {
                pipe_ctx->stream_res.audio = find_first_free_audio(
-               &context->res_ctx, pool);
+               &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
 
                /*
                 * Audio assigned in order first come first get.
@@ -1765,13 +1774,16 @@ enum dc_status dc_validate_global_state(
        enum dc_status result = DC_ERROR_UNEXPECTED;
        int i, j;
 
+       if (!new_ctx)
+               return DC_ERROR_UNEXPECTED;
+
        if (dc->res_pool->funcs->validate_global) {
                        result = dc->res_pool->funcs->validate_global(dc, new_ctx);
                        if (result != DC_OK)
                                return result;
        }
 
-       for (i = 0; new_ctx && i < new_ctx->stream_count; i++) {
+       for (i = 0; i < new_ctx->stream_count; i++) {
                struct dc_stream_state *stream = new_ctx->streams[i];
 
                for (j = 0; j < dc->res_pool->pipe_count; j++) {
index b00a604..e230cc4 100644 (file)
@@ -263,7 +263,6 @@ bool dc_stream_set_cursor_position(
                struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
                struct mem_input *mi = pipe_ctx->plane_res.mi;
                struct hubp *hubp = pipe_ctx->plane_res.hubp;
-               struct transform *xfm = pipe_ctx->plane_res.xfm;
                struct dpp *dpp = pipe_ctx->plane_res.dpp;
                struct dc_cursor_position pos_cpy = *position;
                struct dc_cursor_mi_param param = {
@@ -294,11 +293,11 @@ bool dc_stream_set_cursor_position(
                if (mi != NULL && mi->funcs->set_cursor_position != NULL)
                        mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
 
-               if (hubp != NULL && hubp->funcs->set_cursor_position != NULL)
-                       hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+               if (!hubp)
+                       continue;
 
-               if (xfm != NULL && xfm->funcs->set_cursor_position != NULL)
-                       xfm->funcs->set_cursor_position(xfm, &pos_cpy, &param, hubp->curs_attr.width);
+               if (hubp->funcs->set_cursor_position != NULL)
+                       hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
 
                if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
                        dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
index 0d84b2a..90e81f7 100644 (file)
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 /*
  * dc_helper.c
  *
index 8abec0b..11401fd 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for common 'dce' logic
 # HW object file under this folder follow similar pattern for HW programming
 #   - register offset and/or shift + mask stored in the dec_hw struct
index 81c40f8..0df9ecb 100644 (file)
@@ -352,11 +352,11 @@ void dce_aud_az_enable(struct audio *audio)
        uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
 
        set_reg_field_value(value, 1,
-                       AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
-                       CLOCK_GATING_DISABLE);
-               set_reg_field_value(value, 1,
-                       AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
-                       AUDIO_ENABLED);
+                           AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+                           CLOCK_GATING_DISABLE);
+       set_reg_field_value(value, 1,
+                           AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+                           AUDIO_ENABLED);
 
        AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
        value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
index 4fd49a1..e42b6eb 100644 (file)
@@ -87,6 +87,9 @@ static void dce110_update_generic_info_packet(
         */
        uint32_t max_retries = 50;
 
+       /* we need to turn on the clock before programming the AFMT block */
+       REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
        if (REG(AFMT_VBI_PACKET_CONTROL1)) {
                if (packet_index >= 8)
                        ASSERT(0);
index ea40870..a822d4e 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
index 9091125..3ea43e2 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
index de8fdf4..2f366d6 100644 (file)
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
 /*
  * dce100_resource.h
  *
index 98d956e..d564c0e 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
index 1229a33..d844fad 100644 (file)
@@ -991,6 +991,16 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
        struct dc_link *link = stream->sink->link;
        struct dc *dc = pipe_ctx->stream->ctx->dc;
 
+       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+               pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+                       pipe_ctx->stream_res.stream_enc);
+
+       if (dc_is_dp_signal(pipe_ctx->stream->signal))
+               pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+                       pipe_ctx->stream_res.stream_enc);
+
+       pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+                       pipe_ctx->stream_res.stream_enc, true);
        if (pipe_ctx->stream_res.audio) {
                pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
 
@@ -1015,18 +1025,6 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
                 */
        }
 
-       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
-               pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
-                       pipe_ctx->stream_res.stream_enc);
-
-       if (dc_is_dp_signal(pipe_ctx->stream->signal))
-               pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
-                       pipe_ctx->stream_res.stream_enc);
-
-       pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
-                       pipe_ctx->stream_res.stream_enc, true);
-
-
        /* blank at encoder level */
        if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
                if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
@@ -1774,6 +1772,10 @@ static enum dc_status validate_fbc(struct dc *dc,
        if (pipe_ctx->stream->sink->link->psr_enabled)
                return DC_ERROR_UNEXPECTED;
 
+       /* Nothing to compress */
+       if (!pipe_ctx->plane_state)
+               return DC_ERROR_UNEXPECTED;
+
        /* Only for non-linear tiling */
        if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
                return DC_ERROR_UNEXPECTED;
@@ -1868,8 +1870,10 @@ static void dce110_reset_hw_ctx_wrap(
                                pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
                        struct clock_source *old_clk = pipe_ctx_old->clock_source;
 
-                       /* disable already, no need to disable again */
-                       if (pipe_ctx->stream && !pipe_ctx->stream->dpms_off)
+                       /* Disable if the new stream is null. Otherwise, if the
+                        * stream is disabled already, no need to disable again.
+                        */
+                       if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off)
                                core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE);
 
                        pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
@@ -2862,16 +2866,19 @@ static void dce110_apply_ctx_for_surface(
                int num_planes,
                struct dc_state *context)
 {
-       int i, be_idx;
+       int i;
 
        if (num_planes == 0)
                return;
 
-       be_idx = -1;
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               if (stream == context->res_ctx.pipe_ctx[i].stream) {
-                       be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
-                       break;
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+               if (stream == pipe_ctx->stream) {
+                       if (!pipe_ctx->top_pipe &&
+                               (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+                               dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
                }
        }
 
@@ -2891,9 +2898,22 @@ static void dce110_apply_ctx_for_surface(
                                        context->stream_count);
 
                dce110_program_front_end_for_pipe(dc, pipe_ctx);
+
+               dc->hwss.update_plane_addr(dc, pipe_ctx);
+
                program_surface_visibility(dc, pipe_ctx);
 
        }
+
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+               if ((stream == pipe_ctx->stream) &&
+                       (!pipe_ctx->top_pipe) &&
+                       (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+                       dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+       }
 }
 
 static void dce110_power_down_fe(struct dc *dc, int fe_idx)
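The two loops added above bracket front-end programming with the per-pipe control lock: every head pipe that carries the stream is locked before programming and released in a second pass once the plane address and visibility have been updated. The sketch below is a simplification of that bracket, not the driver function itself: the name apply_ctx_for_stream is hypothetical, the plane_state checks from the hunk are dropped, and the programming step is reduced to a comment; pipe_control_lock and the pipe/stream fields are the ones used in the hunk.

    static void apply_ctx_for_stream(struct dc *dc, struct dc_state *context,
                                     const struct dc_stream_state *stream)
    {
            int i;

            /* 1) lock every head pipe that carries this stream */
            for (i = 0; i < dc->res_pool->pipe_count; i++) {
                    struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

                    if (pipe->stream == stream && !pipe->top_pipe)
                            dc->hwss.pipe_control_lock(dc, pipe, true);
            }

            /* 2) program front end, plane address and visibility while locked */

            /* 3) release the locks in a second pass, mirroring step 1 */
            for (i = 0; i < dc->res_pool->pipe_count; i++) {
                    struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

                    if (pipe->stream == stream && !pipe->top_pipe)
                            dc->hwss.pipe_control_lock(dc, pipe, false);
            }
    }
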
index db96d2b..42df17f 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -1037,11 +1037,13 @@ static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
        struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv),
                                                 GFP_KERNEL);
 
-       if ((dce110_tgv == NULL) ||
-               (dce110_xfmv == NULL) ||
-               (dce110_miv == NULL) ||
-               (dce110_oppv == NULL))
-                       return false;
+       if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) {
+               kfree(dce110_tgv);
+               kfree(dce110_xfmv);
+               kfree(dce110_miv);
+               kfree(dce110_oppv);
+               return false;
+       }
 
        dce110_opp_v_construct(dce110_oppv, ctx);
 
index 67ac737..4befce6 100644 (file)
@@ -1112,10 +1112,7 @@ bool dce110_timing_generator_validate_timing(
        enum signal_type signal)
 {
        uint32_t h_blank;
-       uint32_t h_back_porch;
-       uint32_t hsync_offset = timing->h_border_right +
-                       timing->h_front_porch;
-       uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+       uint32_t h_back_porch, hsync_offset, h_sync_start;
 
        struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
 
@@ -1124,6 +1121,9 @@ bool dce110_timing_generator_validate_timing(
        if (!timing)
                return false;
 
+       hsync_offset = timing->h_border_right + timing->h_front_porch;
+       h_sync_start = timing->h_addressable + hsync_offset;
+
        /* Currently we don't support 3D, so block all 3D timings */
        if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE)
                return false;
index 07d9303..59b4cd3 100644 (file)
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #include "dm_services.h"
 
 /* include DCE11 register header files */
index 265ac43..8e09044 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
index 1779b96..37db1f8 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
@@ -8,4 +29,4 @@ dce120_hw_sequencer.o
 
 AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120))
 
-AMD_DISPLAY_FILES += $(AMD_DAL_DCE120)
\ No newline at end of file
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE120)
index c110589..bc388aa 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
index ebeb882..f565a60 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for DCN.
 
 DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
index 74e7c82..a9d55d0 100644 (file)
@@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps(
                        scl_data->taps.h_taps = 1;
                if (IDENTITY_RATIO(scl_data->ratios.vert))
                        scl_data->taps.v_taps = 1;
-               /*
-                * Spreadsheet doesn't handle taps_c is one properly,
-                * need to force Chroma to always be scaled to pass
-                * bandwidth validation.
-                */
+               if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+                       scl_data->taps.h_taps_c = 1;
+               if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+                       scl_data->taps.v_taps_c = 1;
        }
 
        return true;
index a9782b1..34daf89 100644 (file)
@@ -1360,7 +1360,7 @@ void dpp1_cm_set_output_csc_adjustment(
 
 void dpp1_cm_set_output_csc_default(
                struct dpp *dpp_base,
-               const struct default_adjustment *default_adjust);
+               enum dc_color_space colorspace);
 
 void dpp1_cm_set_gamut_remap(
        struct dpp *dpp,
index 40627c2..ed1216b 100644 (file)
@@ -225,14 +225,13 @@ void dpp1_cm_set_gamut_remap(
 
 void dpp1_cm_set_output_csc_default(
                struct dpp *dpp_base,
-               const struct default_adjustment *default_adjust)
+               enum dc_color_space colorspace)
 {
 
        struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
        uint32_t ocsc_mode = 0;
 
-       if (default_adjust != NULL) {
-               switch (default_adjust->out_color_space) {
+       switch (colorspace) {
                case COLOR_SPACE_SRGB:
                case COLOR_SPACE_2020_RGB_FULLRANGE:
                        ocsc_mode = 0;
@@ -253,7 +252,6 @@ void dpp1_cm_set_output_csc_default(
                case COLOR_SPACE_UNKNOWN:
                default:
                        break;
-               }
        }
 
        REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
index 961ad5c..05dc01e 100644 (file)
@@ -2097,6 +2097,8 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
                        tbl_entry.color_space = color_space;
                        //tbl_entry.regval = matrix;
                        pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+       } else {
+               pipe_ctx->plane_res.dpp->funcs->opp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
        }
 }
 static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
index 4c4bd72..9fc8f82 100644 (file)
@@ -912,11 +912,13 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
        struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
        struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
 
-       if (!head_pipe)
+       if (!head_pipe) {
                ASSERT(0);
+               return NULL;
+       }
 
        if (!idle_pipe)
-               return false;
+               return NULL;
 
        idle_pipe->stream = head_pipe->stream;
        idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
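The fix above covers two problems in one hunk: ASSERT() only logs in release builds, so the function must still return after asserting rather than dereference a NULL head pipe, and a function that returns a struct pipe_ctx pointer has to fail with NULL rather than false. A hedged sketch of the guard, with a hypothetical wrapper name; the two resource helpers are the ones called in the hunk:

    static struct pipe_ctx *acquire_idle_pipe_sketch(
                    struct resource_context *res_ctx,
                    const struct resource_pool *pool,
                    struct dc_stream_state *stream)
    {
            struct pipe_ctx *head_pipe =
                    resource_get_head_pipe_for_stream(res_ctx, stream);

            if (!head_pipe) {
                    ASSERT(0);      /* loud in debug builds ...              */
                    return NULL;    /* ... but still fail safely in release  */
            }

            /* NULL (not false) when no secondary pipe is available either */
            return find_idle_secondary_pipe(res_ctx, pool);
    }
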
index c7333cd..fced178 100644 (file)
@@ -496,9 +496,6 @@ static bool tgn10_validate_timing(
                timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
                return false;
 
-       if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
-               tg->ctx->dc->debug.disable_stereo_support)
-               return false;
        /* Temporarily blocking interlacing mode until it's supported */
        if (timing->flags.INTERLACE == 1)
                return false;
index 87bab8e..3488af2 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'utils' sub-component of DAL.
 # It provides the general basic services required by other DAL
 # subcomponents.
index 70d01a9..562ee18 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'gpio' sub-component of DAL.
 # It provides the control and status of HW GPIO pins.
 
index 5560340..352885c 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'i2c' sub-component of DAL.
 # It provides the control and status of HW i2c engine of the adapter.
 
index 01df856..94fc310 100644 (file)
@@ -38,7 +38,7 @@ enum dc_status {
        DC_FAIL_DETACH_SURFACES = 8,
        DC_FAIL_SURFACE_VALIDATE = 9,
        DC_NO_DP_LINK_BANDWIDTH = 10,
-       DC_EXCEED_DONGLE_MAX_CLK = 11,
+       DC_EXCEED_DONGLE_CAP = 11,
        DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12,
        DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
        DC_FAIL_SCALING = 14,
index 83a6846..9420dfb 100644 (file)
@@ -64,7 +64,7 @@ struct dpp_funcs {
 
        void (*opp_set_csc_default)(
                struct dpp *dpp,
-               const struct default_adjustment *default_adjust);
+               enum dc_color_space colorspace);
 
        void (*opp_set_csc_adjustment)(
                struct dpp *dpp,
index 3d33bcd..498b7f0 100644 (file)
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 /*
  * link_encoder.h
  *
index 3050afe..b5db169 100644 (file)
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 /*
  * stream_encoder.h
  *
index 7c08bc6..ea88997 100644 (file)
@@ -259,13 +259,6 @@ struct transform_funcs {
                        struct transform *xfm_base,
                        const struct dc_cursor_attributes *attr);
 
-       void (*set_cursor_position)(
-                       struct transform *xfm_base,
-                       const struct dc_cursor_position *pos,
-                       const struct dc_cursor_mi_param *param,
-                       uint32_t width
-                       );
-
 };
 
 const uint16_t *get_filter_2tap_16p(void);
index c7e93f7..498515a 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'audio' sub-component of DAL.
 # It provides the control and status of HW adapter resources,
 # that are global for the ASIC and sharable between pipes.
index fc0b731..07326d2 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the virtual sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
index db8e0ff..fb9a499 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'freesync' sub-module of DAL.
 #
 
index 87cd700..6902430 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for AMD library routines, which are used by AMD driver
 # components.
 #
index 8c55c6e..231785a 100644 (file)
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
 
 subdir-ccflags-y += \
                -I$(FULL_AMD_PATH)/powerplay/inc/  \
index 824fb6f..a212c27 100644 (file)
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
 #
 # Makefile for the 'hw manager' sub-component of powerplay.
 # It provides the hardware management services for the driver.
index 67fae83..8de384b 100644 (file)
@@ -1,4 +1,26 @@
-// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #include "pp_overdriver.h"
 #include <linux/errno.h>
 
index 08cd70c..9ad1cef 100644 (file)
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #ifndef SMU72_H
 #define SMU72_H
 
index b2edbc0..2aefbb8 100644 (file)
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #ifndef SMU72_DISCRETE_H
 #define SMU72_DISCRETE_H
 
index 30d3089..98e701e 100644 (file)
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
 #
 # Makefile for the 'smu manager' sub-component of powerplay.
 # It provides the smu management services for the driver.
index 283a0dc..07129e6 100644 (file)
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _GPU_SCHED_TRACE_H_
 
index 72b22b8..5a5427b 100644 (file)
@@ -317,9 +317,8 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
                                       formats, ARRAY_SIZE(formats),
                                       NULL,
                                       DRM_PLANE_TYPE_PRIMARY, NULL);
-       if (ret) {
+       if (ret)
                return ERR_PTR(ret);
-       }
 
        drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
        hdlcd->plane = plane;
index 764d0c8..0afb53b 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/console.h>
 #include <linux/list.h>
 #include <linux/of_graph.h>
 #include <linux/of_reserved_mem.h>
@@ -354,7 +355,7 @@ err_unload:
 err_free:
        drm_mode_config_cleanup(drm);
        dev_set_drvdata(dev, NULL);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 
        return ret;
 }
@@ -379,7 +380,7 @@ static void hdlcd_drm_unbind(struct device *dev)
        pm_runtime_disable(drm->dev);
        of_reserved_mem_device_release(drm->dev);
        drm_mode_config_cleanup(drm);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
 }
@@ -432,9 +433,11 @@ static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
                return 0;
 
        drm_kms_helper_poll_disable(drm);
+       drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
 
        hdlcd->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(hdlcd->state)) {
+               drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(hdlcd->state);
        }
@@ -451,8 +454,8 @@ static int __maybe_unused hdlcd_pm_resume(struct device *dev)
                return 0;
 
        drm_atomic_helper_resume(drm, hdlcd->state);
+       drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
        drm_kms_helper_poll_enable(drm);
-       pm_runtime_set_active(dev);
 
        return 0;
 }
index 3615d18..904fff8 100644 (file)
@@ -65,8 +65,8 @@ static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
        /* We rely on firmware to set mclk to a sensible level. */
        clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
 
-       hwdev->modeset(hwdev, &vm);
-       hwdev->leave_config_mode(hwdev);
+       hwdev->hw->modeset(hwdev, &vm);
+       hwdev->hw->leave_config_mode(hwdev);
        drm_crtc_vblank_on(crtc);
 }
 
@@ -77,8 +77,12 @@ static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
        struct malidp_hw_device *hwdev = malidp->dev;
        int err;
 
+       /* always disable planes on the CRTC that is being turned off */
+       drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
        drm_crtc_vblank_off(crtc);
-       hwdev->enter_config_mode(hwdev);
+       hwdev->hw->enter_config_mode(hwdev);
+
        clk_disable_unprepare(hwdev->pxlclk);
 
        err = pm_runtime_put(crtc->dev->dev);
@@ -319,7 +323,7 @@ static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
 
 mclk_calc:
        drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
-       ret = hwdev->se_calc_mclk(hwdev, s, &vm);
+       ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm);
        if (ret < 0)
                return -EINVAL;
        return 0;
@@ -475,7 +479,7 @@ static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
-                            hwdev->map.de_irq_map.vsync_irq);
+                            hwdev->hw->map.de_irq_map.vsync_irq);
        return 0;
 }
 
@@ -485,7 +489,7 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
-                             hwdev->map.de_irq_map.vsync_irq);
+                             hwdev->hw->map.de_irq_map.vsync_irq);
 }
 
 static const struct drm_crtc_funcs malidp_crtc_funcs = {
index b894466..91f2b01 100644 (file)
@@ -47,10 +47,10 @@ static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
         * directly.
         */
        malidp_hw_write(hwdev, gamma_write_mask,
-                       hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
+                       hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
        for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
                malidp_hw_write(hwdev, data[i],
-                               hwdev->map.coeffs_base +
+                               hwdev->hw->map.coeffs_base +
                                MALIDP_COEF_TABLE_DATA);
 }
 
@@ -103,7 +103,7 @@ void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
                        for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
                                malidp_hw_write(hwdev,
                                                mc->coloradj_coeffs[i],
-                                               hwdev->map.coeffs_base +
+                                               hwdev->hw->map.coeffs_base +
                                                MALIDP_COLOR_ADJ_COEF + 4 * i);
 
                malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
@@ -120,8 +120,8 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
        struct malidp_hw_device *hwdev = malidp->dev;
        struct malidp_se_config *s = &cs->scaler_config;
        struct malidp_se_config *old_s = &old_cs->scaler_config;
-       u32 se_control = hwdev->map.se_base +
-                        ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+       u32 se_control = hwdev->hw->map.se_base +
+                        ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
                         0x10 : 0xC);
        u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
        u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
@@ -135,7 +135,7 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
                return;
        }
 
-       hwdev->se_set_scaling_coeffs(hwdev, s, old_s);
+       hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
        val = malidp_hw_read(hwdev, se_control);
        val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
 
@@ -170,9 +170,9 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
        int ret;
 
        atomic_set(&malidp->config_valid, 0);
-       hwdev->set_config_valid(hwdev);
+       hwdev->hw->set_config_valid(hwdev);
        /* don't wait for config_valid flag if we are in config mode */
-       if (hwdev->in_config_mode(hwdev))
+       if (hwdev->hw->in_config_mode(hwdev))
                return 0;
 
        ret = wait_event_interruptible_timeout(malidp->wq,
@@ -455,7 +455,7 @@ static int malidp_runtime_pm_suspend(struct device *dev)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        /* we can only suspend if the hardware is in config mode */
-       WARN_ON(!hwdev->in_config_mode(hwdev));
+       WARN_ON(!hwdev->hw->in_config_mode(hwdev));
 
        hwdev->pm_suspended = true;
        clk_disable_unprepare(hwdev->mclk);
@@ -500,11 +500,7 @@ static int malidp_bind(struct device *dev)
        if (!hwdev)
                return -ENOMEM;
 
-       /*
-        * copy the associated data from malidp_drm_of_match to avoid
-        * having to keep a reference to the OF node after binding
-        */
-       memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
+       hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
        malidp->dev = hwdev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -568,13 +564,13 @@ static int malidp_bind(struct device *dev)
                goto query_hw_fail;
        }
 
-       ret = hwdev->query_hw(hwdev);
+       ret = hwdev->hw->query_hw(hwdev);
        if (ret) {
                DRM_ERROR("Invalid HW configuration\n");
                goto query_hw_fail;
        }
 
-       version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
+       version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
        DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
                 (version >> 12) & 0xf, (version >> 8) & 0xf);
 
@@ -589,7 +585,7 @@ static int malidp_bind(struct device *dev)
 
        for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
                out_depth = (out_depth << 8) | (output_width[i] & 0xf);
-       malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
+       malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
 
        atomic_set(&malidp->config_valid, 0);
        init_waitqueue_head(&malidp->wq);
@@ -671,7 +667,7 @@ query_hw_fail:
                malidp_runtime_pm_suspend(dev);
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 alloc_fail:
        of_reserved_mem_device_release(dev);
 
@@ -704,7 +700,7 @@ static void malidp_unbind(struct device *dev)
                malidp_runtime_pm_suspend(dev);
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
        of_reserved_mem_device_release(dev);
 }
 
index 17bca99..2bfb542 100644 (file)
@@ -183,7 +183,7 @@ static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev)
 
        malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
                        break;
                /*
@@ -203,7 +203,7 @@ static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
        malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
        malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
                        break;
                usleep_range(100, 1000);
@@ -216,7 +216,7 @@ static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
 {
        u32 status;
 
-       status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+       status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
        if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
                return true;
 
@@ -407,7 +407,7 @@ static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev)
 
        malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
                        break;
                /*
@@ -427,7 +427,7 @@ static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
        malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
        malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
                        break;
                usleep_range(100, 1000);
@@ -440,7 +440,7 @@ static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
 {
        u32 status;
 
-       status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+       status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
        if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
                return true;
 
@@ -616,7 +616,7 @@ static int malidp650_query_hw(struct malidp_hw_device *hwdev)
        return 0;
 }
 
-const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
+const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
        [MALIDP_500] = {
                .map = {
                        .coeffs_base = MALIDP500_COEFFS_BASE,
@@ -751,7 +751,7 @@ static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 ir
 {
        u32 base = malidp_get_block_base(hwdev, block);
 
-       if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
+       if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
                malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
        else
                malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
@@ -762,12 +762,14 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
        struct drm_device *drm = arg;
        struct malidp_drm *malidp = drm->dev_private;
        struct malidp_hw_device *hwdev;
+       struct malidp_hw *hw;
        const struct malidp_irq_map *de;
        u32 status, mask, dc_status;
        irqreturn_t ret = IRQ_NONE;
 
        hwdev = malidp->dev;
-       de = &hwdev->map.de_irq_map;
+       hw = hwdev->hw;
+       de = &hw->map.de_irq_map;
 
        /*
         * if we are suspended it is likely that we were invoked because
@@ -778,8 +780,8 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
                return IRQ_NONE;
 
        /* first handle the config valid IRQ */
-       dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
-       if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
+       dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
+       if (dc_status & hw->map.dc_irq_map.vsync_irq) {
                /* we have a page flip event */
                atomic_set(&malidp->config_valid, 1);
                malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
@@ -832,11 +834,11 @@ int malidp_de_irq_init(struct drm_device *drm, int irq)
 
        /* first enable the DC block IRQs */
        malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
-                            hwdev->map.dc_irq_map.irq_mask);
+                            hwdev->hw->map.dc_irq_map.irq_mask);
 
        /* now enable the DE block IRQs */
        malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
-                            hwdev->map.de_irq_map.irq_mask);
+                            hwdev->hw->map.de_irq_map.irq_mask);
 
        return 0;
 }
@@ -847,9 +849,9 @@ void malidp_de_irq_fini(struct drm_device *drm)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
-                             hwdev->map.de_irq_map.irq_mask);
+                             hwdev->hw->map.de_irq_map.irq_mask);
        malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
-                             hwdev->map.dc_irq_map.irq_mask);
+                             hwdev->hw->map.dc_irq_map.irq_mask);
 }
 
 static irqreturn_t malidp_se_irq(int irq, void *arg)
@@ -857,6 +859,8 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
        struct drm_device *drm = arg;
        struct malidp_drm *malidp = drm->dev_private;
        struct malidp_hw_device *hwdev = malidp->dev;
+       struct malidp_hw *hw = hwdev->hw;
+       const struct malidp_irq_map *se = &hw->map.se_irq_map;
        u32 status, mask;
 
        /*
@@ -867,12 +871,12 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
        if (hwdev->pm_suspended)
                return IRQ_NONE;
 
-       status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
-       if (!(status & hwdev->map.se_irq_map.irq_mask))
+       status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
+       if (!(status & se->irq_mask))
                return IRQ_NONE;
 
-       mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
-       status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+       mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
+       status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
        status &= mask;
        /* ToDo: status decoding and firing up of VSYNC and page flip events */
 
@@ -905,7 +909,7 @@ int malidp_se_irq_init(struct drm_device *drm, int irq)
        }
 
        malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
-                            hwdev->map.se_irq_map.irq_mask);
+                            hwdev->hw->map.se_irq_map.irq_mask);
 
        return 0;
 }
@@ -916,5 +920,5 @@ void malidp_se_irq_fini(struct drm_device *drm)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
-                             hwdev->map.se_irq_map.irq_mask);
+                             hwdev->hw->map.se_irq_map.irq_mask);
 }
index 849ad9a..b0690eb 100644 (file)
@@ -120,18 +120,14 @@ struct malidp_hw_regmap {
 /* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */
 #define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0)
 
-struct malidp_hw_device {
-       const struct malidp_hw_regmap map;
-       void __iomem *regs;
+struct malidp_hw_device;
 
-       /* APB clock */
-       struct clk *pclk;
-       /* AXI clock */
-       struct clk *aclk;
-       /* main clock for display core */
-       struct clk *mclk;
-       /* pixel clock for display core */
-       struct clk *pxlclk;
+/*
+ * Static structure containing hardware-specific data and pointers to
+ * functions that behave differently between various versions of the IP.
+ */
+struct malidp_hw {
+       const struct malidp_hw_regmap map;
 
        /*
         * Validate the driver instance against the hardware bits
@@ -182,15 +178,6 @@ struct malidp_hw_device {
                             struct videomode *vm);
 
        u8 features;
-
-       u8 min_line_size;
-       u16 max_line_size;
-
-       /* track the device PM state */
-       bool pm_suspended;
-
-       /* size of memory used for rotating layers, up to two banks available */
-       u32 rotation_memory[2];
 };
 
 /* Supported variants of the hardware */
@@ -202,7 +189,33 @@ enum {
        MALIDP_MAX_DEVICES
 };
 
-extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
+extern const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES];
+
+/*
+ * Structure used by the driver during runtime operation.
+ */
+struct malidp_hw_device {
+       struct malidp_hw *hw;
+       void __iomem *regs;
+
+       /* APB clock */
+       struct clk *pclk;
+       /* AXI clock */
+       struct clk *aclk;
+       /* main clock for display core */
+       struct clk *mclk;
+       /* pixel clock for display core */
+       struct clk *pxlclk;
+
+       u8 min_line_size;
+       u16 max_line_size;
+
+       /* track the device PM state */
+       bool pm_suspended;
+
+       /* size of memory used for rotating layers, up to two banks available */
+       u32 rotation_memory[2];
+};
 
 static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
 {
@@ -240,9 +253,9 @@ static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev,
 {
        switch (block) {
        case MALIDP_SE_BLOCK:
-               return hwdev->map.se_base;
+               return hwdev->hw->map.se_base;
        case MALIDP_DC_BLOCK:
-               return hwdev->map.dc_base;
+               return hwdev->hw->map.dc_base;
        }
 
        return 0;
@@ -275,7 +288,7 @@ u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
 static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
                                         unsigned int pitch)
 {
-       return !(pitch & (hwdev->map.bus_align_bytes - 1));
+       return !(pitch & (hwdev->hw->map.bus_align_bytes - 1));
 }
 
 /* U16.16 */
@@ -308,8 +321,8 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
        };
        u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) |
                  MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL);
-       u32 image_enh = hwdev->map.se_base +
-                       ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+       u32 image_enh = hwdev->hw->map.se_base +
+                       ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
                         0x10 : 0xC) + MALIDP_SE_IMAGE_ENH;
        u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0;
        int i;
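
A note on the shape of this change: the per-variant data becomes a const
descriptor shared by every instance, and each probed device keeps only a
pointer to it plus its runtime state, which is why accesses change from
hwdev->map to hwdev->hw->map throughout. A minimal standalone sketch of the
same pattern, with invented names rather than the driver's actual types:

#include <stdio.h>

struct variant_desc {                   /* plays the role of struct malidp_hw */
        const char *name;
        unsigned int bus_align_bytes;
};

struct device_instance {                /* plays the role of struct malidp_hw_device */
        const struct variant_desc *hw;  /* points into a shared const table */
        int pm_suspended;               /* runtime state stays per-device */
};

static const struct variant_desc variants[] = {
        { "DP500", 8 },
        { "DP550", 16 },
};

int main(void)
{
        struct device_instance dev = { .hw = &variants[1] };

        /* accesses go through dev.hw->..., mirroring hwdev->hw->map */
        printf("%s aligns to %u bytes\n", dev.hw->name, dev.hw->bus_align_bytes);
        return 0;
}
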
index 94e7e3f..e741979 100644 (file)
@@ -57,7 +57,7 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
        struct malidp_plane *mp = to_malidp_plane(plane);
 
        if (mp->base.fb)
-               drm_framebuffer_unreference(mp->base.fb);
+               drm_framebuffer_put(mp->base.fb);
 
        drm_plane_helper_disable(plane);
        drm_plane_cleanup(plane);
@@ -185,8 +185,9 @@ static int malidp_de_plane_check(struct drm_plane *plane,
 
        fb = state->fb;
 
-       ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
-                                           fb->format->format);
+       ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
+                                            mp->layer->id,
+                                            fb->format->format);
        if (ms->format == MALIDP_INVALID_FORMAT_ID)
                return -EINVAL;
 
@@ -211,7 +212,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
         * third plane stride register.
         */
        if (ms->n_planes == 3 &&
-           !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
+           !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
            (state->fb->pitches[1] != state->fb->pitches[2]))
                return -EINVAL;
 
@@ -229,9 +230,9 @@ static int malidp_de_plane_check(struct drm_plane *plane,
        if (state->rotation & MALIDP_ROTATED_MASK) {
                int val;
 
-               val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
-                                                state->crtc_w,
-                                                fb->format->format);
+               val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
+                                                    state->crtc_w,
+                                                    fb->format->format);
                if (val < 0)
                        return val;
 
@@ -251,7 +252,7 @@ static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
                return;
 
        if (num_planes == 3)
-               num_strides = (mp->hwdev->features &
+               num_strides = (mp->hwdev->hw->features &
                               MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
 
        for (i = 0; i < num_strides; ++i)
@@ -264,13 +265,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
                                   struct drm_plane_state *old_state)
 {
        struct malidp_plane *mp;
-       const struct malidp_hw_regmap *map;
        struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
        u32 src_w, src_h, dest_w, dest_h, val;
        int i;
 
        mp = to_malidp_plane(plane);
-       map = &mp->hwdev->map;
 
        /* convert src values from Q16 fixed point to integer */
        src_w = plane->state->src_w >> 16;
@@ -363,7 +362,7 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
 int malidp_de_planes_init(struct drm_device *drm)
 {
        struct malidp_drm *malidp = drm->dev_private;
-       const struct malidp_hw_regmap *map = &malidp->dev->map;
+       const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
        struct malidp_plane *plane = NULL;
        enum drm_plane_type plane_type;
        unsigned long crtcs = 1 << drm->mode_config.num_crtc;
index 2e065fa..a0f4d2a 100644 (file)
@@ -168,16 +168,23 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
 void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
        int x, int y)
 {
+       const struct drm_format_info *format = fb->format;
+       unsigned int num_planes = format->num_planes;
        u32 addr = drm_fb_obj(fb)->dev_addr;
-       int num_planes = fb->format->num_planes;
        int i;
 
        if (num_planes > 3)
                num_planes = 3;
 
-       for (i = 0; i < num_planes; i++)
+       addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+                  x * format->cpp[0];
+
+       y /= format->vsub;
+       x /= format->hsub;
+
+       for (i = 1; i < num_planes; i++)
                addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
-                            x * fb->format->cpp[i];
+                            x * format->cpp[i];
        for (; i < 3; i++)
                addrs[i] = 0;
 }
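
The hunk above makes the chroma subsampling explicit: plane 0 is addressed at
full resolution, and only the remaining planes use x and y divided by the
format's hsub/vsub factors. The same arithmetic as a standalone sketch,
assuming a hypothetical NV12-like two-plane layout with purely illustrative
values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical 640x480 two-plane fb: full-res luma, 2x2-subsampled chroma */
        uint32_t base = 0x10000000;
        uint32_t offsets[2] = { 0, 640 * 480 };
        uint32_t pitches[2] = { 640, 640 };     /* bytes per line */
        uint32_t cpp[2] = { 1, 2 };             /* bytes per pixel, per plane */
        unsigned int hsub = 2, vsub = 2;
        unsigned int x = 17, y = 9;             /* crop origin in pixels */

        uint32_t addr0 = base + offsets[0] + y * pitches[0] + x * cpp[0];
        /* chroma plane is addressed at subsampled coordinates */
        uint32_t addr1 = base + offsets[1] +
                         (y / vsub) * pitches[1] + (x / hsub) * cpp[1];

        printf("luma @ 0x%08x, chroma @ 0x%08x\n",
               (unsigned int)addr0, (unsigned int)addr1);
        return 0;
}
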
@@ -744,15 +751,14 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
        if (plane->fb)
                drm_framebuffer_put(plane->fb);
 
-       /* Power down the Y/U/V FIFOs */
-       sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
-
        /* Power down most RAMs and FIFOs if this is the primary plane */
        if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
-               sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
-                             CFG_PDWN32x32 | CFG_PDWN64x66;
+               sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+                            CFG_PDWN32x32 | CFG_PDWN64x66;
                dma_ctrl0_mask = CFG_GRA_ENA;
        } else {
+               /* Power down the Y/U/V FIFOs */
+               sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
                dma_ctrl0_mask = CFG_DMA_ENA;
        }
 
@@ -1225,17 +1231,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
 
        ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
                               dcrtc);
-       if (ret < 0) {
-               kfree(dcrtc);
-               return ret;
-       }
+       if (ret < 0)
+               goto err_crtc;
 
        if (dcrtc->variant->init) {
                ret = dcrtc->variant->init(dcrtc, dev);
-               if (ret) {
-                       kfree(dcrtc);
-                       return ret;
-               }
+               if (ret)
+                       goto err_crtc;
        }
 
        /* Ensure AXI pipeline is enabled */
@@ -1246,13 +1248,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
        dcrtc->crtc.port = port;
 
        primary = kzalloc(sizeof(*primary), GFP_KERNEL);
-       if (!primary)
-               return -ENOMEM;
+       if (!primary) {
+               ret = -ENOMEM;
+               goto err_crtc;
+       }
 
        ret = armada_drm_plane_init(primary);
        if (ret) {
                kfree(primary);
-               return ret;
+               goto err_crtc;
        }
 
        ret = drm_universal_plane_init(drm, &primary->base, 0,
@@ -1263,7 +1267,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
                                       DRM_PLANE_TYPE_PRIMARY, NULL);
        if (ret) {
                kfree(primary);
-               return ret;
+               goto err_crtc;
        }
 
        ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
@@ -1282,6 +1286,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
 
 err_crtc_init:
        primary->base.funcs->destroy(&primary->base);
+err_crtc:
+       kfree(dcrtc);
+
        return ret;
 }
 
index bab11f4..bfd3514 100644 (file)
@@ -42,6 +42,8 @@ struct armada_plane_work {
 };
 
 struct armada_plane_state {
+       u16 src_x;
+       u16 src_y;
        u32 src_hw;
        u32 dst_hw;
        u32 dst_yx;
index b411b60..aba9476 100644 (file)
@@ -99,6 +99,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 {
        struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       const struct drm_format_info *format;
        struct drm_rect src = {
                .x1 = src_x,
                .y1 = src_y,
@@ -117,7 +118,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        };
        uint32_t val, ctrl0;
        unsigned idx = 0;
-       bool visible;
+       bool visible, fb_changed;
        int ret;
 
        trace_armada_ovl_plane_update(plane, crtc, fb,
@@ -138,6 +139,18 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        if (!visible)
                ctrl0 &= ~CFG_DMA_ENA;
 
+       /*
+        * Shifting a YUV packed format image by one pixel causes the U/V
+        * planes to swap.  Compensate for it by also toggling the UV swap.
+        */
+       format = fb->format;
+       if (format->num_planes == 1 && src.x1 >> 16 & (format->hsub - 1))
+               ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+       fb_changed = plane->fb != fb ||
+                    dplane->base.state.src_x != src.x1 >> 16 ||
+                    dplane->base.state.src_y != src.y1 >> 16;
+
        if (!dcrtc->plane) {
                dcrtc->plane = plane;
                armada_ovl_update_attr(&dplane->prop, dcrtc);
@@ -145,7 +158,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 
        /* FIXME: overlay on an interlaced display */
        /* Just updating the position/size? */
-       if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) {
+       if (!fb_changed && dplane->base.state.ctrl0 == ctrl0) {
                val = (drm_rect_height(&src) & 0xffff0000) |
                      drm_rect_width(&src) >> 16;
                dplane->base.state.src_hw = val;
@@ -169,9 +182,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
                armada_drm_plane_work_cancel(dcrtc, &dplane->base);
 
-       if (plane->fb != fb) {
-               u32 addrs[3], pixel_format;
-               int num_planes, hsub;
+       if (fb_changed) {
+               u32 addrs[3];
 
                /*
                 * Take a reference on the new framebuffer - we want to
@@ -182,23 +194,11 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
                if (plane->fb)
                        armada_ovl_retire_fb(dplane, plane->fb);
 
-               src_y = src.y1 >> 16;
-               src_x = src.x1 >> 16;
+               dplane->base.state.src_y = src_y = src.y1 >> 16;
+               dplane->base.state.src_x = src_x = src.x1 >> 16;
 
                armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
 
-               pixel_format = fb->format->format;
-               hsub = drm_format_horz_chroma_subsampling(pixel_format);
-               num_planes = fb->format->num_planes;
-
-               /*
-                * Annoyingly, shifting a YUYV-format image by one pixel
-                * causes the U/V planes to toggle.  Toggle the UV swap.
-                * (Unfortunately, this causes momentary colour flickering.)
-                */
-               if (src_x & (hsub - 1) && num_planes == 1)
-                       ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
-
                armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
                                     LCD_SPU_DMA_START_ADDR_Y0);
                armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
index b4efcba..d034b2c 100644 (file)
@@ -372,9 +372,18 @@ struct adv7511 {
 };
 
 #ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
-                    unsigned int offset);
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
 void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
+#else
+static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+{
+       unsigned int offset = adv7511->type == ADV7533 ?
+                                               ADV7533_REG_CEC_OFFSET : 0;
+
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+                    ADV7511_CEC_CTRL_POWER_DOWN);
+       return 0;
+}
 #endif
 
 #ifdef CONFIG_DRM_I2C_ADV7533
index b33d730..a20a45c 100644 (file)
@@ -300,18 +300,21 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
        return 0;
 }
 
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
-                    unsigned int offset)
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
 {
+       unsigned int offset = adv7511->type == ADV7533 ?
+                                               ADV7533_REG_CEC_OFFSET : 0;
        int ret = adv7511_cec_parse_dt(dev, adv7511);
 
        if (ret)
-               return ret;
+               goto err_cec_parse_dt;
 
        adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
                adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
-       if (IS_ERR(adv7511->cec_adap))
-               return PTR_ERR(adv7511->cec_adap);
+       if (IS_ERR(adv7511->cec_adap)) {
+               ret = PTR_ERR(adv7511->cec_adap);
+               goto err_cec_alloc;
+       }
 
        regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
        /* cec soft reset */
@@ -329,9 +332,18 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
                     ((adv7511->cec_clk_freq / 750000) - 1) << 2);
 
        ret = cec_register_adapter(adv7511->cec_adap, dev);
-       if (ret) {
-               cec_delete_adapter(adv7511->cec_adap);
-               adv7511->cec_adap = NULL;
-       }
-       return ret;
+       if (ret)
+               goto err_cec_register;
+       return 0;
+
+err_cec_register:
+       cec_delete_adapter(adv7511->cec_adap);
+       adv7511->cec_adap = NULL;
+err_cec_alloc:
+       dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
+                ret);
+err_cec_parse_dt:
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+                    ADV7511_CEC_CTRL_POWER_DOWN);
+       return ret == -EPROBE_DEFER ? ret : 0;
 }
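
The reworked error path above encodes a deliberate policy: every failure
powers the CEC block down, but only -EPROBE_DEFER is propagated, so the HDMI
encoder itself can still probe with CEC disabled. A simplified, hypothetical
sketch of that policy (EPROBE_DEFER is a kernel-internal errno value,
redefined here only for the sketch):

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517        /* kernel-internal errno, defined for the sketch */

static int optional_block_init(int setup_err)
{
        if (!setup_err)
                return 0;                       /* block is usable */

        fprintf(stderr, "init failed (%d), disabling block\n", setup_err);
        /* ... power the block down here ... */
        return setup_err == -EPROBE_DEFER ? setup_err : 0;
}

int main(void)
{
        printf("%d %d %d\n",
               optional_block_init(0),              /* 0: block enabled   */
               optional_block_init(-EINVAL),        /* 0: error swallowed */
               optional_block_init(-EPROBE_DEFER)); /* propagated         */
        return 0;
}
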
index 0e14f15..efa29db 100644 (file)
@@ -1084,7 +1084,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
        struct device *dev = &i2c->dev;
        unsigned int main_i2c_addr = i2c->addr << 1;
        unsigned int edid_i2c_addr = main_i2c_addr + 4;
-       unsigned int offset;
        unsigned int val;
        int ret;
 
@@ -1192,24 +1191,16 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
        if (adv7511->type == ADV7511)
                adv7511_set_link_config(adv7511, &link_config);
 
+       ret = adv7511_cec_init(dev, adv7511);
+       if (ret)
+               goto err_unregister_cec;
+
        adv7511->bridge.funcs = &adv7511_bridge_funcs;
        adv7511->bridge.of_node = dev->of_node;
 
        drm_bridge_add(&adv7511->bridge);
 
        adv7511_audio_init(dev, adv7511);
-
-       offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0;
-
-#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-       ret = adv7511_cec_init(dev, adv7511, offset);
-       if (ret)
-               goto err_unregister_cec;
-#else
-       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
-                    ADV7511_CEC_CTRL_POWER_DOWN);
-#endif
-
        return 0;
 
 err_unregister_cec:
index 5dd3f1c..a890504 100644 (file)
@@ -946,7 +946,9 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
                        return 0;
                }
 
+               pm_runtime_get_sync(dp->dev);
                edid = drm_get_edid(connector, &dp->aux.ddc);
+               pm_runtime_put(dp->dev);
                if (edid) {
                        drm_mode_connector_update_edid_property(&dp->connector,
                                                                edid);
index 0903ba5..75b0d3f 100644 (file)
 
 #include <linux/of_graph.h>
 
+struct lvds_encoder {
+       struct drm_bridge bridge;
+       struct drm_bridge *panel_bridge;
+};
+
+static int lvds_encoder_attach(struct drm_bridge *bridge)
+{
+       struct lvds_encoder *lvds_encoder = container_of(bridge,
+                                                        struct lvds_encoder,
+                                                        bridge);
+
+       return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
+                                bridge);
+}
+
+static struct drm_bridge_funcs funcs = {
+       .attach = lvds_encoder_attach,
+};
+
 static int lvds_encoder_probe(struct platform_device *pdev)
 {
        struct device_node *port;
        struct device_node *endpoint;
        struct device_node *panel_node;
        struct drm_panel *panel;
-       struct drm_bridge *bridge;
+       struct lvds_encoder *lvds_encoder;
+
+       lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder),
+                                   GFP_KERNEL);
+       if (!lvds_encoder)
+               return -ENOMEM;
 
        /* Locate the panel DT node. */
        port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
@@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev)
                return -EPROBE_DEFER;
        }
 
-       bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
-       if (IS_ERR(bridge))
-               return PTR_ERR(bridge);
+       lvds_encoder->panel_bridge =
+               devm_drm_panel_bridge_add(&pdev->dev,
+                                         panel, DRM_MODE_CONNECTOR_LVDS);
+       if (IS_ERR(lvds_encoder->panel_bridge))
+               return PTR_ERR(lvds_encoder->panel_bridge);
+
+       /* The panel_bridge bridge is attached to the panel's of_node,
+        * but we need a bridge attached to our of_node for our user
+        * to look up.
+        */
+       lvds_encoder->bridge.of_node = pdev->dev.of_node;
+       lvds_encoder->bridge.funcs = &funcs;
+       drm_bridge_add(&lvds_encoder->bridge);
 
-       platform_set_drvdata(pdev, bridge);
+       platform_set_drvdata(pdev, lvds_encoder);
 
        return 0;
 }
 
 static int lvds_encoder_remove(struct platform_device *pdev)
 {
-       struct drm_bridge *bridge = platform_get_drvdata(pdev);
+       struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
 
-       drm_bridge_remove(bridge);
+       drm_bridge_remove(&lvds_encoder->bridge);
 
        return 0;
 }
index bf14214..b72259b 100644 (file)
@@ -138,6 +138,7 @@ struct dw_hdmi {
        struct device *dev;
        struct clk *isfr_clk;
        struct clk *iahb_clk;
+       struct clk *cec_clk;
        struct dw_hdmi_i2c *i2c;
 
        struct hdmi_data_info hdmi_data;
@@ -2382,6 +2383,26 @@ __dw_hdmi_probe(struct platform_device *pdev,
                goto err_isfr;
        }
 
+       hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec");
+       if (PTR_ERR(hdmi->cec_clk) == -ENOENT) {
+               hdmi->cec_clk = NULL;
+       } else if (IS_ERR(hdmi->cec_clk)) {
+               ret = PTR_ERR(hdmi->cec_clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n",
+                               ret);
+
+               hdmi->cec_clk = NULL;
+               goto err_iahb;
+       } else {
+               ret = clk_prepare_enable(hdmi->cec_clk);
+               if (ret) {
+                       dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n",
+                               ret);
+                       goto err_iahb;
+               }
+       }
+
        /* Product and revision IDs */
        hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8)
                      | (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0);
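
The block added above is an "optional clock" lookup done by hand: -ENOENT (no
"cec" clock described) is tolerated, any other error aborts the probe, and a
clock that is present must be prepared and enabled before use. A minimal model
of that decision, with invented names standing in for the clk API:

#include <errno.h>
#include <stdio.h>

static int get_optional_clock(int lookup_err, int *have_clk)
{
        if (lookup_err == -ENOENT) {
                *have_clk = 0;          /* clock simply not wired up: fine */
                return 0;
        }
        if (lookup_err)
                return lookup_err;      /* real failure, abort the probe */
        *have_clk = 1;                  /* found: caller must enable it */
        return 0;
}

int main(void)
{
        int have;

        printf("%d\n", get_optional_clock(-ENOENT, &have));    /* 0, have == 0 */
        printf("%d\n", get_optional_clock(0, &have));           /* 0, have == 1 */
        printf("%d\n", get_optional_clock(-EIO, &have));        /* -EIO         */
        return 0;
}
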
@@ -2518,6 +2539,8 @@ err_iahb:
                cec_notifier_put(hdmi->cec_notifier);
 
        clk_disable_unprepare(hdmi->iahb_clk);
+       if (hdmi->cec_clk)
+               clk_disable_unprepare(hdmi->cec_clk);
 err_isfr:
        clk_disable_unprepare(hdmi->isfr_clk);
 err_res:
@@ -2541,6 +2564,8 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
 
        clk_disable_unprepare(hdmi->iahb_clk);
        clk_disable_unprepare(hdmi->isfr_clk);
+       if (hdmi->cec_clk)
+               clk_disable_unprepare(hdmi->cec_clk);
 
        if (hdmi->i2c)
                i2c_del_adapter(&hdmi->i2c->adap);
index 8571cfd..8636e7e 100644 (file)
@@ -97,7 +97,7 @@
 #define DP0_ACTIVEVAL          0x0650
 #define DP0_SYNCVAL            0x0654
 #define DP0_MISC               0x0658
-#define TU_SIZE_RECOMMENDED            (0x3f << 16) /* LSCLK cycles per TU */
+#define TU_SIZE_RECOMMENDED            (63) /* LSCLK cycles per TU */
 #define BPC_6                          (0 << 5)
 #define BPC_8                          (1 << 5)
 
@@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
                                tmp = (tmp << 8) | buf[i];
                        i++;
                        if (((i % 4) == 0) || (i == size)) {
-                               tc_write(DP0_AUXWDATA(i >> 2), tmp);
+                               tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
                                tmp = 0;
                        }
                }
@@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc)
        ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
        if (ret < 0)
                goto err_dpcd_read;
-       if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
-               goto err_dpcd_inval;
+       if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+               dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
+               tc->link.base.rate = 270000;
+       }
+
+       if (tc->link.base.num_lanes > 2) {
+               dev_dbg(tc->dev, "Falling to 2 lanes\n");
+               tc->link.base.num_lanes = 2;
+       }
 
        ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
        if (ret < 0)
@@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc)
 err_dpcd_read:
        dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
        return ret;
-err_dpcd_inval:
-       dev_err(tc->dev, "invalid DPCD\n");
-       return -EINVAL;
 }
 
 static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
@@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        int lower_margin = mode->vsync_start - mode->vdisplay;
        int vsync_len = mode->vsync_end - mode->vsync_start;
 
+       /*
+        * Recommended maximum number of symbols transferred in a transfer unit:
+        * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+        *              (output active video bandwidth in bytes))
+        * Must be less than tu_size.
+        */
+       max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
        dev_dbg(tc->dev, "set mode %dx%d\n",
                mode->hdisplay, mode->vdisplay);
        dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
@@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
 
 
-       /* LCD Ctl Frame Size */
-       tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+       /*
+        * LCD Ctl Frame Size
+        * The datasheet is not clear about VSDELAY in the DPI case;
+        * assume we do not need any delay when DPI is the source of the
+        * sync signals.
+        */
+       tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
                 OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
-       tc_write(HTIM01, (left_margin << 16) |          /* H back porch */
-                        (hsync_len << 0));             /* Hsync */
-       tc_write(HTIM02, (right_margin << 16) |         /* H front porch */
-                        (mode->hdisplay << 0));        /* width */
+       tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
+                        (ALIGN(hsync_len, 2) << 0));    /* Hsync */
+       tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) |  /* H front porch */
+                        (ALIGN(mode->hdisplay, 2) << 0)); /* width */
        tc_write(VTIM01, (upper_margin << 16) |         /* V back porch */
                         (vsync_len << 0));             /* Vsync */
        tc_write(VTIM02, (lower_margin << 16) |         /* V front porch */
@@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        /* DP Main Stream Attributes */
        vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
        tc_write(DP0_VIDSYNCDELAY,
-                (0x003e << 16) |       /* thresh_dly */
+                (max_tu_symbol << 16) |        /* thresh_dly */
                 (vid_sync_dly << 0));
 
        tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
@@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
                 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
 
-       /*
-        * Recommended maximum number of symbols transferred in a transfer unit:
-        * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
-        *              (output active video bandwidth in bytes))
-        * Must be less than tu_size.
-        */
-       max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
-       tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
+       tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
+                          BPC_8);
 
        return 0;
 err:
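
TU_SIZE_RECOMMENDED is now a plain count (63) that is shifted into place only
at the register write, so the derived max_tu_symbol (TU_SIZE_RECOMMENDED - 1)
is a sensible small value instead of being computed from a pre-shifted
constant. A standalone sketch of the resulting DP0_MISC packing, using only
values visible in the diff:

#include <stdint.h>
#include <stdio.h>

#define TU_SIZE_RECOMMENDED 63          /* LSCLK cycles per TU */
#define BPC_8               (1u << 5)

int main(void)
{
        /* must stay below tu_size; with the old pre-shifted definition
         * (0x3f << 16) this expression no longer meant "62" */
        unsigned int max_tu_symbol = TU_SIZE_RECOMMENDED - 1;

        uint32_t dp0_misc = (max_tu_symbol << 23) |
                            (TU_SIZE_RECOMMENDED << 16) |
                            BPC_8;

        printf("DP0_MISC = 0x%08x\n", (unsigned int)dp0_misc);  /* 0x1f3f0020 */
        return 0;
}
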
@@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc)
        unsigned int rate;
        u32 dp_phy_ctrl;
        int timeout;
-       bool aligned;
-       bool ready;
        u32 value;
        int ret;
        u8 tmp[8];
@@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc)
                ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
                if (ret < 0)
                        goto err_dpcd_read;
-               ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
-                                    DP_CHANNEL_EQ_BITS));      /* Lane0 */
-               aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
-       } while ((--timeout) && !(ready && aligned));
+       } while ((--timeout) &&
+                !(drm_dp_channel_eq_ok(tmp + 2,  tc->link.base.num_lanes)));
 
        if (timeout == 0) {
                /* Read DPCD 0x200-0x201 */
                ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
                if (ret < 0)
                        goto err_dpcd_read;
+               dev_err(dev, "channel(s) EQ not ok\n");
                dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
                dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
                         tmp[1]);
@@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc)
                dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
                         tmp[6]);
 
-               if (!ready)
-                       dev_err(dev, "Lane0/1 not ready\n");
-               if (!aligned)
-                       dev_err(dev, "Lane0/1 not aligned\n");
                return -EAGAIN;
        }
 
@@ -1099,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
 static int tc_connector_mode_valid(struct drm_connector *connector,
                                   struct drm_display_mode *mode)
 {
-       /* Accept any mode */
+       /* DPI interface clock limitation: up to 154 MHz */
+       if (mode->clock > 154000)
+               return MODE_CLOCK_HIGH;
+
        return MODE_OK;
 }
 
index 71d712f..b16f1d6 100644 (file)
@@ -1225,7 +1225,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
                return;
 
        for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
-               if (!new_crtc_state->active || !new_crtc_state->planes_changed)
+               if (!new_crtc_state->active)
                        continue;
 
                ret = drm_crtc_vblank_get(crtc);
index 25f4b2e..9ae2360 100644 (file)
@@ -152,6 +152,25 @@ static void drm_connector_free(struct kref *kref)
        connector->funcs->destroy(connector);
 }
 
+void drm_connector_free_work_fn(struct work_struct *work)
+{
+       struct drm_connector *connector, *n;
+       struct drm_device *dev =
+               container_of(work, struct drm_device, mode_config.connector_free_work);
+       struct drm_mode_config *config = &dev->mode_config;
+       unsigned long flags;
+       struct llist_node *freed;
+
+       spin_lock_irqsave(&config->connector_list_lock, flags);
+       freed = llist_del_all(&config->connector_free_list);
+       spin_unlock_irqrestore(&config->connector_list_lock, flags);
+
+       llist_for_each_entry_safe(connector, n, freed, free_node) {
+               drm_mode_object_unregister(dev, &connector->base);
+               connector->funcs->destroy(connector);
+       }
+}
+
 /**
  * drm_connector_init - Init a preallocated connector
  * @dev: DRM device
@@ -529,6 +548,25 @@ void drm_connector_list_iter_begin(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_connector_list_iter_begin);
 
+/*
+ * Extra-safe connector put function that works in any context. Should only be
+ * used from the connector_iter functions, where we never really expect to
+ * actually release the connector when dropping our final reference.
+ */
+static void
+__drm_connector_put_safe(struct drm_connector *conn)
+{
+       struct drm_mode_config *config = &conn->dev->mode_config;
+
+       lockdep_assert_held(&config->connector_list_lock);
+
+       if (!refcount_dec_and_test(&conn->base.refcount.refcount))
+               return;
+
+       llist_add(&conn->free_node, &config->connector_free_list);
+       schedule_work(&config->connector_free_work);
+}
+
 /**
  * drm_connector_list_iter_next - return next connector
  * @iter: connector_list iterator
@@ -558,10 +596,10 @@ drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
 
                /* loop until it's not a zombie connector */
        } while (!kref_get_unless_zero(&iter->conn->base.refcount));
-       spin_unlock_irqrestore(&config->connector_list_lock, flags);
 
        if (old_conn)
-               drm_connector_put(old_conn);
+               __drm_connector_put_safe(old_conn);
+       spin_unlock_irqrestore(&config->connector_list_lock, flags);
 
        return iter->conn;
 }
@@ -578,9 +616,15 @@ EXPORT_SYMBOL(drm_connector_list_iter_next);
  */
 void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
 {
+       struct drm_mode_config *config = &iter->dev->mode_config;
+       unsigned long flags;
+
        iter->dev = NULL;
-       if (iter->conn)
-               drm_connector_put(iter->conn);
+       if (iter->conn) {
+               spin_lock_irqsave(&config->connector_list_lock, flags);
+               __drm_connector_put_safe(iter->conn);
+               spin_unlock_irqrestore(&config->connector_list_lock, flags);
+       }
        lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(drm_connector_list_iter_end);
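
None of the iterator paths above call the connector destructor directly any
more: the final reference drop only queues the connector on a lock-free list
and schedules a work item, which is also why drm_mode_config_cleanup() has to
flush that work (see the hunk further below). A deliberately simplified,
single-threaded model of the idea, with invented names and none of the
llist/workqueue/locking machinery:

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;
        struct obj *free_next;
};

static struct obj *free_list;

static void put_deferred(struct obj *o)
{
        if (--o->refcount)
                return;
        o->free_next = free_list;       /* queue instead of destroying here */
        free_list = o;
}

static void free_work(void)             /* stands in for the work item */
{
        while (free_list) {
                struct obj *o = free_list;

                free_list = o->free_next;
                free(o);
        }
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        o->refcount = 1;
        put_deferred(o);        /* safe in any context: nothing freed yet */
        free_work();            /* actual destruction happens later */
        return 0;
}
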
@@ -1207,6 +1251,19 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
        if (edid)
                size = EDID_LENGTH * (1 + edid->extensions);
 
+       /* Set the display info, using edid if available, otherwise
+        * resetting the values to defaults. This duplicates the work
+        * done in drm_add_edid_modes, but that function is not
+        * consistently called before this one in all drivers and the
+        * computation is cheap enough that it seems better to
+        * duplicate it rather than attempt to ensure some arbitrary
+        * ordering of calls.
+        */
+       if (edid)
+               drm_add_display_info(connector, edid);
+       else
+               drm_reset_display_info(connector);
+
        drm_object_property_set_value(&connector->base,
                                      dev->mode_config.non_desktop_property,
                                      connector->display_info.non_desktop);
index 9ebb884..af00f42 100644 (file)
@@ -142,6 +142,7 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
                                    uint64_t value);
 int drm_connector_create_standard_properties(struct drm_device *dev);
 const char *drm_get_connector_force_name(enum drm_connector_force force);
+void drm_connector_free_work_fn(struct work_struct *work);
 
 /* IOCTL */
 int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
index 2e8fb51..cb48714 100644 (file)
@@ -1731,7 +1731,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
  *
  * Returns true if @vendor is in @edid, false otherwise
  */
-static bool edid_vendor(struct edid *edid, const char *vendor)
+static bool edid_vendor(const struct edid *edid, const char *vendor)
 {
        char edid_vendor[3];
 
@@ -1749,7 +1749,7 @@ static bool edid_vendor(struct edid *edid, const char *vendor)
  *
  * This tells subsequent routines what fixes they need to apply.
  */
-static u32 edid_get_quirks(struct edid *edid)
+static u32 edid_get_quirks(const struct edid *edid)
 {
        const struct edid_quirk *quirk;
        int i;
@@ -2813,7 +2813,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 /*
  * Search EDID for CEA extension block.
  */
-static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
+static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
 {
        u8 *edid_ext = NULL;
        int i;
@@ -2835,12 +2835,12 @@ static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
        return edid_ext;
 }
 
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_cea_extension(const struct edid *edid)
 {
        return drm_find_edid_extension(edid, CEA_EXT);
 }
 
-static u8 *drm_find_displayid_extension(struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid)
 {
        return drm_find_edid_extension(edid, DISPLAYID_EXT);
 }
@@ -4363,7 +4363,7 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
 }
 
 static void drm_parse_cea_ext(struct drm_connector *connector,
-                             struct edid *edid)
+                             const struct edid *edid)
 {
        struct drm_display_info *info = &connector->display_info;
        const u8 *edid_ext;
@@ -4397,11 +4397,33 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
        }
 }
 
-static void drm_add_display_info(struct drm_connector *connector,
-                                struct edid *edid, u32 quirks)
+/* The connector has no EDID, so there are no quirks to compute. Reset
+ * all of the values that would otherwise have been set from EDID.
+ */
+void
+drm_reset_display_info(struct drm_connector *connector)
+{
+       struct drm_display_info *info = &connector->display_info;
+
+       info->width_mm = 0;
+       info->height_mm = 0;
+
+       info->bpc = 0;
+       info->color_formats = 0;
+       info->cea_rev = 0;
+       info->max_tmds_clock = 0;
+       info->dvi_dual = false;
+
+       info->non_desktop = 0;
+}
+EXPORT_SYMBOL_GPL(drm_reset_display_info);
+
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
 {
        struct drm_display_info *info = &connector->display_info;
 
+       u32 quirks = edid_get_quirks(edid);
+
        info->width_mm = edid->width_cm * 10;
        info->height_mm = edid->height_cm * 10;
 
@@ -4414,11 +4436,13 @@ static void drm_add_display_info(struct drm_connector *connector,
 
        info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
 
+       DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
+
        if (edid->revision < 3)
-               return;
+               return quirks;
 
        if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
-               return;
+               return quirks;
 
        drm_parse_cea_ext(connector, edid);
 
@@ -4438,7 +4462,7 @@ static void drm_add_display_info(struct drm_connector *connector,
 
        /* Only defined for 1.4 with digital displays */
        if (edid->revision < 4)
-               return;
+               return quirks;
 
        switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
        case DRM_EDID_DIGITAL_DEPTH_6:
@@ -4473,7 +4497,9 @@ static void drm_add_display_info(struct drm_connector *connector,
                info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
        if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
                info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+       return quirks;
 }
+EXPORT_SYMBOL_GPL(drm_add_display_info);
 
 static int validate_displayid(u8 *displayid, int length, int idx)
 {
@@ -4627,14 +4653,12 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
                return 0;
        }
 
-       quirks = edid_get_quirks(edid);
-
        /*
         * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
         * To avoid multiple parsing of same block, lets parse that map
         * from sink info, before parsing CEA modes.
         */
-       drm_add_display_info(connector, edid, quirks);
+       quirks = drm_add_display_info(connector, edid);
 
        /*
         * EDID spec says modes should be preferred in this order:
@@ -4831,7 +4855,8 @@ void
 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
                                   const struct drm_display_mode *mode,
                                   enum hdmi_quantization_range rgb_quant_range,
-                                  bool rgb_quant_range_selectable)
+                                  bool rgb_quant_range_selectable,
+                                  bool is_hdmi2_sink)
 {
        /*
         * CEA-861:
@@ -4855,8 +4880,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
         *  YQ-field to match the RGB Quantization Range being transmitted
         *  (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
         *  set YQ=1) and the Sink shall ignore the YQ-field."
+        *
+        * Unfortunately, certain sinks (e.g. VIZ Model 67/E261VA) get confused
+        * by non-zero YQ when receiving RGB. There doesn't seem to be any
+        * good way to tell which version of CEA-861 the sink supports, so
+        * we limit non-zero YQ to HDMI 2.0 sinks only, as HDMI 2.0 is based
+        * on CEA-861-F.
         */
-       if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
+       if (!is_hdmi2_sink ||
+           rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
                frame->ycc_quantization_range =
                        HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
        else
index 0737400..e561663 100644 (file)
@@ -1809,6 +1809,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 
        if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
                DRM_INFO("Cannot find any crtc or sizes\n");
+
+               /* First time: disable all CRTCs. */
+               if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master))
+                       restore_fbdev_mode(fb_helper);
                return -EAGAIN;
        }
 
index d1eb56a..1402c0e 100644 (file)
@@ -220,17 +220,6 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
 
        mutex_lock(&dev->mode_config.idr_mutex);
 
-       /* Insert the new lessee into the tree */
-       id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
-       if (id < 0) {
-               error = id;
-               goto out_lessee;
-       }
-
-       lessee->lessee_id = id;
-       lessee->lessor = drm_master_get(lessor);
-       list_add_tail(&lessee->lessee_list, &lessor->lessees);
-
        idr_for_each_entry(leases, entry, object) {
                error = 0;
                if (!idr_find(&dev->mode_config.crtc_idr, object))
@@ -246,6 +235,17 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
                }
        }
 
+       /* Insert the new lessee into the tree */
+       id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
+       if (id < 0) {
+               error = id;
+               goto out_lessee;
+       }
+
+       lessee->lessee_id = id;
+       lessee->lessor = drm_master_get(lessor);
+       list_add_tail(&lessee->lessee_list, &lessor->lessees);
+
        /* Move the leases over */
        lessee->leases = *leases;
        DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
@@ -254,10 +254,10 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
        return lessee;
 
 out_lessee:
-       drm_master_put(&lessee);
-
        mutex_unlock(&dev->mode_config.idr_mutex);
 
+       drm_master_put(&lessee);
+
        return ERR_PTR(error);
 }
 
index 61a1c8e..c3c79ee 100644 (file)
@@ -575,21 +575,23 @@ EXPORT_SYMBOL(drm_mm_remove_node);
  */
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
+       struct drm_mm *mm = old->mm;
+
        DRM_MM_BUG_ON(!old->allocated);
 
        *new = *old;
 
        list_replace(&old->node_list, &new->node_list);
-       rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);
+       rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
 
        if (drm_mm_hole_follows(old)) {
                list_replace(&old->hole_stack, &new->hole_stack);
                rb_replace_node(&old->rb_hole_size,
                                &new->rb_hole_size,
-                               &old->mm->holes_size);
+                               &mm->holes_size);
                rb_replace_node(&old->rb_hole_addr,
                                &new->rb_hole_addr,
-                               &old->mm->holes_addr);
+                               &mm->holes_addr);
        }
 
        old->allocated = false;
index cda8bfa..256de73 100644 (file)
@@ -382,6 +382,9 @@ void drm_mode_config_init(struct drm_device *dev)
        ida_init(&dev->mode_config.connector_ida);
        spin_lock_init(&dev->mode_config.connector_list_lock);
 
+       init_llist_head(&dev->mode_config.connector_free_list);
+       INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
+
        drm_mode_create_standard_properties(dev);
 
        /* Just to be sure */
@@ -431,6 +434,8 @@ void drm_mode_config_cleanup(struct drm_device *dev)
                drm_connector_put(connector);
        }
        drm_connector_list_iter_end(&conn_iter);
+       /* connector_iter drops references in a work item. */
+       flush_work(&dev->mode_config.connector_free_work);
        if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
                drm_connector_list_iter_begin(dev, &conn_iter);
                drm_for_each_connector_iter(connector, &conn_iter)
index 19404e3..2c90519 100644 (file)
@@ -558,11 +558,10 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
 }
 
 /*
- * setplane_internal - setplane handler for internal callers
+ * __setplane_internal - setplane handler for internal callers
  *
- * Note that we assume an extra reference has already been taken on fb.  If the
- * update fails, this reference will be dropped before return; if it succeeds,
- * the previous framebuffer (if any) will be unreferenced instead.
+ * This function will take a reference on the new fb for the plane
+ * on success.
  *
  * src_{x,y,w,h} are provided in 16.16 fixed point format
  */
@@ -630,14 +629,12 @@ static int __setplane_internal(struct drm_plane *plane,
        if (!ret) {
                plane->crtc = crtc;
                plane->fb = fb;
-               fb = NULL;
+               drm_framebuffer_get(plane->fb);
        } else {
                plane->old_fb = NULL;
        }
 
 out:
-       if (fb)
-               drm_framebuffer_put(fb);
        if (plane->old_fb)
                drm_framebuffer_put(plane->old_fb);
        plane->old_fb = NULL;
@@ -685,6 +682,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
        struct drm_plane *plane;
        struct drm_crtc *crtc = NULL;
        struct drm_framebuffer *fb = NULL;
+       int ret;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
@@ -717,15 +715,16 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
                }
        }
 
-       /*
-        * setplane_internal will take care of deref'ing either the old or new
-        * framebuffer depending on success.
-        */
-       return setplane_internal(plane, crtc, fb,
-                                plane_req->crtc_x, plane_req->crtc_y,
-                                plane_req->crtc_w, plane_req->crtc_h,
-                                plane_req->src_x, plane_req->src_y,
-                                plane_req->src_w, plane_req->src_h);
+       ret = setplane_internal(plane, crtc, fb,
+                               plane_req->crtc_x, plane_req->crtc_y,
+                               plane_req->crtc_w, plane_req->crtc_h,
+                               plane_req->src_x, plane_req->src_y,
+                               plane_req->src_w, plane_req->src_h);
+
+       if (fb)
+               drm_framebuffer_put(fb);
+
+       return ret;
 }
 
 static int drm_mode_cursor_universal(struct drm_crtc *crtc,
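
The ioctl path now follows a simpler ownership rule: the caller looks up the
framebuffer, passes it down and always drops its own reference afterwards,
while setplane_internal takes a reference of its own only when the update
succeeds. A tiny standalone model of that contract, with hypothetical names:

#include <assert.h>

struct fb { int refs; };

static void fb_get(struct fb *f) { f->refs++; }
static void fb_put(struct fb *f) { f->refs--; }

static int set_plane(struct fb **cur, struct fb *new_fb, int fail)
{
        if (fail)
                return -1;
        if (*cur)
                fb_put(*cur);           /* release the previous fb */
        *cur = new_fb;
        fb_get(new_fb);                 /* callee keeps its own reference */
        return 0;
}

int main(void)
{
        struct fb fb = { .refs = 0 };
        struct fb *cur = NULL;

        fb_get(&fb);                    /* caller's lookup reference */
        int ret = set_plane(&cur, &fb, 0);
        fb_put(&fb);                    /* caller always drops it */

        assert(ret == 0 && fb.refs == 1);   /* the plane holds exactly one ref */
        return 0;
}
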
@@ -788,13 +787,12 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
                src_h = fb->height << 16;
        }
 
-       /*
-        * setplane_internal will take care of deref'ing either the old or new
-        * framebuffer depending on success.
-        */
        ret = __setplane_internal(crtc->cursor, crtc, fb,
-                               crtc_x, crtc_y, crtc_w, crtc_h,
-                               0, 0, src_w, src_h, ctx);
+                                 crtc_x, crtc_y, crtc_w, crtc_h,
+                                 0, 0, src_w, src_h, ctx);
+
+       if (fb)
+               drm_framebuffer_put(fb);
 
        /* Update successful; save new cursor position, if necessary */
        if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
@@ -1030,6 +1028,7 @@ retry:
                e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
                e->event.base.length = sizeof(e->event);
                e->event.vbl.user_data = page_flip->user_data;
+               e->event.vbl.crtc_id = crtc->base.id;
                ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
                if (ret) {
                        kfree(e);
index f776fc1..cb4d09c 100644 (file)
@@ -369,40 +369,26 @@ static const struct file_operations drm_syncobj_file_fops = {
        .release = drm_syncobj_file_release,
 };
 
-static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
-{
-       struct file *file = anon_inode_getfile("syncobj_file",
-                                              &drm_syncobj_file_fops,
-                                              syncobj, 0);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       drm_syncobj_get(syncobj);
-       if (cmpxchg(&syncobj->file, NULL, file)) {
-               /* lost the race */
-               fput(file);
-       }
-
-       return 0;
-}
-
 int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
 {
-       int ret;
+       struct file *file;
        int fd;
 
        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;
 
-       if (!syncobj->file) {
-               ret = drm_syncobj_alloc_file(syncobj);
-               if (ret) {
-                       put_unused_fd(fd);
-                       return ret;
-               }
+       file = anon_inode_getfile("syncobj_file",
+                                 &drm_syncobj_file_fops,
+                                 syncobj, 0);
+       if (IS_ERR(file)) {
+               put_unused_fd(fd);
+               return PTR_ERR(file);
        }
-       fd_install(fd, syncobj->file);
+
+       drm_syncobj_get(syncobj);
+       fd_install(fd, file);
+
        *p_fd = fd;
        return 0;
 }
@@ -422,31 +408,24 @@ static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
        return ret;
 }
 
-static struct drm_syncobj *drm_syncobj_fdget(int fd)
-{
-       struct file *file = fget(fd);
-
-       if (!file)
-               return NULL;
-       if (file->f_op != &drm_syncobj_file_fops)
-               goto err;
-
-       return file->private_data;
-err:
-       fput(file);
-       return NULL;
-};
-
 static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
                                    int fd, u32 *handle)
 {
-       struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
+       struct drm_syncobj *syncobj;
+       struct file *file;
        int ret;
 
-       if (!syncobj)
+       file = fget(fd);
+       if (!file)
                return -EINVAL;
 
+       if (file->f_op != &drm_syncobj_file_fops) {
+               fput(file);
+               return -EINVAL;
+       }
+
        /* take a reference to put in the idr */
+       syncobj = file->private_data;
        drm_syncobj_get(syncobj);
 
        idr_preload(GFP_KERNEL);
@@ -455,12 +434,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
        spin_unlock(&file_private->syncobj_table_lock);
        idr_preload_end();
 
-       if (ret < 0) {
-               fput(syncobj->file);
-               return ret;
-       }
-       *handle = ret;
-       return 0;
+       if (ret > 0) {
+               *handle = ret;
+               ret = 0;
+       } else
+               drm_syncobj_put(syncobj);
+
+       fput(file);
+       return ret;
 }
 
 static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
index 82b7242..27e423b 100644 (file)
@@ -37,8 +37,6 @@
 #define DRIVER_MAJOR   1
 #define DRIVER_MINOR   0
 
-static struct device *exynos_drm_get_dma_device(void);
-
 int exynos_atomic_check(struct drm_device *dev,
                        struct drm_atomic_state *state)
 {
@@ -148,7 +146,7 @@ static struct drm_driver exynos_drm_driver = {
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = drm_gem_prime_export,
-       .gem_prime_import       = drm_gem_prime_import,
+       .gem_prime_import       = exynos_drm_gem_prime_import,
        .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
        .gem_prime_import_sg_table      = exynos_drm_gem_prime_import_sg_table,
        .gem_prime_vmap         = exynos_drm_gem_prime_vmap,
@@ -301,6 +299,27 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
        return match ?: ERR_PTR(-ENODEV);
 }
 
+static struct device *exynos_drm_get_dma_device(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+               struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
+               struct device *dev;
+
+               if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
+                       continue;
+
+               while ((dev = bus_find_device(&platform_bus_type, NULL,
+                                           &info->driver->driver,
+                                           (void *)platform_bus_type.match))) {
+                       put_device(dev);
+                       return dev;
+               }
+       }
+       return NULL;
+}
+
 static int exynos_drm_bind(struct device *dev)
 {
        struct exynos_drm_private *private;
@@ -469,27 +488,6 @@ static struct platform_driver exynos_drm_platform_driver = {
        },
 };
 
-static struct device *exynos_drm_get_dma_device(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
-               struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
-               struct device *dev;
-
-               if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
-                       continue;
-
-               while ((dev = bus_find_device(&platform_bus_type, NULL,
-                                           &info->driver->driver,
-                                           (void *)platform_bus_type.match))) {
-                       put_device(dev);
-                       return dev;
-               }
-       }
-       return NULL;
-}
-
 static void exynos_drm_unregister_devices(void)
 {
        int i;
index c6847fa..589d465 100644 (file)
@@ -194,11 +194,6 @@ struct drm_exynos_file_private {
 /*
  * Exynos drm private structure.
  *
- * @da_start: start address to device address space.
- *     with iommu, device address space starts from this address
- *     otherwise default one.
- * @da_space_size: size of device address space.
- *     if 0 then default value is used for it.
  * @pending: the crtcs that have pending updates to finish
  * @lock: protect access to @pending
  * @wait: wait an atomic commit to finish
index 077de01..11cc01b 100644 (file)
@@ -247,6 +247,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
        if (IS_ERR(exynos_gem))
                return exynos_gem;
 
+       if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
+               /*
+                * when no IOMMU is available, all allocated buffers are
+                * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
+                */
+               flags &= ~EXYNOS_BO_NONCONTIG;
+               DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
+       }
+
        /* set memory type and cache attribute from user side. */
        exynos_gem->flags = flags;
 
@@ -506,6 +515,12 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 
 /* low-level interface prime helpers */
+struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
+                                           struct dma_buf *dma_buf)
+{
+       return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
+}
+
 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
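
Note on the two hunks above: exynos_drm_gem_create() now degrades EXYNOS_BO_NONCONTIG to a contiguous allocation when no IOMMU is available (without an IOMMU every buffer the driver hands out is physically contiguous anyway), and the new exynos_drm_gem_prime_import() wrapper makes dma-buf imports attach to the real DMA device via drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev)) instead of the virtual exynos-drm platform device. A minimal standalone sketch of the flag fallback; the flag value and helper name are assumptions for illustration, not the driver's API:

    #include <stdbool.h>
    #include <stdio.h>

    #define EXYNOS_BO_NONCONTIG (1u << 0)   /* assumed value, for the sketch only */

    /* Hypothetical helper mirroring the fallback in exynos_drm_gem_create(). */
    static unsigned int sanitize_gem_flags(unsigned int flags, bool have_iommu)
    {
            if (!have_iommu && (flags & EXYNOS_BO_NONCONTIG)) {
                    /* No IOMMU: every allocation is contiguous, the flag is meaningless. */
                    flags &= ~EXYNOS_BO_NONCONTIG;
                    fprintf(stderr, "non-contiguous allocation needs an IOMMU, using a contiguous buffer\n");
            }
            return flags;
    }

    int main(void)
    {
            printf("flags without IOMMU: %#x\n", sanitize_gem_flags(EXYNOS_BO_NONCONTIG, false));
            printf("flags with IOMMU:    %#x\n", sanitize_gem_flags(EXYNOS_BO_NONCONTIG, true));
            return 0;
    }
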
index e86d1a9..5a4c7de 100644 (file)
@@ -117,6 +117,8 @@ int exynos_drm_gem_fault(struct vm_fault *vmf);
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 /* low-level interface prime helpers */
+struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
+                                           struct dma_buf *dma_buf);
 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *
 exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
index ab19545..4ce2e6b 100644 (file)
@@ -208,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
+static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
+       unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+       u32 new = *(u32 *)(p_data);
+
+       if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
+               /* We don't have rom, return size of 0. */
+               /* We don't have a ROM, so report a size of 0. */
+               *pval = 0;
+       else
+               vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
+       return 0;
+}
+
 static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
        void *p_data, unsigned int bytes)
 {
@@ -300,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
        }
 
        switch (rounddown(offset, 4)) {
+       case PCI_ROM_ADDRESS:
+               if (WARN_ON(!IS_ALIGNED(offset, 4)))
+                       return -EINVAL;
+               return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
+
        case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
                if (WARN_ON(!IS_ALIGNED(offset, 4)))
                        return -EINVAL;
@@ -375,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
                                pci_resource_len(gvt->dev_priv->drm.pdev, 0);
        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
                                pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+
+       memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
 }
 
 /**
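
Note: the new emulate_pci_rom_bar_write() answers the standard PCI option-ROM sizing probe for a device that has no ROM. When the guest writes all address bits of PCI_ROM_ADDRESS as 1s, the virtual register is forced to 0, which the guest reads back as "no ROM"; any other write is stored normally, and intel_vgpu_init_cfg_space() also clears the ROM BAR up front. Below is a hedged, standalone model of that sizing handshake; rom_bar_after_sizing_write() and its arguments are illustrative, not part of the GVT code:

    #include <stdio.h>

    #define PCI_ROM_ADDRESS_MASK 0xfffff800u   /* upper bits hold the ROM base address */

    /* Hypothetical device model: return 0 when there is no ROM, as GVT now does. */
    static unsigned int rom_bar_after_sizing_write(unsigned int written,
                                                   int has_rom, unsigned int size)
    {
            if ((written & PCI_ROM_ADDRESS_MASK) != PCI_ROM_ADDRESS_MASK)
                    return written;                         /* normal address programming */
            if (!has_rom)
                    return 0;                               /* "size 0": guest skips the ROM */
            return ~(size - 1) & PCI_ROM_ADDRESS_MASK;      /* encode the size mask */
    }

    int main(void)
    {
            unsigned int probe = 0xfffff800;
            unsigned int v = rom_bar_after_sizing_write(probe, 0, 0);
            printf("no ROM  -> readback %#x (guest sees no option ROM)\n", v);
            v = rom_bar_after_sizing_write(probe, 1, 64 * 1024);
            printf("64K ROM -> readback %#x (guest derives the size from the zero bits)\n", v);
            return 0;
    }
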
index 701a3c6..49af946 100644 (file)
@@ -1628,7 +1628,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
        struct intel_shadow_bb_entry *entry_obj;
        struct intel_vgpu *vgpu = s->vgpu;
        unsigned long gma = 0;
-       uint32_t bb_size;
+       int bb_size;
        void *dst = NULL;
        int ret = 0;
 
@@ -2777,12 +2777,12 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 }
 
 static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
-               unsigned int opcode, int rings)
+               unsigned int opcode, unsigned long rings)
 {
        struct cmd_info *info = NULL;
        unsigned int ring;
 
-       for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
+       for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
                info = find_cmd_entry(gvt, opcode, ring);
                if (info)
                        break;
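
Note: the cmd_parser hunk above fixes a subtle out-of-bounds read. rings used to be an int, and casting &rings to unsigned long * makes for_each_set_bit() read a full 64-bit word on 64-bit kernels, four bytes past the end of the variable; passing a genuine unsigned long keeps the bit scan inside the object. A small standalone illustration of the same bounded iteration, using plain bit tests instead of the kernel helper; the engine count and ring mask are made up:

    #include <stdio.h>

    #define I915_NUM_ENGINES 5   /* illustrative engine count */

    int main(void)
    {
            unsigned long rings = (1ul << 0) | (1ul << 2);   /* say, RCS and VCS selected */

            /* Equivalent of for_each_set_bit(ring, &rings, I915_NUM_ENGINES):
             * visit only the set bits, bounded by the engine count.
             */
            for (unsigned int ring = 0; ring < I915_NUM_ENGINES; ring++)
                    if (rings & (1ul << ring))
                            printf("look up opcode on ring %u\n", ring);
            return 0;
    }
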
index 3c31843..309f3fa 100644 (file)
@@ -266,6 +266,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
        /* Clear host CRT status, so guest couldn't detect this host CRT. */
        if (IS_BROADWELL(dev_priv))
                vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+
+       vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 }
 
 static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -307,6 +309,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
        port->type = type;
 
        emulate_monitor_status_change(vgpu);
+
        return 0;
 }
 
index 4427be1..940cdaa 100644 (file)
@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
                goto err_unpin_mm;
        }
 
+       ret = intel_gvt_generate_request(workload);
+       if (ret) {
+               gvt_vgpu_err("fail to generate request\n");
+               goto err_unpin_mm;
+       }
+
        ret = prepare_shadow_batch_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
index 2801d70..64d67ff 100644 (file)
@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
 
 #define GTT_HAW 46
 
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
 
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 {
@@ -1359,12 +1359,15 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
                        return ret;
        } else {
                if (!test_bit(index, spt->post_shadow_bitmap)) {
+                       int type = spt->shadow_page.type;
+
                        ppgtt_get_shadow_entry(spt, &se, index);
                        ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
                        if (ret)
                                return ret;
+                       ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
+                       ppgtt_set_shadow_entry(spt, &se, index);
                }
-
                ppgtt_set_post_shadow(spt, index);
        }
 
index a5bed2e..1f840f6 100644 (file)
@@ -137,17 +137,26 @@ static int new_mmio_info(struct intel_gvt *gvt,
        return 0;
 }
 
-static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+/**
+ * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * Ring ID on success, negative error code if failed.
+ */
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+               unsigned int offset)
 {
        enum intel_engine_id id;
        struct intel_engine_cs *engine;
 
-       reg &= ~GENMASK(11, 0);
+       offset &= ~GENMASK(11, 0);
        for_each_engine(engine, gvt->dev_priv, id) {
-               if (engine->mmio_base == reg)
+               if (engine->mmio_base == offset)
                        return id;
        }
-       return -1;
+       return -ENODEV;
 }
 
 #define offset_to_fence_num(offset) \
@@ -1381,40 +1390,6 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
        return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
 }
 
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes)
-{
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       u32 v = *(u32 *)p_data;
-
-       if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
-               return intel_vgpu_default_mmio_write(vgpu,
-                               offset, p_data, bytes);
-
-       switch (offset) {
-       case 0x4ddc:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
-               break;
-       case 0x42080:
-               /* bypass WaCompressedResourceDisplayNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
-               break;
-       case 0xe194:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
-               break;
-       case 0x7014:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
@@ -1432,18 +1407,36 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct drm_i915_private *dev_priv = gvt->dev_priv;
+       int ring_id;
+       u32 ring_base;
+
+       ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
+       /*
+        * Read the HW register in the following cases:
+        * a. the offset is not a ring mmio;
+        * b. the ring behind the offset is currently running on hw;
+        * c. the offset is a ring timestamp mmio.
+        */
+       if (ring_id >= 0)
+               ring_base = dev_priv->engine[ring_id]->mmio_base;
+
+       if (ring_id < 0 || vgpu  == gvt->scheduler.engine_owner[ring_id] ||
+           offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
+           offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
+               mmio_hw_access_pre(dev_priv);
+               vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+               mmio_hw_access_post(dev_priv);
+       }
 
-       mmio_hw_access_pre(dev_priv);
-       vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
-       mmio_hw_access_post(dev_priv);
        return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 }
 
 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
-       int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+       int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
        struct intel_vgpu_execlist *execlist;
        u32 data = *(u32 *)p_data;
        int ret = 0;
@@ -1470,7 +1463,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
        u32 data = *(u32 *)p_data;
-       int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+       int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
        bool enable_execlist;
 
        write_vreg(vgpu, offset, p_data, bytes);
@@ -1671,8 +1664,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
                NULL, NULL);
-       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
-                skl_misc_ctl_write);
+       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+                NULL, NULL);
        MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2564,8 +2557,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x6e570, D_BDW_PLUS);
        MMIO_D(0x65f10, D_BDW_PLUS);
 
-       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
-                skl_misc_ctl_write);
+       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2615,8 +2607,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
-       MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+       MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+       MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
        MMIO_D(0x45504, D_SKL_PLUS);
        MMIO_D(0x45520, D_SKL_PLUS);
        MMIO_D(0x46000, D_SKL_PLUS);
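
Note: besides retiring skl_misc_ctl_write(), this file renames render_mmio_to_ring_id() to the exported intel_gvt_render_mmio_to_ring_id(), returns -ENODEV instead of a bare -1, and teaches mmio_read_from_hw() to touch real hardware only when the offset is not a ring register, the ring is currently owned by the reading vGPU, or the offset is a ring timestamp register. The lookup relies on each engine owning a 4 KiB MMIO window, so masking off the low 12 bits of an offset recovers the engine's mmio_base. A standalone sketch of that masking; the engine table below is illustrative, not the i915 one:

    #include <stdio.h>

    struct engine { const char *name; unsigned int mmio_base; };

    /* Illustrative bases only; the real values come from the i915 engine tables. */
    static const struct engine engines[] = {
            { "rcs", 0x02000 },
            { "bcs", 0x22000 },
            { "vcs", 0x12000 },
    };

    static int mmio_to_engine(unsigned int offset)
    {
            offset &= ~0xfffu;   /* same idea as offset &= ~GENMASK(11, 0) */
            for (unsigned int i = 0; i < sizeof(engines) / sizeof(engines[0]); i++)
                    if (engines[i].mmio_base == offset)
                            return (int)i;
            return -1;           /* the kernel code returns -ENODEV here */
    }

    int main(void)
    {
            printf("0x2358  -> engine %d\n", mmio_to_engine(0x2358));  /* inside the rcs window */
            printf("0x46430 -> engine %d\n", mmio_to_engine(0x46430)); /* not a ring register */
            return 0;
    }
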
index 32cd64d..dbc04ad 100644 (file)
@@ -65,6 +65,8 @@ struct intel_gvt_mmio_info {
        struct hlist_node node;
 };
 
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+               unsigned int reg);
 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
 bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
 
index f6ded47..69f8f0d 100644 (file)
@@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req)
        return i915_gem_context_force_single_submission(req->ctx);
 }
 
+static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+       i915_reg_t reg;
+
+       reg = RING_INSTDONE(ring_base);
+       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+       reg = RING_ACTHD(ring_base);
+       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+       reg = RING_ACTHD_UDW(ring_base);
+       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+}
+
 static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
 {
@@ -140,9 +154,10 @@ static int shadow_context_status_change(struct notifier_block *nb,
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        enum intel_engine_id ring_id = req->engine->id;
        struct intel_vgpu_workload *workload;
+       unsigned long flags;
 
        if (!is_gvt_request(req)) {
-               spin_lock_bh(&scheduler->mmio_context_lock);
+               spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
@@ -150,7 +165,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
                                              NULL, ring_id);
                        scheduler->engine_owner[ring_id] = NULL;
                }
-               spin_unlock_bh(&scheduler->mmio_context_lock);
+               spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
 
                return NOTIFY_OK;
        }
@@ -161,7 +176,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 
        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
-               spin_lock_bh(&scheduler->mmio_context_lock);
+               spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
@@ -170,13 +185,16 @@ static int shadow_context_status_change(struct notifier_block *nb,
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                                      ring_id, workload->vgpu->id);
-               spin_unlock_bh(&scheduler->mmio_context_lock);
+               spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
-       case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+               save_ring_hw_state(workload->vgpu, ring_id);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
+       case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+               save_ring_hw_state(workload->vgpu, ring_id);
+               break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
@@ -253,7 +271,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-       struct drm_i915_gem_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_ring *ring;
        int ret;
@@ -299,6 +316,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        ret = populate_shadow_context(workload);
        if (ret)
                goto err_unpin;
+       workload->shadowed = true;
+       return 0;
+
+err_unpin:
+       engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+       release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+       return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+       int ring_id = workload->ring_id;
+       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+       struct drm_i915_gem_request *rq;
+       struct intel_vgpu *vgpu = workload->vgpu;
+       struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+       int ret;
 
        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
@@ -313,14 +350,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        ret = copy_workload_to_ring_buffer(workload);
        if (ret)
                goto err_unpin;
-       workload->shadowed = true;
        return 0;
 
 err_unpin:
        engine->context_unpin(engine, shadow_ctx);
-err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
        return ret;
 }
 
@@ -723,6 +757,9 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
        if (IS_ERR(vgpu->shadow_ctx))
                return PTR_ERR(vgpu->shadow_ctx);
 
+       if (INTEL_INFO(vgpu->gvt->dev_priv)->has_logical_ring_preemption)
+               vgpu->shadow_ctx->priority = INT_MAX;
+
        vgpu->shadow_ctx->engine[RCS].initialised = true;
 
        bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
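
Note: three changes land in the scheduler here. Request allocation moves into the new intel_gvt_generate_request() so prepare_execlist_workload() can create the request before shadowing the batch buffer; save_ring_hw_state() snapshots INSTDONE/ACTHD into the vregs when a context is scheduled out or preempted; and mmio_context_lock switches from spin_lock_bh() to spin_lock_irqsave(), since the context-status notifier can now be called with interrupts already disabled, where the _bh variants are not safe. The irqsave form is the usual pattern for a lock shared with interrupt context; a generic kernel-side sketch, with a hypothetical lock name and no GVT types:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock for this sketch */

    static void touch_shared_state(void)
    {
            unsigned long flags;

            /* Save and disable local interrupts while holding the lock, so a
             * handler on this CPU that also takes example_lock cannot deadlock
             * against us, and the caller's IRQ state is restored unchanged.
             */
            spin_lock_irqsave(&example_lock, flags);
            /* ... update state shared with interrupt context ... */
            spin_unlock_irqrestore(&example_lock, flags);
    }
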
index 2d694f6..b9f8722 100644 (file)
@@ -142,4 +142,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
 
 void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
 #endif
index 960d3d8..2cf10d1 100644 (file)
@@ -1714,6 +1714,7 @@ static int i915_drm_resume(struct drm_device *dev)
        intel_guc_resume(dev_priv);
 
        intel_modeset_init_hw(dev);
+       intel_init_clock_gating(dev_priv);
 
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
@@ -2618,6 +2619,8 @@ static int intel_runtime_resume(struct device *kdev)
                ret = vlv_resume_prepare(dev_priv, true);
        }
 
+       intel_uncore_runtime_resume(dev_priv);
+
        /*
         * No point of rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
index 54b5d4c..e143004 100644 (file)
@@ -2368,6 +2368,9 @@ struct drm_i915_private {
         */
        struct workqueue_struct *wq;
 
+       /* ordered wq for modesets */
+       struct workqueue_struct *modeset_wq;
+
        /* Display functions */
        struct drm_i915_display_funcs display;
 
index 3a140ee..5cfba89 100644 (file)
@@ -330,17 +330,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
         * must wait for all rendering to complete to the object (as unbinding
         * must anyway), and retire the requests.
         */
-       ret = i915_gem_object_wait(obj,
-                                  I915_WAIT_INTERRUPTIBLE |
-                                  I915_WAIT_LOCKED |
-                                  I915_WAIT_ALL,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  NULL);
+       ret = i915_gem_object_set_to_cpu_domain(obj, false);
        if (ret)
                return ret;
 
-       i915_gem_retire_requests(to_i915(obj->base.dev));
-
        while ((vma = list_first_entry_or_null(&obj->vma_list,
                                               struct i915_vma,
                                               obj_link))) {
@@ -474,7 +467,7 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
        struct drm_i915_gem_request *rq;
        struct intel_engine_cs *engine;
 
-       if (!dma_fence_is_i915(fence))
+       if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
                return;
 
        rq = to_request(fence);
@@ -4712,17 +4705,19 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
         * state. Fortunately, the kernel_context is disposable and we do
         * not rely on its state.
         */
-       ret = i915_gem_switch_to_kernel_context(dev_priv);
-       if (ret)
-               goto err_unlock;
+       if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+               ret = i915_gem_switch_to_kernel_context(dev_priv);
+               if (ret)
+                       goto err_unlock;
 
-       ret = i915_gem_wait_for_idle(dev_priv,
-                                    I915_WAIT_INTERRUPTIBLE |
-                                    I915_WAIT_LOCKED);
-       if (ret && ret != -EIO)
-               goto err_unlock;
+               ret = i915_gem_wait_for_idle(dev_priv,
+                                            I915_WAIT_INTERRUPTIBLE |
+                                            I915_WAIT_LOCKED);
+               if (ret && ret != -EIO)
+                       goto err_unlock;
 
-       assert_kernel_context_is_current(dev_priv);
+               assert_kernel_context_is_current(dev_priv);
+       }
        i915_gem_contexts_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
@@ -4946,8 +4941,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 {
        int ret;
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-
        /*
         * We need to fallback to 4K pages since gvt gtt handling doesn't
         * support huge page entries - we will need to check either hypervisor
@@ -4967,18 +4960,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
                dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
        }
 
+       ret = i915_gem_init_userptr(dev_priv);
+       if (ret)
+               return ret;
+
        /* This is just a security blanket to placate dragons.
         * On some systems, we very sporadically observe that the first TLBs
         * used by the CS may be stale, despite us poking the TLB reset. If
         * we hold the forcewake during initialisation these problems
         * just magically go away.
         */
+       mutex_lock(&dev_priv->drm.struct_mutex);
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       ret = i915_gem_init_userptr(dev_priv);
-       if (ret)
-               goto out_unlock;
-
        ret = i915_gem_init_ggtt(dev_priv);
        if (ret)
                goto out_unlock;
index 135fc75..382a77a 100644 (file)
@@ -172,7 +172,9 @@ i915_mmu_notifier_create(struct mm_struct *mm)
        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT_CACHED;
-       mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+       mn->wq = alloc_workqueue("i915-userptr-release",
+                                WQ_UNBOUND | WQ_MEM_RECLAIM,
+                                0);
        if (mn->wq == NULL) {
                kfree(mn);
                return ERR_PTR(-ENOMEM);
@@ -827,7 +829,7 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
 
        dev_priv->mm.userptr_wq =
                alloc_workqueue("i915-userptr-acquire",
-                               WQ_HIGHPRI | WQ_MEM_RECLAIM,
+                               WQ_HIGHPRI | WQ_UNBOUND,
                                0);
        if (!dev_priv->mm.userptr_wq)
                return -ENOMEM;
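
Note: the two workqueue hunks adjust flags only. i915-userptr-release gains WQ_MEM_RECLAIM so a rescuer thread is available if the queue has to make progress while memory is tight, while i915-userptr-acquire swaps WQ_MEM_RECLAIM for WQ_UNBOUND, presumably because its work allocates memory and so cannot sit on the reclaim path, and unbound workers let the scheduler place the get_user_pages work freely. The resulting allocations, condensed into one hedged sketch (names and error handling are illustrative):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    /* Hedged sketch of the two allocations, trimmed to the essentials. */
    static struct workqueue_struct *release_wq, *acquire_wq;

    static int example_init_wqs(void)
    {
            release_wq = alloc_workqueue("i915-userptr-release",
                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
            if (!release_wq)
                    return -ENOMEM;

            acquire_wq = alloc_workqueue("i915-userptr-acquire",
                                         WQ_HIGHPRI | WQ_UNBOUND, 0);
            if (!acquire_wq) {
                    destroy_workqueue(release_wq);
                    return -ENOMEM;
            }
            return 0;
    }
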
index e299385..888b7d3 100644 (file)
@@ -52,7 +52,8 @@ int i915_gemfs_init(struct drm_i915_private *i915)
 
        if (has_transparent_hugepage()) {
                struct super_block *sb = gemfs->mnt_sb;
-               char options[] = "huge=within_size";
+               /* FIXME: Disabled until we get W/A for read BW issue. */
+               char options[] = "huge=never";
                int flags = 0;
                int err;
 
index 68a58cc..7923dfd 100644 (file)
@@ -2951,9 +2951,6 @@ enum i915_power_well_id {
 #define ILK_DPFC_CHICKEN       _MMIO(0x43224)
 #define   ILK_DPFC_DISABLE_DUMMY0 (1<<8)
 #define   ILK_DPFC_NUKE_ON_ANY_MODIFICATION    (1<<23)
-#define   GLK_SKIP_SEG_EN              (1<<12)
-#define   GLK_SKIP_SEG_COUNT_MASK      (3<<10)
-#define   GLK_SKIP_SEG_COUNT(x)                ((x)<<10)
 #define ILK_FBC_RT_BASE                _MMIO(0x2128)
 #define   ILK_FBC_RT_VALID     (1<<0)
 #define   SNB_FBC_FRONT_BUFFER (1<<1)
@@ -6980,6 +6977,7 @@ enum {
 #define  RESET_PCH_HANDSHAKE_ENABLE    (1<<4)
 
 #define GEN8_CHICKEN_DCPR_1            _MMIO(0x46430)
+#define   SKL_SELECT_ALTERNATE_DC_EXIT (1<<30)
 #define   MASK_WAKEMEM                 (1<<13)
 
 #define SKL_DFSM                       _MMIO(0x51000)
@@ -7029,6 +7027,8 @@ enum {
 #define GEN9_SLICE_COMMON_ECO_CHICKEN0         _MMIO(0x7308)
 #define  DISABLE_PIXEL_MASK_CAMMING            (1<<14)
 
+#define GEN9_SLICE_COMMON_ECO_CHICKEN1         _MMIO(0x731c)
+
 #define GEN7_L3SQCREG1                         _MMIO(0xB010)
 #define  VLV_B0_WA_L3SQCREG1_VALUE             0x00D30000
 
@@ -8525,6 +8525,7 @@ enum skl_power_gate {
 #define  BXT_CDCLK_CD2X_DIV_SEL_2      (2<<22)
 #define  BXT_CDCLK_CD2X_DIV_SEL_4      (3<<22)
 #define  BXT_CDCLK_CD2X_PIPE(pipe)     ((pipe)<<20)
+#define  CDCLK_DIVMUX_CD_OVERRIDE      (1<<19)
 #define  BXT_CDCLK_CD2X_PIPE_NONE      BXT_CDCLK_CD2X_PIPE(3)
 #define  BXT_CDCLK_SSA_PRECHARGE_ENABLE        (1<<16)
 #define  CDCLK_FREQ_DECIMAL_MASK       (0x7ff)
index e8ca67a..ac236b8 100644 (file)
@@ -367,6 +367,7 @@ struct i915_sw_dma_fence_cb {
        struct dma_fence *dma;
        struct timer_list timer;
        struct irq_work work;
+       struct rcu_head rcu;
 };
 
 static void timer_i915_sw_fence_wake(struct timer_list *t)
@@ -406,7 +407,7 @@ static void irq_i915_sw_fence_work(struct irq_work *wrk)
        del_timer_sync(&cb->timer);
        dma_fence_put(cb->dma);
 
-       kfree(cb);
+       kfree_rcu(cb, rcu);
 }
 
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
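
Note: the extra rcu_head and the switch from kfree() to kfree_rcu() keep the dma-fence callback structure valid for any reader that may still hold an RCU reference to it; the free is simply deferred past the next grace period. The general shape of the pattern, with illustrative types and names rather than the i915 ones:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct example_cb {
            int payload;            /* stand-in for the real callback state */
            struct rcu_head rcu;    /* storage kfree_rcu() uses to defer the free */
    };

    static void example_release(struct example_cb *cb)
    {
            /*
             * Readers that found cb under rcu_read_lock() may still be using it;
             * kfree_rcu() waits for a grace period before handing it to kfree().
             */
            kfree_rcu(cb, rcu);
    }
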
index 48e1ba0..bcbc7ab 100644 (file)
@@ -186,7 +186,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
        struct intel_wait *wait, *n, *first;
 
        if (!b->irq_armed)
-               return;
+               goto wakeup_signaler;
 
        /* We only disarm the irq when we are idle (all requests completed),
         * so if the bottom-half remains asleep, it missed the request
@@ -208,6 +208,14 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
        b->waiters = RB_ROOT;
 
        spin_unlock_irq(&b->rb_lock);
+
+       /*
+        * The signaling thread may be asleep holding a reference to a request,
+        * that had its signaling cancelled prior to being preempted. We need
+        * to kick the signaler, just in case, to release any such reference.
+        */
+wakeup_signaler:
+       wake_up_process(b->signaler);
 }
 
 static bool use_fake_irq(const struct intel_breadcrumbs *b)
@@ -517,6 +525,7 @@ static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
 
        GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
        rb_erase(&wait->node, &b->waiters);
+       RB_CLEAR_NODE(&wait->node);
 
 out:
        GEM_BUG_ON(b->irq_wait == wait);
@@ -650,23 +659,15 @@ static int intel_breadcrumbs_signaler(void *arg)
                }
 
                if (unlikely(do_schedule)) {
-                       DEFINE_WAIT(exec);
-
                        if (kthread_should_park())
                                kthread_parkme();
 
-                       if (kthread_should_stop()) {
-                               GEM_BUG_ON(request);
+                       if (unlikely(kthread_should_stop())) {
+                               i915_gem_request_put(request);
                                break;
                        }
 
-                       if (request)
-                               add_wait_queue(&request->execute, &exec);
-
                        schedule();
-
-                       if (request)
-                               remove_wait_queue(&request->execute, &exec);
                }
                i915_gem_request_put(request);
        } while (1);
index b2a6d62..60cf4e5 100644 (file)
@@ -860,16 +860,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
 
 static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 {
-       int min_cdclk = skl_calc_cdclk(0, vco);
        u32 val;
 
        WARN_ON(vco != 8100000 && vco != 8640000);
 
-       /* select the minimum CDCLK before enabling DPLL 0 */
-       val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
-       I915_WRITE(CDCLK_CTL, val);
-       POSTING_READ(CDCLK_CTL);
-
        /*
         * We always enable DPLL0 with the lowest link rate possible, but still
         * taking into account the VCO required to operate the eDP panel at the
@@ -923,7 +917,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
 {
        int cdclk = cdclk_state->cdclk;
        int vco = cdclk_state->vco;
-       u32 freq_select, pcu_ack;
+       u32 freq_select, pcu_ack, cdclk_ctl;
        int ret;
 
        WARN_ON((cdclk == 24000) != (vco == 0));
@@ -940,7 +934,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
                return;
        }
 
-       /* set CDCLK_CTL */
+       /* Choose frequency for this cdclk */
        switch (cdclk) {
        case 450000:
        case 432000:
@@ -968,10 +962,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
            dev_priv->cdclk.hw.vco != vco)
                skl_dpll0_disable(dev_priv);
 
+       cdclk_ctl = I915_READ(CDCLK_CTL);
+
+       if (dev_priv->cdclk.hw.vco != vco) {
+               /* Wa Display #1183: skl,kbl,cfl */
+               cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+               cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+               I915_WRITE(CDCLK_CTL, cdclk_ctl);
+       }
+
+       /* Wa Display #1183: skl,kbl,cfl */
+       cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
+       I915_WRITE(CDCLK_CTL, cdclk_ctl);
+       POSTING_READ(CDCLK_CTL);
+
        if (dev_priv->cdclk.hw.vco != vco)
                skl_dpll0_enable(dev_priv, vco);
 
-       I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
+       /* Wa Display #1183: skl,kbl,cfl */
+       cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+       I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+       cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+       I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+       /* Wa Display #1183: skl,kbl,cfl */
+       cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
+       I915_WRITE(CDCLK_CTL, cdclk_ctl);
        POSTING_READ(CDCLK_CTL);
 
        /* inform PCU of the change */
index 933c18f..58a3755 100644 (file)
@@ -2128,9 +2128,12 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
        if (WARN_ON(!pll))
                return;
 
+        mutex_lock(&dev_priv->dpll_lock);
+
        if (IS_CANNONLAKE(dev_priv)) {
                /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
                val = I915_READ(DPCLKA_CFGCR0);
+               val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
                val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port);
                I915_WRITE(DPCLKA_CFGCR0, val);
 
@@ -2156,6 +2159,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
        } else if (INTEL_INFO(dev_priv)->gen < 9) {
                I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
        }
+
+       mutex_unlock(&dev_priv->dpll_lock);
 }
 
 static void intel_ddi_clk_disable(struct intel_encoder *encoder)
index 878acc4..50f8443 100644 (file)
@@ -1000,7 +1000,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
        return crtc->config->cpu_transcoder;
 }
 
-static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
+static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
+                                   enum pipe pipe)
 {
        i915_reg_t reg = PIPEDSL(pipe);
        u32 line1, line2;
@@ -1015,7 +1016,28 @@ static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
        msleep(5);
        line2 = I915_READ(reg) & line_mask;
 
-       return line1 == line2;
+       return line1 != line2;
+}
+
+static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       /* Wait for the display line to settle/start moving */
+       if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
+               DRM_ERROR("pipe %c scanline %s wait timed out\n",
+                         pipe_name(pipe), onoff(state));
+}
+
+static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
+{
+       wait_for_pipe_scanline_moving(crtc, false);
+}
+
+static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
+{
+       wait_for_pipe_scanline_moving(crtc, true);
 }
 
 /*
@@ -1038,7 +1060,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
-       enum pipe pipe = crtc->pipe;
 
        if (INTEL_GEN(dev_priv) >= 4) {
                i915_reg_t reg = PIPECONF(cpu_transcoder);
@@ -1049,9 +1070,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
                                            100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
-               /* Wait for the display line to settle */
-               if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
-                       WARN(1, "pipe_off wait timed out\n");
+               intel_wait_for_pipe_scanline_stopped(crtc);
        }
 }
 
@@ -1192,23 +1211,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
             pipe_name(pipe));
 }
 
-static void assert_cursor(struct drm_i915_private *dev_priv,
-                         enum pipe pipe, bool state)
-{
-       bool cur_state;
-
-       if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
-               cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
-       else
-               cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
-
-       I915_STATE_WARN(cur_state != state,
-            "cursor on pipe %c assertion failure (expected %s, current %s)\n",
-                       pipe_name(pipe), onoff(state), onoff(cur_state));
-}
-#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
-#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
-
 void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
 {
@@ -1236,77 +1238,25 @@ void assert_pipe(struct drm_i915_private *dev_priv,
                        pipe_name(pipe), onoff(state), onoff(cur_state));
 }
 
-static void assert_plane(struct drm_i915_private *dev_priv,
-                        enum plane plane, bool state)
+static void assert_plane(struct intel_plane *plane, bool state)
 {
-       u32 val;
-       bool cur_state;
+       bool cur_state = plane->get_hw_state(plane);
 
-       val = I915_READ(DSPCNTR(plane));
-       cur_state = !!(val & DISPLAY_PLANE_ENABLE);
        I915_STATE_WARN(cur_state != state,
-            "plane %c assertion failure (expected %s, current %s)\n",
-                       plane_name(plane), onoff(state), onoff(cur_state));
+                       "%s assertion failure (expected %s, current %s)\n",
+                       plane->base.name, onoff(state), onoff(cur_state));
 }
 
-#define assert_plane_enabled(d, p) assert_plane(d, p, true)
-#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+#define assert_plane_enabled(p) assert_plane(p, true)
+#define assert_plane_disabled(p) assert_plane(p, false)
 
-static void assert_planes_disabled(struct drm_i915_private *dev_priv,
-                                  enum pipe pipe)
+static void assert_planes_disabled(struct intel_crtc *crtc)
 {
-       int i;
-
-       /* Primary planes are fixed to pipes on gen4+ */
-       if (INTEL_GEN(dev_priv) >= 4) {
-               u32 val = I915_READ(DSPCNTR(pipe));
-               I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
-                    "plane %c assertion failure, should be disabled but not\n",
-                    plane_name(pipe));
-               return;
-       }
-
-       /* Need to check both planes against the pipe */
-       for_each_pipe(dev_priv, i) {
-               u32 val = I915_READ(DSPCNTR(i));
-               enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
-                       DISPPLANE_SEL_PIPE_SHIFT;
-               I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
-                    "plane %c assertion failure, should be off on pipe %c but is still active\n",
-                    plane_name(i), pipe_name(pipe));
-       }
-}
-
-static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
-                                   enum pipe pipe)
-{
-       int sprite;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_plane *plane;
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               for_each_sprite(dev_priv, pipe, sprite) {
-                       u32 val = I915_READ(PLANE_CTL(pipe, sprite));
-                       I915_STATE_WARN(val & PLANE_CTL_ENABLE,
-                            "plane %d assertion failure, should be off on pipe %c but is still active\n",
-                            sprite, pipe_name(pipe));
-               }
-       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               for_each_sprite(dev_priv, pipe, sprite) {
-                       u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
-                       I915_STATE_WARN(val & SP_ENABLE,
-                            "sprite %c assertion failure, should be off on pipe %c but is still active\n",
-                            sprite_name(pipe, sprite), pipe_name(pipe));
-               }
-       } else if (INTEL_GEN(dev_priv) >= 7) {
-               u32 val = I915_READ(SPRCTL(pipe));
-               I915_STATE_WARN(val & SPRITE_ENABLE,
-                    "sprite %c assertion failure, should be off on pipe %c but is still active\n",
-                    plane_name(pipe), pipe_name(pipe));
-       } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
-               u32 val = I915_READ(DVSCNTR(pipe));
-               I915_STATE_WARN(val & DVS_ENABLE,
-                    "sprite %c assertion failure, should be off on pipe %c but is still active\n",
-                    plane_name(pipe), pipe_name(pipe));
-       }
+       for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+               assert_plane_disabled(plane);
 }
 
 static void assert_vblank_disabled(struct drm_crtc *crtc)
@@ -1899,9 +1849,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
 
        DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
 
-       assert_planes_disabled(dev_priv, pipe);
-       assert_cursor_disabled(dev_priv, pipe);
-       assert_sprites_disabled(dev_priv, pipe);
+       assert_planes_disabled(crtc);
 
        /*
         * A pipe without a PLL won't actually be able to drive bits from
@@ -1936,15 +1884,14 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
        POSTING_READ(reg);
 
        /*
-        * Until the pipe starts DSL will read as 0, which would cause
-        * an apparent vblank timestamp jump, which messes up also the
-        * frame count when it's derived from the timestamps. So let's
-        * wait for the pipe to start properly before we call
-        * drm_crtc_vblank_on()
+        * Until the pipe starts PIPEDSL reads will return a stale value,
+        * which causes an apparent vblank timestamp jump when PIPEDSL
+        * resets to its proper value. That also messes up the frame count
+        * when it's derived from the timestamps. So let's wait for the
+        * pipe to start properly before we call drm_crtc_vblank_on()
         */
-       if (dev->max_vblank_count == 0 &&
-           wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
-               DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
+       if (dev->max_vblank_count == 0)
+               intel_wait_for_pipe_scanline_moving(crtc);
 }
 
 /**
@@ -1971,9 +1918,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
         * Make sure planes won't keep trying to pump pixels to us,
         * or we might hang the display.
         */
-       assert_planes_disabled(dev_priv, pipe);
-       assert_cursor_disabled(dev_priv, pipe);
-       assert_sprites_disabled(dev_priv, pipe);
+       assert_planes_disabled(crtc);
 
        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
@@ -2802,6 +2747,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
                      crtc_state->active_planes);
 }
 
+static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+                                        struct intel_plane *plane)
+{
+       struct intel_crtc_state *crtc_state =
+               to_intel_crtc_state(crtc->base.state);
+       struct intel_plane_state *plane_state =
+               to_intel_plane_state(plane->base.state);
+
+       intel_set_plane_visible(crtc_state, plane_state, false);
+
+       if (plane->id == PLANE_PRIMARY)
+               intel_pre_disable_primary_noatomic(&crtc->base);
+
+       trace_intel_disable_plane(&plane->base, crtc);
+       plane->disable_plane(plane, crtc);
+}
+
 static void
 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
@@ -2859,12 +2821,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
-       intel_set_plane_visible(to_intel_crtc_state(crtc_state),
-                               to_intel_plane_state(plane_state),
-                               false);
-       intel_pre_disable_primary_noatomic(&intel_crtc->base);
-       trace_intel_disable_plane(primary, intel_crtc);
-       intel_plane->disable_plane(intel_plane, intel_crtc);
+       intel_plane_disable_noatomic(intel_crtc, intel_plane);
 
        return;
 
@@ -3367,6 +3324,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary,
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
+static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
+{
+
+       struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum plane plane = primary->plane;
+       enum pipe pipe = primary->pipe;
+       bool ret;
+
+       /*
+        * Not 100% correct for planes that can move between pipes,
+        * but that's only the case for gen2-4 which don't have any
+        * display power wells.
+        */
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static u32
 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
 {
@@ -4848,7 +4830,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
         * a vblank wait.
         */
 
-       assert_plane_enabled(dev_priv, crtc->plane);
+       assert_plane_enabled(to_intel_plane(crtc->base.primary));
+
        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
@@ -4881,7 +4864,8 @@ void hsw_disable_ips(struct intel_crtc *crtc)
        if (!crtc->config->ips_enabled)
                return;
 
-       assert_plane_enabled(dev_priv, crtc->plane);
+       assert_plane_enabled(to_intel_plane(crtc->base.primary));
+
        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
@@ -5881,6 +5865,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        enum intel_display_power_domain domain;
+       struct intel_plane *plane;
        u64 domains;
        struct drm_atomic_state *state;
        struct intel_crtc_state *crtc_state;
@@ -5889,11 +5874,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
        if (!intel_crtc->active)
                return;
 
-       if (crtc->primary->state->visible) {
-               intel_pre_disable_primary_noatomic(crtc);
+       for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+               const struct intel_plane_state *plane_state =
+                       to_intel_plane_state(plane->base.state);
 
-               intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
-               crtc->primary->state->visible = false;
+               if (plane_state->base.visible)
+                       intel_plane_disable_noatomic(intel_crtc, plane);
        }
 
        state = drm_atomic_state_alloc(crtc->dev);
@@ -9459,6 +9445,23 @@ static void i845_disable_cursor(struct intel_plane *plane,
        i845_update_cursor(plane, NULL, NULL);
 }
 
+static bool i845_cursor_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
                           const struct intel_plane_state *plane_state)
 {
@@ -9652,6 +9655,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
        i9xx_update_cursor(plane, NULL, NULL);
 }
 
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum pipe pipe = plane->pipe;
+       bool ret;
+
+       /*
+        * Not 100% correct for planes that can move between pipes,
+        * but that's only the case for gen2-3 which don't have any
+        * display power wells.
+        */
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
 
 /* VESA 640x480x72Hz mode to set on the pipe */
 static const struct drm_display_mode load_detect_mode = {
@@ -9926,11 +9951,10 @@ found:
        }
 
        ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
+       drm_framebuffer_put(fb);
        if (ret)
                goto fail;
 
-       drm_framebuffer_put(fb);
-
        ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
        if (ret)
                goto fail;
@@ -12527,11 +12551,15 @@ static int intel_atomic_commit(struct drm_device *dev,
        INIT_WORK(&state->commit_work, intel_atomic_commit_work);
 
        i915_sw_fence_commit(&intel_state->commit_ready);
-       if (nonblock)
+       if (nonblock && intel_state->modeset) {
+               queue_work(dev_priv->modeset_wq, &state->commit_work);
+       } else if (nonblock) {
                queue_work(system_unbound_wq, &state->commit_work);
-       else
+       } else {
+               if (intel_state->modeset)
+                       flush_workqueue(dev_priv->modeset_wq);
                intel_atomic_commit_tail(state);
-
+       }
 
        return 0;
 }
@@ -13177,13 +13205,14 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
        primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
        primary->check_plane = intel_check_primary_plane;
 
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 10) {
                intel_primary_formats = skl_primary_formats;
                num_formats = ARRAY_SIZE(skl_primary_formats);
                modifiers = skl_format_modifiers_ccs;
 
                primary->update_plane = skl_update_plane;
                primary->disable_plane = skl_disable_plane;
+               primary->get_hw_state = skl_plane_get_hw_state;
        } else if (INTEL_GEN(dev_priv) >= 9) {
                intel_primary_formats = skl_primary_formats;
                num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -13194,6 +13223,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 
                primary->update_plane = skl_update_plane;
                primary->disable_plane = skl_disable_plane;
+               primary->get_hw_state = skl_plane_get_hw_state;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                intel_primary_formats = i965_primary_formats;
                num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13201,6 +13231,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 
                primary->update_plane = i9xx_update_primary_plane;
                primary->disable_plane = i9xx_disable_primary_plane;
+               primary->get_hw_state = i9xx_plane_get_hw_state;
        } else {
                intel_primary_formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13208,6 +13239,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 
                primary->update_plane = i9xx_update_primary_plane;
                primary->disable_plane = i9xx_disable_primary_plane;
+               primary->get_hw_state = i9xx_plane_get_hw_state;
        }
 
        if (INTEL_GEN(dev_priv) >= 9)
@@ -13297,10 +13329,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
        if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
                cursor->update_plane = i845_update_cursor;
                cursor->disable_plane = i845_disable_cursor;
+               cursor->get_hw_state = i845_cursor_get_hw_state;
                cursor->check_plane = i845_check_cursor;
        } else {
                cursor->update_plane = i9xx_update_cursor;
                cursor->disable_plane = i9xx_disable_cursor;
+               cursor->get_hw_state = i9xx_cursor_get_hw_state;
                cursor->check_plane = i9xx_check_cursor;
        }
 
@@ -14445,6 +14479,8 @@ int intel_modeset_init(struct drm_device *dev)
        enum pipe pipe;
        struct intel_crtc *crtc;
 
+       dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+
        drm_mode_config_init(dev);
 
        dev->mode_config.min_width = 0;
@@ -14643,38 +14679,56 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 
 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
+       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
        DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
                      pipe_name(pipe));
 
-       assert_plane_disabled(dev_priv, PLANE_A);
-       assert_plane_disabled(dev_priv, PLANE_B);
+       WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
+       WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
+       WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
+       WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
+       WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
 
        I915_WRITE(PIPECONF(pipe), 0);
        POSTING_READ(PIPECONF(pipe));
 
-       if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
-               DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe));
+       intel_wait_for_pipe_scanline_stopped(crtc);
 
        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
 }
 
-static bool
-intel_check_plane_mapping(struct intel_crtc *crtc)
+static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
+                                  struct intel_plane *primary)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 val;
+       enum plane plane = primary->plane;
+       u32 val = I915_READ(DSPCNTR(plane));
 
-       if (INTEL_INFO(dev_priv)->num_pipes == 1)
-               return true;
+       return (val & DISPLAY_PLANE_ENABLE) == 0 ||
+               (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
+}
 
-       val = I915_READ(DSPCNTR(!crtc->plane));
+static void
+intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
+{
+       struct intel_crtc *crtc;
 
-       if ((val & DISPLAY_PLANE_ENABLE) &&
-           (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
-               return false;
+       if (INTEL_GEN(dev_priv) >= 4)
+               return;
 
-       return true;
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
+               struct intel_plane *plane =
+                       to_intel_plane(crtc->base.primary);
+
+               if (intel_plane_mapping_ok(crtc, plane))
+                       continue;
+
+               DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
+                             plane->base.name);
+               intel_plane_disable_noatomic(crtc, plane);
+       }
 }
 
 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
@@ -14730,33 +14784,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 
                /* Disable everything but the primary plane */
                for_each_intel_plane_on_crtc(dev, crtc, plane) {
-                       if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
-                               continue;
+                       const struct intel_plane_state *plane_state =
+                               to_intel_plane_state(plane->base.state);
 
-                       trace_intel_disable_plane(&plane->base, crtc);
-                       plane->disable_plane(plane, crtc);
+                       if (plane_state->base.visible &&
+                           plane->base.type != DRM_PLANE_TYPE_PRIMARY)
+                               intel_plane_disable_noatomic(crtc, plane);
                }
        }
 
-       /* We need to sanitize the plane -> pipe mapping first because this will
-        * disable the crtc (and hence change the state) if it is wrong. Note
-        * that gen4+ has a fixed plane -> pipe mapping.  */
-       if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
-               bool plane;
-
-               DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
-                             crtc->base.base.id, crtc->base.name);
-
-               /* Pipe has the wrong plane attached and the plane is active.
-                * Temporarily change the plane mapping and disable everything
-                * ...  */
-               plane = crtc->plane;
-               crtc->base.primary->state->visible = true;
-               crtc->plane = !plane;
-               intel_crtc_disable_noatomic(&crtc->base, ctx);
-               crtc->plane = plane;
-       }
-
        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc->active && !intel_crtc_has_encoders(crtc))
@@ -14861,24 +14897,21 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv)
        intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 }
 
-static bool primary_get_hw_state(struct intel_plane *plane)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
-       return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
-}
-
 /* FIXME read out full plane state for all planes */
 static void readout_plane_state(struct intel_crtc *crtc)
 {
-       struct intel_plane *primary = to_intel_plane(crtc->base.primary);
-       bool visible;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_crtc_state *crtc_state =
+               to_intel_crtc_state(crtc->base.state);
+       struct intel_plane *plane;
 
-       visible = crtc->active && primary_get_hw_state(primary);
+       for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+               struct intel_plane_state *plane_state =
+                       to_intel_plane_state(plane->base.state);
+               bool visible = plane->get_hw_state(plane);
 
-       intel_set_plane_visible(to_intel_crtc_state(crtc->base.state),
-                               to_intel_plane_state(primary->base.state),
-                               visible);
+               intel_set_plane_visible(crtc_state, plane_state, visible);
+       }
 }
 
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15076,6 +15109,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
        /* HW state is read out, now we need to sanitize this mess. */
        get_encoder_power_domains(dev_priv);
 
+       intel_sanitize_plane_mapping(dev_priv);
+
        for_each_intel_encoder(dev, encoder) {
                intel_sanitize_encoder(encoder);
        }
@@ -15252,6 +15287,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
        intel_cleanup_gt_powersave(dev_priv);
 
        intel_teardown_gmbus(dev_priv);
+
+       destroy_workqueue(dev_priv->modeset_wq);
 }
 
 void intel_connector_attach_encoder(struct intel_connector *connector,
index 7bc60c8..5d77f75 100644 (file)
@@ -862,6 +862,7 @@ struct intel_plane {
                             const struct intel_plane_state *plane_state);
        void (*disable_plane)(struct intel_plane *plane,
                              struct intel_crtc *crtc);
+       bool (*get_hw_state)(struct intel_plane *plane);
        int (*check_plane)(struct intel_plane *plane,
                           struct intel_crtc_state *crtc_state,
                           struct intel_plane_state *state);
@@ -1736,7 +1737,7 @@ extern struct drm_display_mode *intel_find_panel_downclock(
 int intel_backlight_device_register(struct intel_connector *connector);
 void intel_backlight_device_unregister(struct intel_connector *connector);
 #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
+static inline int intel_backlight_device_register(struct intel_connector *connector)
 {
        return 0;
 }
@@ -1924,6 +1925,7 @@ void skl_update_plane(struct intel_plane *plane,
                      const struct intel_crtc_state *crtc_state,
                      const struct intel_plane_state *plane_state);
 void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
+bool skl_plane_get_hw_state(struct intel_plane *plane);
 
 /* intel_tv.c */
 void intel_tv_init(struct drm_i915_private *dev_priv);
index ab5bf4e..6074e04 100644 (file)
@@ -1390,6 +1390,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine)
        if (ret)
                return ret;
 
+       /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+       ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+       if (ret)
+               return ret;
+
        /* WaToEnableHwFixForPushConstHWBug:glk */
        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
index b8af351..ea96682 100644 (file)
@@ -697,10 +697,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
 
        /* Due to peculiar init order wrt hpd handling this is separate. */
        if (drm_fb_helper_initial_config(&ifbdev->helper,
-                                        ifbdev->preferred_bpp)) {
+                                        ifbdev->preferred_bpp))
                intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
-               intel_fbdev_fini(to_i915(ifbdev->helper.dev));
-       }
 }
 
 void intel_fbdev_initial_config_async(struct drm_device *dev)
@@ -800,7 +798,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
-       if (ifbdev)
+       if (!ifbdev)
+               return;
+
+       intel_fbdev_sync(ifbdev);
+       if (ifbdev->vma)
                drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
index 5132dc8..4dea833 100644 (file)
@@ -487,7 +487,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
                                           crtc_state->limited_color_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
                                           HDMI_QUANTIZATION_RANGE_FULL,
-                                          intel_hdmi->rgb_quant_range_selectable);
+                                          intel_hdmi->rgb_quant_range_selectable,
+                                          is_hdmi2_sink);
 
        /* TODO: handle pixel repetition for YCBCR420 outputs */
        intel_write_infoframe(encoder, crtc_state, &frame);
index eb58271..49fdf09 100644 (file)
@@ -438,7 +438,9 @@ static bool
 gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
 {
        return (i + 1 < num &&
-               !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+               msgs[i].addr == msgs[i + 1].addr &&
+               !(msgs[i].flags & I2C_M_RD) &&
+               (msgs[i].len == 1 || msgs[i].len == 2) &&
                (msgs[i + 1].flags & I2C_M_RD));
 }
 
index 3bf6528..5809b29 100644 (file)
@@ -193,7 +193,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
                };
 
                if (!pci_dev_present(atom_hdaudio_ids)) {
-                       DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n");
+                       DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
                        lpe_present = true;
                }
        }
index d36e256..e71a8cd 100644 (file)
@@ -974,6 +974,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
+       if (i915_gem_request_completed(request))
+               return;
+
        if (prio <= READ_ONCE(request->priotree.priority))
                return;
 
index f4a4e94..f0d0dba 100644 (file)
@@ -124,7 +124,6 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-       u32 val;
        gen9_init_clock_gating(dev_priv);
 
        /*
@@ -144,11 +143,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
                I915_WRITE(CHICKEN_MISC_2, val);
        }
 
-       /* Display WA #1133: WaFbcSkipSegments:glk */
-       val = I915_READ(ILK_DPFC_CHICKEN);
-       val &= ~GLK_SKIP_SEG_COUNT_MASK;
-       val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
-       I915_WRITE(ILK_DPFC_CHICKEN, val);
 }
 
 static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -8517,7 +8511,6 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-       u32 val;
        cnp_init_clock_gating(dev_priv);
 
        /* This is not a Wa. Enable for better image quality */
@@ -8537,12 +8530,6 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
                I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
                           I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
                           SARBUNIT_CLKGATE_DIS);
-
-       /* Display WA #1133: WaFbcSkipSegments:cnl */
-       val = I915_READ(ILK_DPFC_CHICKEN);
-       val &= ~GLK_SKIP_SEG_COUNT_MASK;
-       val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
-       I915_WRITE(ILK_DPFC_CHICKEN, val);
 }
 
 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
index 6e3b430..55ea5eb 100644 (file)
@@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (dev_priv->psr.active) {
-               i915_reg_t psr_ctl;
+               i915_reg_t psr_status;
                u32 psr_status_mask;
 
                if (dev_priv->psr.aux_frame_sync)
@@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
                                        0);
 
                if (dev_priv->psr.psr2_support) {
-                       psr_ctl = EDP_PSR2_CTL;
+                       psr_status = EDP_PSR2_STATUS_CTL;
                        psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
 
-                       I915_WRITE(psr_ctl,
-                                  I915_READ(psr_ctl) &
+                       I915_WRITE(EDP_PSR2_CTL,
+                                  I915_READ(EDP_PSR2_CTL) &
                                   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
 
                } else {
-                       psr_ctl = EDP_PSR_STATUS_CTL;
+                       psr_status = EDP_PSR_STATUS_CTL;
                        psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
 
-                       I915_WRITE(psr_ctl,
-                                  I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
+                       I915_WRITE(EDP_PSR_CTL,
+                                  I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
                }
 
                /* Wait till PSR is idle */
                if (intel_wait_for_register(dev_priv,
-                                           psr_ctl, psr_status_mask, 0,
+                                           psr_status, psr_status_mask, 0,
                                            2000))
                        DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
index 8af286c..7e115f3 100644 (file)
@@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
 
        DRM_DEBUG_KMS("Enabling DC5\n");
 
+       /* Wa Display #1183: skl,kbl,cfl */
+       if (IS_GEN9_BC(dev_priv))
+               I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+                          SKL_SELECT_ALTERNATE_DC_EXIT);
+
        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
 }
 
@@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
 {
        DRM_DEBUG_KMS("Disabling DC6\n");
 
+       /* Wa Display #1183: skl,kbl,cfl */
+       if (IS_GEN9_BC(dev_priv))
+               I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+                          SKL_SELECT_ALTERNATE_DC_EXIT);
+
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 }
 
@@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
        BIT_ULL(POWER_DOMAIN_MODESET) |                 \
        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
        BIT_ULL(POWER_DOMAIN_INIT))
 
 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
index 4fcf80c..4a8a5d9 100644 (file)
@@ -329,6 +329,26 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
+bool
+skl_plane_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum plane_id plane_id = plane->id;
+       enum pipe pipe = plane->pipe;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static void
 chv_update_csc(struct intel_plane *plane, uint32_t format)
 {
@@ -506,6 +526,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
+static bool
+vlv_plane_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum plane_id plane_id = plane->id;
+       enum pipe pipe = plane->pipe;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
 {
@@ -646,6 +686,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
+static bool
+ivb_plane_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum pipe pipe = plane->pipe;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
 {
@@ -777,6 +836,25 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
+static bool
+g4x_plane_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum pipe pipe = plane->pipe;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static int
 intel_check_sprite_plane(struct intel_plane *plane,
                         struct intel_crtc_state *crtc_state,
@@ -1232,6 +1310,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
                intel_plane->update_plane = skl_update_plane;
                intel_plane->disable_plane = skl_disable_plane;
+               intel_plane->get_hw_state = skl_plane_get_hw_state;
 
                plane_formats = skl_plane_formats;
                num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1242,6 +1321,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
                intel_plane->update_plane = skl_update_plane;
                intel_plane->disable_plane = skl_disable_plane;
+               intel_plane->get_hw_state = skl_plane_get_hw_state;
 
                plane_formats = skl_plane_formats;
                num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1252,6 +1332,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
                intel_plane->update_plane = vlv_update_plane;
                intel_plane->disable_plane = vlv_disable_plane;
+               intel_plane->get_hw_state = vlv_plane_get_hw_state;
 
                plane_formats = vlv_plane_formats;
                num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -1267,6 +1348,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
                intel_plane->update_plane = ivb_update_plane;
                intel_plane->disable_plane = ivb_disable_plane;
+               intel_plane->get_hw_state = ivb_plane_get_hw_state;
 
                plane_formats = snb_plane_formats;
                num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1277,6 +1359,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
                intel_plane->update_plane = g4x_update_plane;
                intel_plane->disable_plane = g4x_disable_plane;
+               intel_plane->get_hw_state = g4x_plane_get_hw_state;
 
                modifiers = i9xx_plane_format_modifiers;
                if (IS_GEN6(dev_priv)) {
index 20e3c65..8c2ce81 100644 (file)
@@ -434,6 +434,12 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
        i915_check_and_clear_faults(dev_priv);
 }
 
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+{
+       iosf_mbi_register_pmic_bus_access_notifier(
+               &dev_priv->uncore.pmic_bus_access_nb);
+}
+
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
 {
        i915_modparams.enable_rc6 =
@@ -1240,8 +1246,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
                 * bus, which will be busy after this notification, leading to:
                 * "render: timed out waiting for forcewake ack request."
                 * errors.
+                *
+                * The notifier is unregistered during intel_runtime_suspend(),
+                * so it's ok to access the HW here without holding a RPM
+                * wake reference -> disable wakeref asserts for the time of
+                * the access.
                 */
+               disable_rpm_wakeref_asserts(dev_priv);
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+               enable_rpm_wakeref_asserts(dev_priv);
                break;
        case MBI_PMIC_BUS_ACCESS_END:
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
index 5827712..9ce079b 100644 (file)
@@ -134,6 +134,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv
 void intel_uncore_fini(struct drm_i915_private *dev_priv);
 void intel_uncore_suspend(struct drm_i915_private *dev_priv);
 void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
 
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
index 93c7e3f..17d2f3a 100644 (file)
@@ -133,9 +133,16 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
                        plane_disabling = true;
        }
 
-       if (plane_disabling) {
-               drm_atomic_helper_wait_for_vblanks(dev, state);
+       /*
+        * The flip done wait is only strictly required by imx-drm if a deferred
+        * plane disable is in-flight. As the core requires blocking commits
+        * to wait for the flip it is done here unconditionally. This keeps the
+        * workitem around a bit longer than required for the majority of
+        * non-blocking commits, but we accept that for the sake of simplicity.
+        */
+       drm_atomic_helper_wait_for_flip_done(dev, state);
 
+       if (plane_disabling) {
                for_each_old_plane_in_state(state, plane, old_plane_state, i)
                        ipu_plane_disable_deferred(plane);
 
index 0760b93..baab933 100644 (file)
@@ -121,6 +121,7 @@ int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
index 2615912..ef68741 100644 (file)
@@ -224,7 +224,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
                /* Determine if we can get a cache-coherent map, forcing
                 * uncached mapping if we can't.
                 */
-               if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+               if (!nouveau_drm_use_coherent_gpu_mapping(drm))
                        nvbo->force_coherent = true;
        }
 
@@ -262,7 +262,8 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
                if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
                    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
                        continue;
-               if ((flags & TTM_PL_FLAG_TT  ) && !vmm->page[i].host)
+               if ((flags & TTM_PL_FLAG_TT) &&
+                   (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
                        continue;
 
                /* Select this page size if it's the first that supports
@@ -1446,11 +1447,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
                                args.nv50.ro = 0;
                                args.nv50.kind = mem->kind;
                                args.nv50.comp = mem->comp;
+                               argc = sizeof(args.nv50);
                                break;
                        case NVIF_CLASS_MEM_GF100:
                                args.gf100.version = 0;
                                args.gf100.ro = 0;
                                args.gf100.kind = mem->kind;
+                               argc = sizeof(args.gf100);
                                break;
                        default:
                                WARN_ON(1);
@@ -1458,7 +1461,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
                        }
 
                        ret = nvif_object_map_handle(&mem->mem.object,
-                                                    &argc, argc,
+                                                    &args, argc,
                                                     &handle, &length);
                        if (ret != 1)
                                return ret ? ret : -EINVAL;
index 8d4a5be..56fe261 100644 (file)
@@ -152,9 +152,9 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
        work->cli = cli;
        mutex_lock(&cli->lock);
        list_add_tail(&work->head, &cli->worker);
-       mutex_unlock(&cli->lock);
        if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
                nouveau_cli_work_fence(fence, &work->cb);
+       mutex_unlock(&cli->lock);
 }
 
 static void
index 3331e82..96f6bd8 100644 (file)
@@ -157,8 +157,8 @@ struct nouveau_drm {
                struct nvif_object copy;
                int mtrr;
                int type_vram;
-               int type_host;
-               int type_ncoh;
+               int type_host[2];
+               int type_ncoh[2];
        } ttm;
 
        /* GEM interface support */
@@ -217,6 +217,13 @@ nouveau_drm(struct drm_device *dev)
        return dev->dev_private;
 }
 
+static inline bool
+nouveau_drm_use_coherent_gpu_mapping(struct nouveau_drm *drm)
+{
+       struct nvif_mmu *mmu = &drm->client.mmu;
+       return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED);
+}
+
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
 bool nouveau_pmops_runtime(void);
index c533d8e..be7357b 100644 (file)
@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
        drm_fb_helper_unregister_fbi(&fbcon->helper);
        drm_fb_helper_fini(&fbcon->helper);
 
-       if (nouveau_fb->nvbo) {
+       if (nouveau_fb && nouveau_fb->nvbo) {
                nouveau_vma_del(&nouveau_fb->vma);
                nouveau_bo_unmap(nouveau_fb->nvbo);
                nouveau_bo_unpin(nouveau_fb->nvbo);
index 589a962..c002f89 100644 (file)
@@ -103,10 +103,10 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
        u8 type;
        int ret;
 
-       if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
-               type = drm->ttm.type_ncoh;
+       if (!nouveau_drm_use_coherent_gpu_mapping(drm))
+               type = drm->ttm.type_ncoh[!!mem->kind];
        else
-               type = drm->ttm.type_host;
+               type = drm->ttm.type_host[0];
 
        if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
                mem->comp = mem->kind = 0;
index 08b974b..dff51a0 100644 (file)
@@ -235,27 +235,46 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
        drm->ttm.mem_global_ref.release = NULL;
 }
 
-int
-nouveau_ttm_init(struct nouveau_drm *drm)
+static int
+nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
 {
-       struct nvkm_device *device = nvxx_device(&drm->client.device);
-       struct nvkm_pci *pci = device->pci;
        struct nvif_mmu *mmu = &drm->client.mmu;
-       struct drm_device *dev = drm->dev;
-       int typei, ret;
+       int typei;
 
        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
-                                                  NVIF_MEM_COHERENT);
+                                           kind | NVIF_MEM_COHERENT);
        if (typei < 0)
                return -ENOSYS;
 
-       drm->ttm.type_host = typei;
+       drm->ttm.type_host[!!kind] = typei;
 
-       typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
+       typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
        if (typei < 0)
                return -ENOSYS;
 
-       drm->ttm.type_ncoh = typei;
+       drm->ttm.type_ncoh[!!kind] = typei;
+       return 0;
+}
+
+int
+nouveau_ttm_init(struct nouveau_drm *drm)
+{
+       struct nvkm_device *device = nvxx_device(&drm->client.device);
+       struct nvkm_pci *pci = device->pci;
+       struct nvif_mmu *mmu = &drm->client.mmu;
+       struct drm_device *dev = drm->dev;
+       int typei, ret;
+
+       ret = nouveau_ttm_init_host(drm, 0);
+       if (ret)
+               return ret;
+
+       if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+           drm->client.device.info.chipset != 0x50) {
+               ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
+               if (ret)
+                       return ret;
+       }
 
        if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
            drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
index 9e2628d..f5371d9 100644 (file)
@@ -67,8 +67,8 @@ nouveau_vma_del(struct nouveau_vma **pvma)
                        nvif_vmm_put(&vma->vmm->vmm, &tmp);
                }
                list_del(&vma->head);
-               *pvma = NULL;
                kfree(*pvma);
+               *pvma = NULL;
        }
 }
 
index e146436..08e77cd 100644 (file)
@@ -1251,7 +1251,7 @@ nvaa_chipset = {
        .i2c = g94_i2c_new,
        .imem = nv50_instmem_new,
        .mc = g98_mc_new,
-       .mmu = g84_mmu_new,
+       .mmu = mcp77_mmu_new,
        .mxm = nv50_mxm_new,
        .pci = g94_pci_new,
        .therm = g84_therm_new,
@@ -1283,7 +1283,7 @@ nvac_chipset = {
        .i2c = g94_i2c_new,
        .imem = nv50_instmem_new,
        .mc = g98_mc_new,
-       .mmu = g84_mmu_new,
+       .mmu = mcp77_mmu_new,
        .mxm = nv50_mxm_new,
        .pci = g94_pci_new,
        .therm = g84_therm_new,
@@ -2369,7 +2369,7 @@ nv13b_chipset = {
        .imem = gk20a_instmem_new,
        .ltc = gp100_ltc_new,
        .mc = gp10b_mc_new,
-       .mmu = gf100_mmu_new,
+       .mmu = gp10b_mmu_new,
        .secboot = gp10b_secboot_new,
        .pmu = gm20b_pmu_new,
        .timer = gk20a_timer_new,
index a2978a3..700fc75 100644 (file)
@@ -174,6 +174,7 @@ gf119_sor = {
                .links = gf119_sor_dp_links,
                .power = g94_sor_dp_power,
                .pattern = gf119_sor_dp_pattern,
+               .drive = gf119_sor_dp_drive,
                .vcpi = gf119_sor_dp_vcpi,
                .audio = gf119_sor_dp_audio,
                .audio_sym = gf119_sor_dp_audio_sym,
index 9646ade..243f0a5 100644 (file)
@@ -73,7 +73,8 @@ static int
 nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
 {
        struct nvkm_bar *bar = nvkm_bar(subdev);
-       bar->func->bar1.fini(bar);
+       if (bar->func->bar1.fini)
+               bar->func->bar1.fini(bar);
        return 0;
 }
 
index b10077d..35878fb 100644 (file)
@@ -26,7 +26,6 @@ gk20a_bar_func = {
        .dtor = gf100_bar_dtor,
        .oneinit = gf100_bar_oneinit,
        .bar1.init = gf100_bar_bar1_init,
-       .bar1.fini = gf100_bar_bar1_fini,
        .bar1.wait = gf100_bar_bar1_wait,
        .bar1.vmm = gf100_bar_bar1_vmm,
        .flush = g84_bar_flush,
index 972370e..7c7efa4 100644 (file)
@@ -36,6 +36,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
                        if (data) {
                                *ver = nvbios_rd08(bios, data + 0x00);
                                switch (*ver) {
+                               case 0x20:
                                case 0x21:
                                case 0x30:
                                case 0x40:
@@ -63,6 +64,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
        if (data && idx < *cnt) {
                u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
                switch (*ver * !!outp) {
+               case 0x20:
                case 0x21:
                case 0x30:
                        *hdr = nvbios_rd08(bios, data + 0x04);
@@ -96,12 +98,16 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
                info->type = nvbios_rd16(bios, data + 0x00);
                info->mask = nvbios_rd16(bios, data + 0x02);
                switch (*ver) {
+               case 0x20:
+                       info->mask |= 0x00c0; /* match any link */
+                       /* fall-through */
                case 0x21:
                case 0x30:
                        info->flags     = nvbios_rd08(bios, data + 0x05);
                        info->script[0] = nvbios_rd16(bios, data + 0x06);
                        info->script[1] = nvbios_rd16(bios, data + 0x08);
-                       info->lnkcmp    = nvbios_rd16(bios, data + 0x0a);
+                       if (*len >= 0x0c)
+                               info->lnkcmp    = nvbios_rd16(bios, data + 0x0a);
                        if (*len >= 0x0f) {
                                info->script[2] = nvbios_rd16(bios, data + 0x0c);
                                info->script[3] = nvbios_rd16(bios, data + 0x0e);
@@ -170,6 +176,7 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
        memset(info, 0x00, sizeof(*info));
        if (data) {
                switch (*ver) {
+               case 0x20:
                case 0x21:
                        info->dc    = nvbios_rd08(bios, data + 0x02);
                        info->pe    = nvbios_rd08(bios, data + 0x03);
index 1ba7289..db48a1d 100644 (file)
@@ -249,7 +249,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
                        iobj->base.memory.ptrs = &nv50_instobj_fast;
                else
                        iobj->base.memory.ptrs = &nv50_instobj_slow;
-               refcount_inc(&iobj->maps);
+               refcount_set(&iobj->maps, 1);
        }
 
        mutex_unlock(&imem->subdev.mutex);
index 352a65f..67ee983 100644 (file)
@@ -4,6 +4,7 @@ nvkm-y += nvkm/subdev/mmu/nv41.o
 nvkm-y += nvkm/subdev/mmu/nv44.o
 nvkm-y += nvkm/subdev/mmu/nv50.o
 nvkm-y += nvkm/subdev/mmu/g84.o
+nvkm-y += nvkm/subdev/mmu/mcp77.o
 nvkm-y += nvkm/subdev/mmu/gf100.o
 nvkm-y += nvkm/subdev/mmu/gk104.o
 nvkm-y += nvkm/subdev/mmu/gk20a.o
@@ -22,6 +23,7 @@ nvkm-y += nvkm/subdev/mmu/vmmnv04.o
 nvkm-y += nvkm/subdev/mmu/vmmnv41.o
 nvkm-y += nvkm/subdev/mmu/vmmnv44.o
 nvkm-y += nvkm/subdev/mmu/vmmnv50.o
+nvkm-y += nvkm/subdev/mmu/vmmmcp77.o
 nvkm-y += nvkm/subdev/mmu/vmmgf100.o
 nvkm-y += nvkm/subdev/mmu/vmmgk104.o
 nvkm-y += nvkm/subdev/mmu/vmmgk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
new file mode 100644 (file)
index 0000000..0527b50
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+mcp77_mmu = {
+       .dma_bits = 40,
+       .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+       .mem = {{ -1,  0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+       .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, mcp77_vmm_new, false, 0x0200 },
+       .kind = nv50_mmu_kind,
+       .kind_sys = true,
+};
+
+int
+mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+       return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu);
+}
index 6d8f61e..da06e64 100644 (file)
@@ -95,6 +95,9 @@ struct nvkm_vmm_desc {
        const struct nvkm_vmm_desc_func *func;
 };
 
+extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
+extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];
+
 extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
 extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
 extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
@@ -169,6 +172,11 @@ int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
                  const char *, struct nvkm_vmm **);
 int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
 
+int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
+int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void nv50_vmm_flush(struct nvkm_vmm *, int);
+
 int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
                   struct nvkm_mmu *, u64, u64, void *, u32,
                   struct lock_class_key *, const char *, struct nvkm_vmm **);
@@ -200,6 +208,8 @@ int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
                 struct lock_class_key *, const char *, struct nvkm_vmm **);
 int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
                 struct lock_class_key *, const char *, struct nvkm_vmm **);
+int mcp77_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+                 struct lock_class_key *, const char *, struct nvkm_vmm **);
 int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
                struct lock_class_key *, const char *, struct nvkm_vmm **);
 int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
new file mode 100644 (file)
index 0000000..e63d984
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+mcp77_vmm = {
+       .join = nv50_vmm_join,
+       .part = nv50_vmm_part,
+       .valid = nv50_vmm_valid,
+       .flush = nv50_vmm_flush,
+       .page_block = 1 << 29,
+       .page = {
+               { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxx },
+               { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
+               {}
+       }
+};
+
+int
+mcp77_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+             struct lock_class_key *key, const char *name,
+             struct nvkm_vmm **pvmm)
+{
+       return nv04_vmm_new_(&mcp77_vmm, mmu, 0, addr, size,
+                            argv, argc, key, name, pvmm);
+}
index 863a2ed..64f75d9 100644 (file)
@@ -32,7 +32,7 @@ static inline void
 nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
 {
-       u64 next = addr | map->type, data;
+       u64 next = addr + map->type, data;
        u32 pten;
        int log2blk;
 
@@ -69,7 +69,7 @@ nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
                nvkm_kmap(pt->memory);
                while (ptes--) {
-                       const u64 data = *map->dma++ | map->type;
+                       const u64 data = *map->dma++ + map->type;
                        VMM_WO064(pt, vmm, ptei++ * 8, data);
                        map->type += map->ctag;
                }
@@ -163,21 +163,21 @@ nv50_vmm_pgd = {
        .pde = nv50_vmm_pgd_pde,
 };
 
-static const struct nvkm_vmm_desc
+const struct nvkm_vmm_desc
 nv50_vmm_desc_12[] = {
        { PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
        { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
        {}
 };
 
-static const struct nvkm_vmm_desc
+const struct nvkm_vmm_desc
 nv50_vmm_desc_16[] = {
        { PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
        { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
        {}
 };
 
-static void
+void
 nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
 {
        struct nvkm_subdev *subdev = &vmm->mmu->subdev;
@@ -223,7 +223,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
        mutex_unlock(&subdev->mutex);
 }
 
-static int
+int
 nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
               struct nvkm_vmm_map *map)
 {
@@ -321,7 +321,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
        return 0;
 }
 
-static void
+void
 nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
        struct nvkm_vmm_join *join;
@@ -335,7 +335,7 @@ nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
        }
 }
 
-static int
+int
 nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
        const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
index b1b1f36..ee2431a 100644 (file)
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
        struct nvkm_pci *pci = arg;
        struct nvkm_device *device = pci->subdev.device;
        bool handled = false;
+
+       if (pci->irq < 0)
+               return IRQ_HANDLED;
+
        nvkm_mc_intr_unarm(device);
        if (pci->msi)
                pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
 {
        struct nvkm_pci *pci = nvkm_pci(subdev);
 
-       if (pci->irq >= 0) {
-               free_irq(pci->irq, pci);
-               pci->irq = -1;
-       }
-
        if (pci->agp.bridge)
                nvkm_agp_fini(pci);
 
@@ -108,8 +107,20 @@ static int
 nvkm_pci_oneinit(struct nvkm_subdev *subdev)
 {
        struct nvkm_pci *pci = nvkm_pci(subdev);
-       if (pci_is_pcie(pci->pdev))
-               return nvkm_pcie_oneinit(pci);
+       struct pci_dev *pdev = pci->pdev;
+       int ret;
+
+       if (pci_is_pcie(pci->pdev)) {
+               ret = nvkm_pcie_oneinit(pci);
+               if (ret)
+                       return ret;
+       }
+
+       ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+       if (ret)
+               return ret;
+
+       pci->irq = pdev->irq;
        return 0;
 }
 
@@ -117,7 +128,6 @@ static int
 nvkm_pci_init(struct nvkm_subdev *subdev)
 {
        struct nvkm_pci *pci = nvkm_pci(subdev);
-       struct pci_dev *pdev = pci->pdev;
        int ret;
 
        if (pci->agp.bridge) {
@@ -131,21 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
        if (pci->func->init)
                pci->func->init(pci);
 
-       ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
-       if (ret)
-               return ret;
+       /* Ensure MSI interrupts are armed, for the case where there are
+        * already interrupts pending (for whatever reason) at load time.
+        */
+       if (pci->msi)
+               pci->func->msi_rearm(pci);
 
-       pci->irq = pdev->irq;
-       return ret;
+       return 0;
 }
 
 static void *
 nvkm_pci_dtor(struct nvkm_subdev *subdev)
 {
        struct nvkm_pci *pci = nvkm_pci(subdev);
+
        nvkm_agp_dtor(pci);
+
+       if (pci->irq >= 0) {
+               /* free_irq() will call the handler, we use pci->irq == -1
+                * to signal that it's been torn down and should be a noop.
+                */
+               int irq = pci->irq;
+               pci->irq = -1;
+               free_irq(irq, pci);
+       }
+
        if (pci->msi)
                pci_disable_msi(pci->pdev);
+
        return nvkm_pci(subdev);
 }
 
index c226da1..a349cb6 100644 (file)
@@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV
 
 config DRM_OMAP_PANEL_DPI
        tristate "Generic DPI panel"
+       depends on BACKLIGHT_CLASS_DEVICE
        help
          Driver for generic DPI panels.
 
index daf286f..ca1e3b4 100644 (file)
@@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll)
 }
 
 static const struct soc_device_attribute dpi_soc_devices[] = {
-       { .family = "OMAP3[456]*" },
-       { .family = "[AD]M37*" },
+       { .machine = "OMAP3[456]*" },
+       { .machine = "[AD]M37*" },
        { /* sentinel */ }
 };
 
index d86873f..23db74a 100644 (file)
@@ -78,6 +78,8 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
 
                        /* then read the message */
                        msg.len = cnt & 0xf;
+                       if (msg.len > CEC_MAX_MSG_SIZE - 2)
+                               msg.len = CEC_MAX_MSG_SIZE - 2;
                        msg.msg[0] = hdmi_read_reg(core->base,
                                                   HDMI_CEC_RX_CMD_HEADER);
                        msg.msg[1] = hdmi_read_reg(core->base,
@@ -104,26 +106,6 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
        }
 }
 
-static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1)
-{
-       if (stat1 & 2) {
-               u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
-
-               cec_transmit_done(core->adap,
-                                 CEC_TX_STATUS_NACK |
-                                 CEC_TX_STATUS_MAX_RETRIES,
-                                 0, (dbg3 >> 4) & 7, 0, 0);
-       } else if (stat1 & 1) {
-               cec_transmit_done(core->adap,
-                                 CEC_TX_STATUS_ARB_LOST |
-                                 CEC_TX_STATUS_MAX_RETRIES,
-                                 0, 0, 0, 0);
-       } else if (stat1 == 0) {
-               cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
-                                 0, 0, 0, 0);
-       }
-}
-
 void hdmi4_cec_irq(struct hdmi_core_data *core)
 {
        u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
@@ -132,27 +114,21 @@ void hdmi4_cec_irq(struct hdmi_core_data *core)
        hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
        hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);
 
-       if (stat0 & 0x40)
+       if (stat0 & 0x20) {
+               cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
+                                 0, 0, 0, 0);
                REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
-       else if (stat0 & 0x24)
-               hdmi_cec_transmit_fifo_empty(core, stat1);
-       if (stat1 & 2) {
+       } else if (stat1 & 0x02) {
                u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
 
                cec_transmit_done(core->adap,
                                  CEC_TX_STATUS_NACK |
                                  CEC_TX_STATUS_MAX_RETRIES,
                                  0, (dbg3 >> 4) & 7, 0, 0);
-       } else if (stat1 & 1) {
-               cec_transmit_done(core->adap,
-                                 CEC_TX_STATUS_ARB_LOST |
-                                 CEC_TX_STATUS_MAX_RETRIES,
-                                 0, 0, 0, 0);
+               REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
        }
        if (stat0 & 0x02)
                hdmi_cec_received_msg(core);
-       if (stat1 & 0x3)
-               REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
 }
 
 static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
@@ -231,18 +207,14 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
        /*
         * Enable CEC interrupts:
         * Transmit Buffer Full/Empty Change event
-        * Transmitter FIFO Empty event
         * Receiver FIFO Not Empty event
         */
-       hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26);
+       hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x22);
        /*
         * Enable CEC interrupts:
-        * RX FIFO Overrun Error event
-        * Short Pulse Detected event
         * Frame Retransmit Count Exceeded event
-        * Start Bit Irregularity event
         */
-       hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f);
+       hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x02);
 
        /* cec calibration enable (self clearing) */
        hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
@@ -352,7 +324,7 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
 {
        const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
                         CEC_CAP_PASSTHROUGH | CEC_CAP_RC;
-       unsigned int ret;
+       int ret;
 
        core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core,
                "omap4", caps, CEC_MAX_LOG_ADDRS);
index 62e4511..b06f995 100644 (file)
@@ -886,25 +886,36 @@ struct hdmi4_features {
        bool audio_use_mclk;
 };
 
-static const struct hdmi4_features hdmi4_es1_features = {
+static const struct hdmi4_features hdmi4430_es1_features = {
        .cts_swmode = false,
        .audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es2_features = {
+static const struct hdmi4_features hdmi4430_es2_features = {
        .cts_swmode = true,
        .audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es3_features = {
+static const struct hdmi4_features hdmi4_features = {
        .cts_swmode = true,
        .audio_use_mclk = true,
 };
 
 static const struct soc_device_attribute hdmi4_soc_devices[] = {
-       { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
-       { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
-       { .family = "OMAP4",                      .data = &hdmi4_es3_features },
+       {
+               .machine = "OMAP4430",
+               .revision = "ES1.?",
+               .data = &hdmi4430_es1_features,
+       },
+       {
+               .machine = "OMAP4430",
+               .revision = "ES2.?",
+               .data = &hdmi4430_es2_features,
+       },
+       {
+               .family = "OMAP4",
+               .data = &hdmi4_features,
+       },
        { /* sentinel */ }
 };
 
index 1dd3daf..c60a85e 100644 (file)
@@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev)
                match = of_match_node(dmm_of_match, dev->dev.of_node);
                if (!match) {
                        dev_err(&dev->dev, "failed to find matching device node\n");
-                       return -ENODEV;
+                       ret = -ENODEV;
+                       goto fail;
                }
 
                omap_dmm->plat_data = match->data;
index 898f9a0..a651191 100644 (file)
@@ -5451,28 +5451,6 @@ void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
        WREG32(VM_INVALIDATE_REQUEST, 0x1);
 }
 
-static void cik_pcie_init_compute_vmid(struct radeon_device *rdev)
-{
-       int i;
-       uint32_t sh_mem_bases, sh_mem_config;
-
-       sh_mem_bases = 0x6000 | 0x6000 << 16;
-       sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
-       sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
-
-       mutex_lock(&rdev->srbm_mutex);
-       for (i = 8; i < 16; i++) {
-               cik_srbm_select(rdev, 0, 0, 0, i);
-               /* CP and shaders */
-               WREG32(SH_MEM_CONFIG, sh_mem_config);
-               WREG32(SH_MEM_APE1_BASE, 1);
-               WREG32(SH_MEM_APE1_LIMIT, 0);
-               WREG32(SH_MEM_BASES, sh_mem_bases);
-       }
-       cik_srbm_select(rdev, 0, 0, 0, 0);
-       mutex_unlock(&rdev->srbm_mutex);
-}
-
 /**
  * cik_pcie_gart_enable - gart enable
  *
@@ -5586,8 +5564,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
        cik_srbm_select(rdev, 0, 0, 0, 0);
        mutex_unlock(&rdev->srbm_mutex);
 
-       cik_pcie_init_compute_vmid(rdev);
-
        cik_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20),
index b15755b..b1fe063 100644 (file)
@@ -1285,8 +1285,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
                goto err_pllref;
        }
 
-       pm_runtime_enable(dev);
-
        dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
        dsi->dsi_host.dev = dev;
        ret = mipi_dsi_host_register(&dsi->dsi_host);
@@ -1301,6 +1299,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
        }
 
        dev_set_drvdata(dev, dsi);
+       pm_runtime_enable(dev);
        return 0;
 
 err_mipi_dsi_host:
index dda904e..500b6fb 100644 (file)
@@ -175,11 +175,31 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
        writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG);
 }
 
+static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
+                                       const struct drm_display_mode *mode)
+{
+       struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
+       unsigned long rate = mode->clock * 1000;
+       unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */
+       long rounded_rate;
+
+       /* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */
+       if (rate > 165000000)
+               return MODE_CLOCK_HIGH;
+       rounded_rate = clk_round_rate(hdmi->tmds_clk, rate);
+       if (rounded_rate > 0 &&
+           max_t(unsigned long, rounded_rate, rate) -
+           min_t(unsigned long, rounded_rate, rate) < diff)
+               return MODE_OK;
+       return MODE_NOCLOCK;
+}
+
 static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
        .atomic_check   = sun4i_hdmi_atomic_check,
        .disable        = sun4i_hdmi_disable,
        .enable         = sun4i_hdmi_enable,
        .mode_set       = sun4i_hdmi_mode_set,
+       .mode_valid     = sun4i_hdmi_mode_valid,
 };
 
 static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
index dc332ea..3ecffa5 100644 (file)
@@ -102,10 +102,13 @@ static int sun4i_tmds_determine_rate(struct clk_hw *hw,
                                        goto out;
                                }
 
-                               if (abs(rate - rounded / i) <
-                                   abs(rate - best_parent / best_div)) {
+                               if (!best_parent ||
+                                   abs(rate - rounded / i / j) <
+                                   abs(rate - best_parent / best_half /
+                                       best_div)) {
                                        best_parent = rounded;
-                                       best_div = i;
+                                       best_half = i;
+                                       best_div = j;
                                }
                        }
                }
index e122f5b..f4284b5 100644 (file)
@@ -724,12 +724,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
        if (IS_ERR(tcon->crtc)) {
                dev_err(dev, "Couldn't create our CRTC\n");
                ret = PTR_ERR(tcon->crtc);
-               goto err_free_clocks;
+               goto err_free_dotclock;
        }
 
        ret = sun4i_rgb_init(drm, tcon);
        if (ret < 0)
-               goto err_free_clocks;
+               goto err_free_dotclock;
 
        if (tcon->quirks->needs_de_be_mux) {
                /*
index b0a1ded..476079f 100644 (file)
@@ -2656,6 +2656,9 @@ static int tegra_sor_probe(struct platform_device *pdev)
                                name, err);
                        goto remove;
                }
+       } else {
+               /* fall back to the module clock on SOR0 (eDP/LVDS only) */
+               sor->clk_out = sor->clk;
        }
 
        sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
index 316f831..5d252fb 100644 (file)
@@ -81,6 +81,7 @@ struct ttm_page_pool {
        char                    *name;
        unsigned long           nfrees;
        unsigned long           nrefills;
+       unsigned int            order;
 };
 
 /**
@@ -222,6 +223,17 @@ static struct kobj_type ttm_pool_kobj_type = {
 static struct ttm_pool_manager *_manager;
 
 #ifndef CONFIG_X86
+static int set_pages_wb(struct page *page, int numpages)
+{
+#if IS_ENABLED(CONFIG_AGP)
+       int i;
+
+       for (i = 0; i < numpages; i++)
+               unmap_page_from_agp(page++);
+#endif
+       return 0;
+}
+
 static int set_pages_array_wb(struct page **pages, int addrinarray)
 {
 #if IS_ENABLED(CONFIG_AGP)
@@ -284,13 +296,23 @@ static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
 }
 
 /* set memory back to wb and free the pages. */
-static void ttm_pages_put(struct page *pages[], unsigned npages)
+static void ttm_pages_put(struct page *pages[], unsigned npages,
+               unsigned int order)
 {
-       unsigned i;
-       if (set_pages_array_wb(pages, npages))
-               pr_err("Failed to set %d pages to wb!\n", npages);
-       for (i = 0; i < npages; ++i)
-               __free_page(pages[i]);
+       unsigned int i, pages_nr = (1 << order);
+
+       if (order == 0) {
+               if (set_pages_array_wb(pages, npages))
+                       pr_err("Failed to set %d pages to wb!\n", npages);
+       }
+
+       for (i = 0; i < npages; ++i) {
+               if (order > 0) {
+                       if (set_pages_wb(pages[i], pages_nr))
+                               pr_err("Failed to set %d pages to wb!\n", pages_nr);
+               }
+               __free_pages(pages[i], order);
+       }
 }
 
 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
@@ -353,7 +375,7 @@ restart:
                         */
                        spin_unlock_irqrestore(&pool->lock, irq_flags);
 
-                       ttm_pages_put(pages_to_free, freed_pages);
+                       ttm_pages_put(pages_to_free, freed_pages, pool->order);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;
 
@@ -388,7 +410,7 @@ restart:
        spin_unlock_irqrestore(&pool->lock, irq_flags);
 
        if (freed_pages)
-               ttm_pages_put(pages_to_free, freed_pages);
+               ttm_pages_put(pages_to_free, freed_pages, pool->order);
 out:
        if (pages_to_free != static_buf)
                kfree(pages_to_free);
@@ -412,6 +434,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        struct ttm_page_pool *pool;
        int shrink_pages = sc->nr_to_scan;
        unsigned long freed = 0;
+       unsigned int nr_free_pool;
 
        if (!mutex_trylock(&lock))
                return SHRINK_STOP;
@@ -419,12 +442,20 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
+               unsigned page_nr;
+
                if (shrink_pages == 0)
                        break;
+
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+               page_nr = (1 << pool->order);
                /* OK to use static buffer since global mutex is held. */
-               shrink_pages = ttm_page_pool_free(pool, nr_free, true);
-               freed += nr_free - shrink_pages;
+               nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
+               shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
+               freed += (nr_free_pool - shrink_pages) << pool->order;
+               if (freed >= sc->nr_to_scan)
+                       break;
+               shrink_pages <<= pool->order;
        }
        mutex_unlock(&lock);
        return freed;
@@ -436,9 +467,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
        unsigned i;
        unsigned long count = 0;
+       struct ttm_page_pool *pool;
 
-       for (i = 0; i < NUM_POOLS; ++i)
-               count += _manager->pools[i].npages;
+       for (i = 0; i < NUM_POOLS; ++i) {
+               pool = &_manager->pools[i];
+               count += (pool->npages << pool->order);
+       }
 
        return count;
 }
@@ -510,8 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
        int r = 0;
        unsigned i, j, cpages;
        unsigned npages = 1 << order;
-       unsigned max_cpages = min(count,
-                       (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+       unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
 
        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
@@ -744,12 +777,14 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                        }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       for (j = 0; j < HPAGE_PMD_NR; ++j)
-                               if (p++ != pages[i + j])
-                                   break;
+                       if (!(flags & TTM_PAGE_FLAG_DMA32)) {
+                               for (j = 0; j < HPAGE_PMD_NR; ++j)
+                                       if (p++ != pages[i + j])
+                                           break;
 
-                       if (j == HPAGE_PMD_NR)
-                               order = HPAGE_PMD_ORDER;
+                               if (j == HPAGE_PMD_NR)
+                                       order = HPAGE_PMD_ORDER;
+                       }
 #endif
 
                        if (page_count(pages[i]) != 1)
@@ -843,7 +878,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 #endif
        struct list_head plist;
        struct page *p = NULL;
-       unsigned count;
+       unsigned count, first;
        int r;
 
        /* No pool for cached pages */
@@ -865,23 +900,26 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 
                i = 0;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               while (npages >= HPAGE_PMD_NR) {
-                       gfp_t huge_flags = gfp_flags;
+               if (!(gfp_flags & GFP_DMA32)) {
+                       while (npages >= HPAGE_PMD_NR) {
+                               gfp_t huge_flags = gfp_flags;
 
-                       huge_flags |= GFP_TRANSHUGE;
-                       huge_flags &= ~__GFP_MOVABLE;
-                       huge_flags &= ~__GFP_COMP;
-                       p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
-                       if (!p)
-                               break;
+                               huge_flags |= GFP_TRANSHUGE;
+                               huge_flags &= ~__GFP_MOVABLE;
+                               huge_flags &= ~__GFP_COMP;
+                               p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+                               if (!p)
+                                       break;
 
-                       for (j = 0; j < HPAGE_PMD_NR; ++j)
-                               pages[i++] = p++;
+                               for (j = 0; j < HPAGE_PMD_NR; ++j)
+                                       pages[i++] = p++;
 
-                       npages -= HPAGE_PMD_NR;
+                               npages -= HPAGE_PMD_NR;
+                       }
                }
 #endif
 
+               first = i;
                while (npages) {
                        p = alloc_page(gfp_flags);
                        if (!p) {
@@ -889,6 +927,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                                return -ENOMEM;
                        }
 
+                       /* Swap the pages if we detect consecutive order */
+                       if (i > first && pages[i - 1] == p - 1)
+                               swap(p, pages[i - 1]);
+
                        pages[i++] = p;
                        --npages;
                }
@@ -917,8 +959,15 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
        r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
                                    npages - count, 0);
 
-       list_for_each_entry(p, &plist, lru)
-               pages[count++] = p;
+       first = count;
+       list_for_each_entry(p, &plist, lru) {
+               struct page *tmp = p;
+
+               /* Swap the pages if we detect consecutive order */
+               if (count > first && pages[count - 1] == tmp - 1)
+                       swap(tmp, pages[count - 1]);
+               pages[count++] = tmp;
+       }
 
        if (r) {
                /* If there is any pages in the list put them back to
@@ -933,7 +982,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 }
 
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
-               char *name)
+               char *name, unsigned int order)
 {
        spin_lock_init(&pool->lock);
        pool->fill_lock = false;
@@ -941,35 +990,43 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
        pool->npages = pool->nfrees = 0;
        pool->gfp_flags = flags;
        pool->name = name;
+       pool->order = order;
 }
 
 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
        int ret;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       unsigned order = HPAGE_PMD_ORDER;
+#else
+       unsigned order = 0;
+#endif
 
        WARN_ON(_manager);
 
        pr_info("Initializing pool allocator\n");
 
        _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+       if (!_manager)
+               return -ENOMEM;
 
-       ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
+       ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
 
-       ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
+       ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
 
        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
-                                 GFP_USER | GFP_DMA32, "wc dma");
+                                 GFP_USER | GFP_DMA32, "wc dma", 0);
 
        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
-                                 GFP_USER | GFP_DMA32, "uc dma");
+                                 GFP_USER | GFP_DMA32, "uc dma", 0);
 
        ttm_page_pool_init_locked(&_manager->wc_pool_huge,
                                  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
-                                 "wc huge");
+                                 "wc huge", order);
 
        ttm_page_pool_init_locked(&_manager->uc_pool_huge,
                                  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
-                                 , "uc huge");
+                                 , "uc huge", order);
 
        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
@@ -1058,7 +1115,6 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
 
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
 {
        unsigned i, j;
@@ -1129,7 +1185,6 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
        ttm_pool_unpopulate(&tt->ttm);
 }
 EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
-#endif
 
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 {
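
A side note on the order-aware accounting introduced above: a pool of order N holds compound allocations of 2^N pages, so the shrinker's page budget must be rounded up to whole pool entries (roundup(nr_free, 1 << order) >> order) and freed entry counts converted back to pages when reported. A small user-space model of just that arithmetic, not kernel code; the order value 9 matches HPAGE_PMD_ORDER on x86-64:

    #include <stdio.h>

    /* same effect as the kernel's roundup(nr_pages, 1 << order) >> order */
    static unsigned int pages_to_entries(unsigned int nr_pages, unsigned int order)
    {
        unsigned int per_entry = 1u << order;

        return (nr_pages + per_entry - 1) / per_entry;
    }

    int main(void)
    {
        unsigned int order = 9;         /* HPAGE_PMD_ORDER on x86-64 */
        unsigned int budget = 1000;     /* shrinker budget, in pages */
        unsigned int entries = pages_to_entries(budget, order);
        unsigned int freed_entries = 2; /* pretend two huge entries were freed */

        printf("%u pages -> %u pool entries; freeing %u entries reports %u pages\n",
               budget, entries, freed_entries, freed_entries << order);
        return 0;
    }
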
index 4ae45d7..2decc8e 100644 (file)
@@ -637,7 +637,8 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)
        mutex_lock(&bo->madv_lock);
        switch (bo->madv) {
        case VC4_MADV_WILLNEED:
-               refcount_inc(&bo->usecnt);
+               if (!refcount_inc_not_zero(&bo->usecnt))
+                       refcount_set(&bo->usecnt, 1);
                ret = 0;
                break;
        case VC4_MADV_DONTNEED:
index 6c32c89..c94cce9 100644 (file)
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
        struct vc4_exec_info *exec[2];
        struct vc4_bo *bo;
        unsigned long irqflags;
-       unsigned int i, j, unref_list_count, prev_idx;
+       unsigned int i, j, k, unref_list_count;
 
        kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
        if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
                return;
        }
 
-       prev_idx = 0;
+       k = 0;
        for (i = 0; i < 2; i++) {
                if (!exec[i])
                        continue;
@@ -197,7 +197,7 @@ vc4_save_hang_state(struct drm_device *dev)
                        WARN_ON(!refcount_read(&bo->usecnt));
                        refcount_inc(&bo->usecnt);
                        drm_gem_object_get(&exec[i]->bo[j]->base);
-                       kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+                       kernel_state->bo[k++] = &exec[i]->bo[j]->base;
                }
 
                list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -205,12 +205,12 @@ vc4_save_hang_state(struct drm_device *dev)
                         * because they are naturally unpurgeable.
                         */
                        drm_gem_object_get(&bo->base.base);
-                       kernel_state->bo[j + prev_idx] = &bo->base.base;
-                       j++;
+                       kernel_state->bo[k++] = &bo->base.base;
                }
-               prev_idx = j + 1;
        }
 
+       WARN_ON_ONCE(k != state->bo_count);
+
        if (exec[0])
                state->start_bin = exec[0]->ct0ca;
        if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
 }
 
+static void
+vc4_flush_texture_caches(struct drm_device *dev)
+{
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+       V3D_WRITE(V3D_L2CACTL,
+                 V3D_L2CACTL_L2CCLR);
+
+       V3D_WRITE(V3D_SLCACTL,
+                 VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+                 VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
+}
+
 /* Sets the registers for the next job to actually be executed in
  * the hardware.
  *
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
        if (!exec)
                return;
 
+       /* A previous RCL may have written to one of our textures, and
+        * our full cache flush at bin time may have occurred before
+        * that RCL completed.  Flush the texture cache now, but not
+        * the instructions or uniforms (since we don't write those
+        * from an RCL).
+        */
+       vc4_flush_texture_caches(dev);
+
        submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
 }
 
@@ -888,8 +909,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
        /* If we got force-completed because of GPU reset rather than
         * through our IRQ handler, signal the fence now.
         */
-       if (exec->fence)
+       if (exec->fence) {
                dma_fence_signal(exec->fence);
+               dma_fence_put(exec->fence);
+       }
 
        if (exec->bo) {
                for (i = 0; i < exec->bo_count; i++) {
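
The vc4_save_hang_state() change above swaps the error-prone "j + prev_idx" bookkeeping for a single cursor k that advances once per saved BO and is checked against the expected total at the end. A trivial user-space illustration of the single-cursor pattern:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        int list_a[] = { 1, 2, 3 };     /* stands in for one exec's BO array */
        int list_b[] = { 4, 5 };        /* stands in for its unref list */
        int out[5];
        unsigned int i, k = 0;

        for (i = 0; i < 3; i++)
            out[k++] = list_a[i];
        for (i = 0; i < 2; i++)
            out[k++] = list_b[i];

        /* mirrors the WARN_ON_ONCE(k != state->bo_count) added above */
        assert(k == sizeof(out) / sizeof(out[0]));
        printf("saved %u entries\n", k);
        return 0;
    }
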
index fa37a1c..0b20882 100644 (file)
@@ -424,7 +424,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
                                           vc4_encoder->limited_rgb_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
                                           HDMI_QUANTIZATION_RANGE_FULL,
-                                          vc4_encoder->rgb_range_selectable);
+                                          vc4_encoder->rgb_range_selectable,
+                                          false);
 
        vc4_hdmi_write_infoframe(encoder, &frame);
 }
index 7d7af3a..3dd62d7 100644 (file)
@@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev)
        list_move_tail(&exec->head, &vc4->job_done_list);
        if (exec->fence) {
                dma_fence_signal_locked(exec->fence);
+               dma_fence_put(exec->fence);
                exec->fence = NULL;
        }
        vc4_submit_next_render_job(dev);
@@ -225,6 +226,9 @@ vc4_irq_uninstall(struct drm_device *dev)
        /* Clear any pending interrupts we might have left. */
        V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
 
+       /* Finish any interrupt handler still in flight. */
+       disable_irq(dev->irq);
+
        cancel_work_sync(&vc4->overflow_mem_work);
 }
 
index 622cd43..493f392 100644 (file)
@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
                return ret;
 
        vc4_v3d_init_hw(vc4->dev);
+
+       /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
+       enable_irq(vc4->dev->irq);
        vc4_irq_postinstall(vc4->dev);
 
        return 0;
index 21c62a3..87e8af5 100644 (file)
@@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
        }
 
        view_type = vmw_view_cmd_to_type(header->id);
+       if (view_type == vmw_view_max)
+               return -EINVAL;
        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
index 0545740..fcd5814 100644 (file)
@@ -697,7 +697,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
        vps->pinned = 0;
 
        /* Mapping is managed by prepare_fb/cleanup_fb */
-       memset(&vps->guest_map, 0, sizeof(vps->guest_map));
        memset(&vps->host_map, 0, sizeof(vps->host_map));
        vps->cpp = 0;
 
@@ -760,11 +759,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
 
 
        /* Should have been freed by cleanup_fb */
-       if (vps->guest_map.virtual) {
-               DRM_ERROR("Guest mapping not freed\n");
-               ttm_bo_kunmap(&vps->guest_map);
-       }
-
        if (vps->host_map.virtual) {
                DRM_ERROR("Host mapping not freed\n");
                ttm_bo_kunmap(&vps->host_map);
@@ -1869,7 +1863,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  */
 int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       return -ENOSYS;
+       return -EINVAL;
 }
 
 /**
index ff9c838..cd9da2d 100644 (file)
@@ -175,7 +175,7 @@ struct vmw_plane_state {
        int pinned;
 
        /* For CPU Blit */
-       struct ttm_bo_kmap_obj host_map, guest_map;
+       struct ttm_bo_kmap_obj host_map;
        unsigned int cpp;
 };
 
index b8a0980..3824595 100644 (file)
@@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
        .set_property = vmw_du_connector_set_property,
        .destroy = vmw_ldu_connector_destroy,
        .reset = vmw_du_connector_reset,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+       .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+       .atomic_destroy_state = vmw_du_connector_destroy_state,
        .atomic_set_property = vmw_du_connector_atomic_set_property,
        .atomic_get_property = vmw_du_connector_atomic_get_property,
 };
index bc5f602..63a4cd7 100644 (file)
@@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
        .set_property = vmw_du_connector_set_property,
        .destroy = vmw_sou_connector_destroy,
        .reset = vmw_du_connector_reset,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+       .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+       .atomic_destroy_state = vmw_du_connector_destroy_state,
        .atomic_set_property = vmw_du_connector_atomic_set_property,
        .atomic_get_property = vmw_du_connector_atomic_get_property,
 };
index 90b5437..b68d748 100644 (file)
@@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit {
        bool defined;
 
        /* For CPU Blit */
-       struct ttm_bo_kmap_obj host_map, guest_map;
+       struct ttm_bo_kmap_obj host_map;
        unsigned int cpp;
 };
 
@@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        s32 src_pitch, dst_pitch;
        u8 *src, *dst;
        bool not_used;
-
+       struct ttm_bo_kmap_obj guest_map;
+       int ret;
 
        if (!dirty->num_hits)
                return;
@@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        if (width == 0 || height == 0)
                return;
 
+       ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages,
+                         &guest_map);
+       if (ret) {
+               DRM_ERROR("Failed mapping framebuffer for blit: %d\n",
+                         ret);
+               goto out_cleanup;
+       }
 
        /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
        src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
@@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
 
        dst_pitch = ddirty->pitch;
-       dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
+       dst = ttm_kmap_obj_virtual(&guest_map, &not_used);
        dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
 
 
@@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
                vmw_fifo_commit(dev_priv, sizeof(*cmd));
        }
 
+       ttm_bo_kunmap(&guest_map);
 out_cleanup:
        ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
        ddirty->right = ddirty->bottom = S32_MIN;
@@ -1109,9 +1118,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
 {
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 
-       if (vps->guest_map.virtual)
-               ttm_bo_kunmap(&vps->guest_map);
-
        if (vps->host_map.virtual)
                ttm_bo_kunmap(&vps->host_map);
 
@@ -1277,33 +1283,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
         */
        if (vps->content_fb_type == SEPARATE_DMA &&
            !(dev_priv->capabilities & SVGA_CAP_3D)) {
-
-               struct vmw_framebuffer_dmabuf *new_vfbd;
-
-               new_vfbd = vmw_framebuffer_to_vfbd(new_fb);
-
-               ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false,
-                                    NULL);
-               if (ret)
-                       goto out_srf_unpin;
-
-               ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0,
-                                 new_vfbd->buffer->base.num_pages,
-                                 &vps->guest_map);
-
-               ttm_bo_unreserve(&new_vfbd->buffer->base);
-
-               if (ret) {
-                       DRM_ERROR("Failed to map content buffer to CPU\n");
-                       goto out_srf_unpin;
-               }
-
                ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0,
                                  vps->surf->res.backup->base.num_pages,
                                  &vps->host_map);
                if (ret) {
                        DRM_ERROR("Failed to map display buffer to CPU\n");
-                       ttm_bo_kunmap(&vps->guest_map);
                        goto out_srf_unpin;
                }
 
@@ -1350,7 +1334,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
        stdu->display_srf = vps->surf;
        stdu->content_fb_type = vps->content_fb_type;
        stdu->cpp = vps->cpp;
-       memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map));
        memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map));
 
        if (!stdu->defined)
index f3fcb83..0c3f608 100644 (file)
@@ -551,7 +551,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
                ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
                break;
        default:
-               hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag);
+               hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
                ret = 0;
        }
 
index 68cdc96..271f314 100644 (file)
@@ -696,8 +696,16 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
                                              (u8 *)&word, 2);
                break;
        case I2C_SMBUS_I2C_BLOCK_DATA:
-               size = I2C_SMBUS_BLOCK_DATA;
-               /* fallthrough */
+               if (read_write == I2C_SMBUS_READ) {
+                       read_length = data->block[0];
+                       count = cp2112_write_read_req(buf, addr, read_length,
+                                                     command, NULL, 0);
+               } else {
+                       count = cp2112_write_req(buf, addr, command,
+                                                data->block + 1,
+                                                data->block[0]);
+               }
+               break;
        case I2C_SMBUS_BLOCK_DATA:
                if (I2C_SMBUS_READ == read_write) {
                        count = cp2112_write_read_req(buf, addr,
@@ -785,6 +793,9 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
        case I2C_SMBUS_WORD_DATA:
                data->word = le16_to_cpup((__le16 *)buf);
                break;
+       case I2C_SMBUS_I2C_BLOCK_DATA:
+               memcpy(data->block + 1, buf, read_length);
+               break;
        case I2C_SMBUS_BLOCK_DATA:
                if (read_length > I2C_SMBUS_BLOCK_MAX) {
                        ret = -EPROTO;
index 9325545..edc0f64 100644 (file)
 
 #ifdef CONFIG_HOLTEK_FF
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
-MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
-
 /*
  * These commands and parameters are currently known:
  *
@@ -223,3 +219,7 @@ static struct hid_driver holtek_driver = {
        .probe = holtek_probe,
 };
 module_hid_driver(holtek_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
index 19f0cf3..ba0a092 100644 (file)
@@ -659,22 +659,28 @@ void vmbus_close(struct vmbus_channel *channel)
                 */
                return;
        }
-       mutex_lock(&vmbus_connection.channel_mutex);
        /*
         * Close all the sub-channels first and then close the
         * primary channel.
         */
        list_for_each_safe(cur, tmp, &channel->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
-               vmbus_close_internal(cur_channel);
                if (cur_channel->rescind) {
+                       wait_for_completion(&cur_channel->rescind_event);
+                       mutex_lock(&vmbus_connection.channel_mutex);
+                       vmbus_close_internal(cur_channel);
                        hv_process_channel_removal(
                                           cur_channel->offermsg.child_relid);
+               } else {
+                       mutex_lock(&vmbus_connection.channel_mutex);
+                       vmbus_close_internal(cur_channel);
                }
+               mutex_unlock(&vmbus_connection.channel_mutex);
        }
        /*
         * Now close the primary.
         */
+       mutex_lock(&vmbus_connection.channel_mutex);
        vmbus_close_internal(channel);
        mutex_unlock(&vmbus_connection.channel_mutex);
 }
index ec5454f..c21020b 100644 (file)
@@ -333,6 +333,7 @@ static struct vmbus_channel *alloc_channel(void)
                return NULL;
 
        spin_lock_init(&channel->lock);
+       init_completion(&channel->rescind_event);
 
        INIT_LIST_HEAD(&channel->sc_list);
        INIT_LIST_HEAD(&channel->percpu_list);
@@ -898,6 +899,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
        /*
         * Now wait for offer handling to complete.
         */
+       vmbus_rescind_cleanup(channel);
        while (READ_ONCE(channel->probe_done) == false) {
                /*
                 * We wait here until any channel offer is currently
@@ -913,7 +915,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
        if (channel->device_obj) {
                if (channel->chn_rescind_callback) {
                        channel->chn_rescind_callback(channel);
-                       vmbus_rescind_cleanup(channel);
                        return;
                }
                /*
@@ -922,7 +923,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                 */
                dev = get_device(&channel->device_obj->device);
                if (dev) {
-                       vmbus_rescind_cleanup(channel);
                        vmbus_device_unregister(channel->device_obj);
                        put_device(dev);
                }
@@ -936,13 +936,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                 * 2. Then close the primary channel.
                 */
                mutex_lock(&vmbus_connection.channel_mutex);
-               vmbus_rescind_cleanup(channel);
                if (channel->state == CHANNEL_OPEN_STATE) {
                        /*
                         * The channel is currently not open;
                         * it is safe for us to cleanup the channel.
                         */
                        hv_process_channel_removal(rescind->child_relid);
+               } else {
+                       complete(&channel->rescind_event);
                }
                mutex_unlock(&vmbus_connection.channel_mutex);
        }
index 76ed9a2..610223f 100644 (file)
@@ -1378,6 +1378,8 @@ void vmbus_device_unregister(struct hv_device *device_obj)
        pr_debug("child device %s unregistered\n",
                dev_name(&device_obj->device));
 
+       kset_unregister(device_obj->channels_kset);
+
        /*
         * Kick off the process of unregistering the device.
         * This will call vmbus_remove() and eventually vmbus_device_release()
index c9790e2..af51230 100644 (file)
@@ -143,6 +143,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
                                    struct hwmon_device *hwdev, int index)
 {
        struct hwmon_thermal_data *tdata;
+       struct thermal_zone_device *tzd;
 
        tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL);
        if (!tdata)
@@ -151,8 +152,14 @@ static int hwmon_thermal_add_sensor(struct device *dev,
        tdata->hwdev = hwdev;
        tdata->index = index;
 
-       devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
-                                            &hwmon_thermal_ops);
+       tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+                                                  &hwmon_thermal_ops);
+       /*
+        * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
+        * so ignore that error but forward any other error.
+        */
+       if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
+               return PTR_ERR(tzd);
 
        return 0;
 }
@@ -621,14 +628,20 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
                                if (!chip->ops->is_visible(drvdata, hwmon_temp,
                                                           hwmon_temp_input, j))
                                        continue;
-                               if (info[i]->config[j] & HWMON_T_INPUT)
-                                       hwmon_thermal_add_sensor(dev, hwdev, j);
+                               if (info[i]->config[j] & HWMON_T_INPUT) {
+                                       err = hwmon_thermal_add_sensor(dev,
+                                                               hwdev, j);
+                                       if (err)
+                                               goto free_device;
+                               }
                        }
                }
        }
 
        return hdev;
 
+free_device:
+       device_unregister(hdev);
 free_hwmon:
        kfree(hwdev);
 ida_remove:
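
The hwmon change above relies on a simple error-filtering rule: devm_thermal_zone_of_sensor_register() returning -ENODEV only means CONFIG_THERMAL_OF is disabled and is ignored, while any other error now unwinds the whole registration. A minimal user-space sketch of that rule; register_sensor() is a hypothetical stand-in for the real call:

    #include <errno.h>
    #include <stdio.h>

    /* hypothetical stand-in for devm_thermal_zone_of_sensor_register() */
    static int register_sensor(int index)
    {
        (void)index;
        return -ENODEV;     /* pretend the optional facility is not compiled in */
    }

    static int add_sensor(int index)
    {
        int ret = register_sensor(index);

        if (ret && ret != -ENODEV)  /* a real error: propagate it */
            return ret;

        return 0;                   /* -ENODEV: silently skip the sensor */
    }

    int main(void)
    {
        printf("add_sensor() -> %d\n", add_sensor(0));
        return 0;
    }
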
index 5f11dc0..e5234f9 100644 (file)
@@ -22,6 +22,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = {
 #define JC42_REG_TEMP          0x05
 #define JC42_REG_MANID         0x06
 #define JC42_REG_DEVICEID      0x07
+#define JC42_REG_SMBUS         0x22 /* NXP and Atmel, possibly others? */
 
 /* Status bits in temperature register */
 #define JC42_ALARM_CRIT_BIT    15
@@ -75,6 +77,9 @@ static const unsigned short normal_i2c[] = {
 #define GT_MANID               0x1c68  /* Giantec */
 #define GT_MANID2              0x132d  /* Giantec, 2nd mfg ID */
 
+/* SMBUS register */
+#define SMBUS_STMOUT           BIT(7)  /* SMBus time-out, active low */
+
 /* Supported chips */
 
 /* Analog Devices */
@@ -495,6 +500,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
        data->extended = !!(cap & JC42_CAP_RANGE);
 
+       if (device_property_read_bool(dev, "smbus-timeout-disable")) {
+               int smbus;
+
+               /*
+                * Not all chips support this register, but from a
+                * quick read of various datasheets no chip appears
+                * incompatible with the below attempt to disable
+                * the timeout. And the whole thing is opt-in...
+                */
+               smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
+               if (smbus < 0)
+                       return smbus;
+               i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
+                                            smbus | SMBUS_STMOUT);
+       }
+
        config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
        if (config < 0)
                return config;
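
The smbus-timeout-disable path added above is a read-modify-write of a 16-bit register: read the word, bail out on a negative (error) return, OR in the active-low time-out bit and write it back. A standalone sketch with stub accessors in place of i2c_smbus_read_word_swapped()/i2c_smbus_write_word_swapped():

    #include <stdint.h>
    #include <stdio.h>

    #define SMBUS_STMOUT (1u << 7)      /* SMBus time-out, active low */

    static uint16_t fake_reg;           /* stands in for the chip register */

    static int read_word(void)          { return fake_reg; }
    static void write_word(uint16_t v)  { fake_reg = v; }

    int main(void)
    {
        int smbus = read_word();

        if (smbus < 0)                  /* bus error: give up, as the driver does */
            return 1;

        write_word((uint16_t)smbus | SMBUS_STMOUT);
        printf("register now 0x%04x (time-out disabled)\n", fake_reg);
        return 0;
    }
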
index 52a58b8..a139940 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
+#include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
@@ -499,8 +500,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
 static long pmbus_reg2data_direct(struct pmbus_data *data,
                                  struct pmbus_sensor *sensor)
 {
-       long val = (s16) sensor->data;
-       long m, b, R;
+       s64 b, val = (s16)sensor->data;
+       s32 m, R;
 
        m = data->info->m[sensor->class];
        b = data->info->b[sensor->class];
@@ -528,11 +529,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
                R--;
        }
        while (R < 0) {
-               val = DIV_ROUND_CLOSEST(val, 10);
+               val = div_s64(val + 5LL, 10L);  /* round closest */
                R++;
        }
 
-       return (val - b) / m;
+       val = div_s64(val - b, m);
+       return clamp_val(val, LONG_MIN, LONG_MAX);
 }
 
 /*
@@ -656,7 +658,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
 static u16 pmbus_data2reg_direct(struct pmbus_data *data,
                                 struct pmbus_sensor *sensor, long val)
 {
-       long m, b, R;
+       s64 b, val64 = val;
+       s32 m, R;
 
        m = data->info->m[sensor->class];
        b = data->info->b[sensor->class];
@@ -673,18 +676,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
                R -= 3;         /* Adjust R and b for data in milli-units */
                b *= 1000;
        }
-       val = val * m + b;
+       val64 = val64 * m + b;
 
        while (R > 0) {
-               val *= 10;
+               val64 *= 10;
                R--;
        }
        while (R < 0) {
-               val = DIV_ROUND_CLOSEST(val, 10);
+               val64 = div_s64(val64 + 5LL, 10L);  /* round closest */
                R++;
        }
 
-       return val;
+       return (u16)clamp_val(val64, S16_MIN, S16_MAX);
 }
 
 static u16 pmbus_data2reg_vid(struct pmbus_data *data,
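
The direct-format helpers above follow the PMBus relation Y = (m * X + b) * 10^R, i.e. X = (Y * 10^-R - b) / m when decoding; the patch widens the intermediate math to 64 bits and clamps so large m, b or R coefficients cannot overflow a long. A user-space sketch of the decode arithmetic with made-up coefficients (the kernel additionally rescales most sensor classes to milli-units, omitted here):

    #include <inttypes.h>
    #include <stdio.h>

    /* X = (Y * 10^-R - b) / m, all intermediates kept in 64 bits */
    static int64_t direct_decode(int16_t y, int32_t m, int64_t b, int32_t R)
    {
        int64_t val = y;
        int32_t e = -R;                 /* exponent applied to the raw reading */

        for (; e > 0; e--)
            val *= 10;
        for (; e < 0; e++)
            val = (val + 5) / 10;       /* same rounding as div_s64(val + 5, 10) */

        return (val - b) / m;
    }

    int main(void)
    {
        /* made-up coefficients: m = 16, b = 0, R = 0, so X = Y / 16 */
        printf("decoded: %" PRId64 "\n", direct_decode(800, 16, 0, 0));
        return 0;
    }
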
index bd126a7..7da7564 100644 (file)
@@ -42,9 +42,11 @@ static struct stm_ftrace {
  * @len:       length of the data packet
  */
 static void notrace
-stm_ftrace_write(const void *buf, unsigned int len)
+stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
 {
-       stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len);
+       struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
+
+       stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
 }
 
 static int stm_ftrace_link(struct stm_source_data *data)
index 0d05dad..44cffad 100644 (file)
@@ -379,7 +379,7 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct platform_device_id cht_wc_i2c_adap_id_table[] = {
+static const struct platform_device_id cht_wc_i2c_adap_id_table[] = {
        { .name = "cht_wcove_ext_chgr" },
        {},
 };
index 21bf619..9fee4c0 100644 (file)
@@ -280,8 +280,6 @@ struct dw_i2c_dev {
        int                     (*acquire_lock)(struct dw_i2c_dev *dev);
        void                    (*release_lock)(struct dw_i2c_dev *dev);
        bool                    pm_disabled;
-       bool                    suspended;
-       bool                    skip_resume;
        void                    (*disable)(struct dw_i2c_dev *dev);
        void                    (*disable_int)(struct dw_i2c_dev *dev);
        int                     (*init)(struct dw_i2c_dev *dev);
index 58add69..153b947 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/reset.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/suspend.h>
 
 #include "i2c-designware-core.h"
 
@@ -372,6 +373,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
        adap->dev.of_node = pdev->dev.of_node;
 
+       dev_pm_set_driver_flags(&pdev->dev,
+                               DPM_FLAG_SMART_PREPARE |
+                               DPM_FLAG_SMART_SUSPEND |
+                               DPM_FLAG_LEAVE_SUSPENDED);
+
        /* The code below assumes runtime PM to be disabled. */
        WARN_ON(pm_runtime_enabled(&pdev->dev));
 
@@ -435,12 +441,24 @@ MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
 #ifdef CONFIG_PM_SLEEP
 static int dw_i2c_plat_prepare(struct device *dev)
 {
-       return pm_runtime_suspended(dev);
+       /*
+        * If the ACPI companion device object is present for this device, it
+        * may be accessed during suspend and resume of other devices via I2C
+        * operation regions, so tell the PM core and middle layers to avoid
+        * skipping system suspend/resume callbacks for it in that case.
+        */
+       return !has_acpi_companion(dev);
 }
 
 static void dw_i2c_plat_complete(struct device *dev)
 {
-       if (dev->power.direct_complete)
+       /*
+        * The device can only be in runtime suspend at this point if it has not
+        * been resumed throughout the ending system suspend/resume cycle, so if
+        * the platform firmware might mess with it, request the runtime PM
+        * framework to resume it.
+        */
+       if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
                pm_request_resume(dev);
 }
 #else
@@ -453,16 +471,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
 {
        struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
 
-       if (i_dev->suspended) {
-               i_dev->skip_resume = true;
-               return 0;
-       }
-
        i_dev->disable(i_dev);
        i2c_dw_plat_prepare_clk(i_dev, false);
 
-       i_dev->suspended = true;
-
        return 0;
 }
 
@@ -470,19 +481,9 @@ static int dw_i2c_plat_resume(struct device *dev)
 {
        struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
 
-       if (!i_dev->suspended)
-               return 0;
-
-       if (i_dev->skip_resume) {
-               i_dev->skip_resume = false;
-               return 0;
-       }
-
        i2c_dw_plat_prepare_clk(i_dev, true);
        i_dev->init(i_dev);
 
-       i_dev->suspended = false;
-
        return 0;
 }
 
index 9e12a53..8eac00e 100644 (file)
@@ -1617,6 +1617,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        /* Default timeout in interrupt mode: 200 ms */
        priv->adapter.timeout = HZ / 5;
 
+       if (dev->irq == IRQ_NOTCONNECTED)
+               priv->features &= ~FEATURE_IRQ;
+
        if (priv->features & FEATURE_IRQ) {
                u16 pcictl, pcists;
 
index 174579d..462948e 100644 (file)
@@ -983,7 +983,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
 
        if (adapdata->smba) {
                i2c_del_adapter(adap);
-               if (adapdata->port == (0 << 1)) {
+               if (adapdata->port == (0 << piix4_port_shift_sb800)) {
                        release_region(adapdata->smba, SMBIOSIZE);
                        if (adapdata->sb800_main)
                                release_region(SB800_PIIX4_SMB_IDX, 2);
index dab5176..d4f9cef 100644 (file)
@@ -1,10 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * i2c-stm32.h
  *
  * Copyright (C) M'boumba Cedric Madianga 2017
+ * Copyright (C) STMicroelectronics 2017
  * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
  *
- * License terms:  GNU General Public License (GPL), version 2
  */
 
 #ifndef _I2C_STM32_H
index 4ec1084..47c8d00 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for STMicroelectronics STM32 I2C controller
  *
@@ -6,11 +7,11 @@
  * http://www.st.com/resource/en/reference_manual/DM00031020.pdf
  *
  * Copyright (C) M'boumba Cedric Madianga 2016
+ * Copyright (C) STMicroelectronics 2017
  * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
  *
  * This driver is based on i2c-st.c
  *
- * License terms:  GNU General Public License (GPL), version 2
  */
 
 #include <linux/clk.h>
index d4a6e9c..b445b3b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for STMicroelectronics STM32F7 I2C controller
  *
@@ -7,11 +8,11 @@
  * http://www.st.com/resource/en/reference_manual/dm00124865.pdf
  *
  * Copyright (C) M'boumba Cedric Madianga 2017
+ * Copyright (C) STMicroelectronics 2017
  * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
  *
  * This driver is based on i2c-stm32f4.c
  *
- * License terms:  GNU General Public License (GPL), version 2
  */
 #include <linux/clk.h>
 #include <linux/delay.h>
index 31186ea..509a600 100644 (file)
@@ -86,6 +86,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
                                        property_entries_dup(info->properties);
                        if (IS_ERR(devinfo->board_info.properties)) {
                                status = PTR_ERR(devinfo->board_info.properties);
+                               kfree(devinfo);
                                break;
                        }
                }
@@ -98,6 +99,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
                                        GFP_KERNEL);
                        if (!devinfo->board_info.resources) {
                                status = -ENOMEM;
+                               kfree(devinfo);
                                break;
                        }
                }
index 706164b..f7829a7 100644 (file)
@@ -821,8 +821,12 @@ void i2c_unregister_device(struct i2c_client *client)
 {
        if (!client)
                return;
-       if (client->dev.of_node)
+
+       if (client->dev.of_node) {
                of_node_clear_flag(client->dev.of_node, OF_POPULATED);
+               of_node_put(client->dev.of_node);
+       }
+
        if (ACPI_COMPANION(&client->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
        device_unregister(&client->dev);
index 4bb9927..a1082c0 100644 (file)
@@ -397,16 +397,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
                                   the underlying bus driver */
                break;
        case I2C_SMBUS_I2C_BLOCK_DATA:
+               if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
+                       dev_err(&adapter->dev, "Invalid block %s size %d\n",
+                               read_write == I2C_SMBUS_READ ? "read" : "write",
+                               data->block[0]);
+                       return -EINVAL;
+               }
+
                if (read_write == I2C_SMBUS_READ) {
                        msg[1].len = data->block[0];
                } else {
                        msg[0].len = data->block[0] + 1;
-                       if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
-                               dev_err(&adapter->dev,
-                                       "Invalid block write size %d\n",
-                                       data->block[0]);
-                               return -EINVAL;
-                       }
                        for (i = 1; i <= data->block[0]; i++)
                                msgbuf0[i] = data->block[i];
                }
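
The hoisted check above bounds a caller-supplied byte count before it is used to size any message or copy: anything above I2C_SMBUS_BLOCK_MAX (32) is rejected for both reads and writes. A small user-space illustration of the same bound check:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define I2C_SMBUS_BLOCK_MAX 32

    /* block[0] is the caller-supplied byte count for block[1..] */
    static int check_block(const uint8_t *block)
    {
        if (block[0] > I2C_SMBUS_BLOCK_MAX)
            return -EINVAL;             /* reject before touching the payload */
        return 0;
    }

    int main(void)
    {
        uint8_t good[I2C_SMBUS_BLOCK_MAX + 1] = { 4, 1, 2, 3, 4 };
        uint8_t bad[2] = { 200, 0 };    /* claims more bytes than exist */

        printf("good: %d, bad: %d\n", check_block(good), check_block(bad));
        return 0;
    }
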
index ef86296..39e3b34 100644 (file)
@@ -629,6 +629,18 @@ config SPEAR_ADC
          To compile this driver as a module, choose M here: the
          module will be called spear_adc.
 
+config SD_ADC_MODULATOR
+       tristate "Generic sigma delta modulator"
+       depends on OF
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
+       help
+         Select this option to enable the sigma delta modulator. This driver
+         can support generic sigma delta modulators.
+
+         This driver can also be built as a module.  If so, the module
+         will be called sd_adc_modulator.
+
 config STM32_ADC_CORE
        tristate "STMicroelectronics STM32 adc core"
        depends on ARCH_STM32 || COMPILE_TEST
@@ -656,6 +668,31 @@ config STM32_ADC
          This driver can also be built as a module.  If so, the module
          will be called stm32-adc.
 
+config STM32_DFSDM_CORE
+       tristate "STMicroelectronics STM32 DFSDM core"
+       depends on (ARCH_STM32 && OF) || COMPILE_TEST
+       select REGMAP
+       select REGMAP_MMIO
+       help
+         Select this option to enable the driver for STMicroelectronics
+         STM32 digital filter for sigma delta converter.
+
+         This driver can also be built as a module.  If so, the module
+         will be called stm32-dfsdm-core.
+
+config STM32_DFSDM_ADC
+       tristate "STMicroelectronics STM32 dfsdm adc"
+       depends on (ARCH_STM32 && OF) || COMPILE_TEST
+       select STM32_DFSDM_CORE
+       select REGMAP_MMIO
+       select IIO_BUFFER_HW_CONSUMER
+       help
+         Select this option to support an ADC sigma delta modulator for the
+         STMicroelectronics STM32 digital filter for sigma delta converter.
+
+         This driver can also be built as a module.  If so, the module
+         will be called stm32-dfsdm-adc.
+
 config STX104
        tristate "Apex Embedded Systems STX104 driver"
        depends on PC104 && X86 && ISA_BUS_API
index 9572c10..28a9423 100644 (file)
@@ -64,6 +64,8 @@ obj-$(CONFIG_STX104) += stx104.o
 obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
 obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
 obj-$(CONFIG_STM32_ADC) += stm32-adc.o
+obj-$(CONFIG_STM32_DFSDM_CORE) += stm32-dfsdm-core.o
+obj-$(CONFIG_STM32_DFSDM_ADC) += stm32-dfsdm-adc.o
 obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
 obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
 obj-$(CONFIG_TI_ADC084S021) += ti-adc084s021.o
@@ -82,3 +84,4 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o
 obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
 xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
 obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
+obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
index 3576ec7..9ad6042 100644 (file)
@@ -1011,7 +1011,7 @@ static int cpcap_adc_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, indio_dev);
 
        ddata->irq = platform_get_irq_byname(pdev, "adcdone");
-       if (!ddata->irq)
+       if (ddata->irq < 0)
                return -ENODEV;
 
        error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL,
index 9c6932f..3604714 100644 (file)
@@ -221,8 +221,10 @@ enum meson_sar_adc_chan7_mux_sel {
 
 struct meson_sar_adc_data {
        bool                                    has_bl30_integration;
+       u32                                     bandgap_reg;
        unsigned int                            resolution;
        const char                              *name;
+       const struct regmap_config              *regmap_config;
 };
 
 struct meson_sar_adc_priv {
@@ -242,13 +244,20 @@ struct meson_sar_adc_priv {
        int                                     calibscale;
 };
 
-static const struct regmap_config meson_sar_adc_regmap_config = {
+static const struct regmap_config meson_sar_adc_regmap_config_gxbb = {
        .reg_bits = 8,
        .val_bits = 32,
        .reg_stride = 4,
        .max_register = MESON_SAR_ADC_REG13,
 };
 
+static const struct regmap_config meson_sar_adc_regmap_config_meson8 = {
+       .reg_bits = 8,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = MESON_SAR_ADC_DELTA_10,
+};
+
 static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev)
 {
        struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -600,7 +609,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
        init.num_parents = 1;
 
        priv->clk_gate.reg = base + MESON_SAR_ADC_REG3;
-       priv->clk_gate.bit_idx = fls(MESON_SAR_ADC_REG3_CLK_EN);
+       priv->clk_gate.bit_idx = __ffs(MESON_SAR_ADC_REG3_CLK_EN);
        priv->clk_gate.hw.init = &init;
 
        priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw);
@@ -685,6 +694,20 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
        return 0;
 }
 
+static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off)
+{
+       struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+       u32 enable_mask;
+
+       if (priv->data->bandgap_reg == MESON_SAR_ADC_REG11)
+               enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN;
+       else
+               enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN;
+
+       regmap_update_bits(priv->regmap, priv->data->bandgap_reg, enable_mask,
+                          on_off ? enable_mask : 0);
+}
+
 static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
 {
        struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -717,9 +740,9 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
        regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
                           MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
-       regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN);
+
+       meson_sar_adc_set_bandgap(indio_dev, true);
+
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
                           MESON_SAR_ADC_REG3_ADC_EN,
                           MESON_SAR_ADC_REG3_ADC_EN);
@@ -739,8 +762,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
 err_adc_clk:
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
                           MESON_SAR_ADC_REG3_ADC_EN, 0);
-       regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
+       meson_sar_adc_set_bandgap(indio_dev, false);
        clk_disable_unprepare(priv->sana_clk);
 err_sana_clk:
        clk_disable_unprepare(priv->core_clk);
@@ -765,8 +787,8 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
 
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
                           MESON_SAR_ADC_REG3_ADC_EN, 0);
-       regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
+
+       meson_sar_adc_set_bandgap(indio_dev, false);
 
        clk_disable_unprepare(priv->sana_clk);
        clk_disable_unprepare(priv->core_clk);
@@ -844,30 +866,40 @@ static const struct iio_info meson_sar_adc_iio_info = {
 
 static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
        .has_bl30_integration = false,
+       .bandgap_reg = MESON_SAR_ADC_DELTA_10,
+       .regmap_config = &meson_sar_adc_regmap_config_meson8,
        .resolution = 10,
        .name = "meson-meson8-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
        .has_bl30_integration = false,
+       .bandgap_reg = MESON_SAR_ADC_DELTA_10,
+       .regmap_config = &meson_sar_adc_regmap_config_meson8,
        .resolution = 10,
        .name = "meson-meson8b-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
        .has_bl30_integration = true,
+       .bandgap_reg = MESON_SAR_ADC_REG11,
+       .regmap_config = &meson_sar_adc_regmap_config_gxbb,
        .resolution = 10,
        .name = "meson-gxbb-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
        .has_bl30_integration = true,
+       .bandgap_reg = MESON_SAR_ADC_REG11,
+       .regmap_config = &meson_sar_adc_regmap_config_gxbb,
        .resolution = 12,
        .name = "meson-gxl-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
        .has_bl30_integration = true,
+       .bandgap_reg = MESON_SAR_ADC_REG11,
+       .regmap_config = &meson_sar_adc_regmap_config_gxbb,
        .resolution = 12,
        .name = "meson-gxm-saradc",
 };
@@ -945,7 +977,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
                return ret;
 
        priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
-                                            &meson_sar_adc_regmap_config);
+                                            priv->data->regmap_config);
        if (IS_ERR(priv->regmap))
                return PTR_ERR(priv->regmap);
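
On the bit_idx fix above (fls() replaced by __ffs()): clk_gate.bit_idx needs the zero-based index of the single set bit in the enable mask, which is what __ffs() returns, whereas fls() returns a one-based position and so lands one bit too high. A user-space model of the two helpers using GCC/Clang builtins; the bit-26 mask is only an example, not necessarily the real MESON_SAR_ADC_REG3_CLK_EN value:

    #include <stdio.h>

    static unsigned int model_ffs0(unsigned long x)  /* like the kernel's __ffs() */
    {
        return (unsigned int)__builtin_ctzl(x);
    }

    static unsigned int model_fls(unsigned int x)    /* like the kernel's fls() */
    {
        return x ? 32u - (unsigned int)__builtin_clz(x) : 0u;
    }

    int main(void)
    {
        unsigned long mask = 1ul << 26;  /* example single-bit enable mask */

        printf("__ffs(mask) = %u (zero-based index, what bit_idx needs)\n",
               model_ffs0(mask));
        printf("fls(mask)   = %u (one-based position, one too high)\n",
               model_fls((unsigned int)mask));
        return 0;
    }
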
 
diff --git a/drivers/iio/adc/sd_adc_modulator.c b/drivers/iio/adc/sd_adc_modulator.c
new file mode 100644 (file)
index 0000000..560d8c7
--- /dev/null
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic sigma delta modulator driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+static const struct iio_info iio_sd_mod_iio_info;
+
+static const struct iio_chan_spec iio_sd_mod_ch = {
+       .type = IIO_VOLTAGE,
+       .indexed = 1,
+       .scan_type = {
+               .sign = 'u',
+               .realbits = 1,
+               .shift = 0,
+       },
+};
+
+static int iio_sd_mod_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct iio_dev *iio;
+
+       iio = devm_iio_device_alloc(dev, 0);
+       if (!iio)
+               return -ENOMEM;
+
+       iio->dev.parent = dev;
+       iio->dev.of_node = dev->of_node;
+       iio->name = dev_name(dev);
+       iio->info = &iio_sd_mod_iio_info;
+       iio->modes = INDIO_BUFFER_HARDWARE;
+
+       iio->num_channels = 1;
+       iio->channels = &iio_sd_mod_ch;
+
+       platform_set_drvdata(pdev, iio);
+
+       return devm_iio_device_register(&pdev->dev, iio);
+}
+
+static const struct of_device_id sd_adc_of_match[] = {
+       { .compatible = "sd-modulator" },
+       { .compatible = "ads1201" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sd_adc_of_match);
+
+static struct platform_driver iio_sd_mod_adc = {
+       .driver = {
+               .name = "iio_sd_adc_mod",
+               .of_match_table = of_match_ptr(sd_adc_of_match),
+       },
+       .probe = iio_sd_mod_probe,
+};
+
+module_platform_driver(iio_sd_mod_adc);
+
+MODULE_DESCRIPTION("Basic sigma delta modulator");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
new file mode 100644 (file)
index 0000000..daa026d
--- /dev/null
@@ -0,0 +1,1205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the ADC part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+#define DFSDM_DMA_BUFFER_SIZE (4 * PAGE_SIZE)
+
+/* Conversion timeout */
+#define DFSDM_TIMEOUT_US 100000
+#define DFSDM_TIMEOUT (msecs_to_jiffies(DFSDM_TIMEOUT_US / 1000))
+
+/* Oversampling attribute default */
+#define DFSDM_DEFAULT_OVERSAMPLING  100
+
+/* Oversampling max values */
+#define DFSDM_MAX_INT_OVERSAMPLING 256
+#define DFSDM_MAX_FL_OVERSAMPLING 1024
+
+/* Max sample resolutions */
+#define DFSDM_MAX_RES BIT(31)
+#define DFSDM_DATA_RES BIT(23)
+
+enum sd_converter_type {
+       DFSDM_AUDIO,
+       DFSDM_IIO,
+};
+
+struct stm32_dfsdm_dev_data {
+       int type;
+       int (*init)(struct iio_dev *indio_dev);
+       unsigned int num_channels;
+       const struct regmap_config *regmap_cfg;
+};
+
+struct stm32_dfsdm_adc {
+       struct stm32_dfsdm *dfsdm;
+       const struct stm32_dfsdm_dev_data *dev_data;
+       unsigned int fl_id;
+       unsigned int ch_id;
+
+       /* ADC specific */
+       unsigned int oversamp;
+       struct iio_hw_consumer *hwc;
+       struct completion completion;
+       u32 *buffer;
+
+       /* Audio specific */
+       unsigned int spi_freq;  /* SPI bus clock frequency */
+       unsigned int sample_freq; /* Sample frequency after filter decimation */
+       int (*cb)(const void *data, size_t size, void *cb_priv);
+       void *cb_priv;
+
+       /* DMA */
+       u8 *rx_buf;
+       unsigned int bufi; /* Buffer current position */
+       unsigned int buf_sz; /* Buffer size */
+       struct dma_chan *dma_chan;
+       dma_addr_t dma_buf;
+};
+
+struct stm32_dfsdm_str2field {
+       const char      *name;
+       unsigned int    val;
+};
+
+/* DFSDM channel serial interface type */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_type[] = {
+       { "SPI_R", 0 }, /* SPI with data on rising edge */
+       { "SPI_F", 1 }, /* SPI with data on falling edge */
+       { "MANCH_R", 2 }, /* Manchester codec, rising edge = logic 0 */
+       { "MANCH_F", 3 }, /* Manchester codec, falling edge = logic 1 */
+       {},
+};
+
+/* DFSDM channel clock source */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_src[] = {
+       /* External SPI clock (CLKIN x) */
+       { "CLKIN", DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL },
+       /* Internal SPI clock (CLKOUT) */
+       { "CLKOUT", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL },
+       /* Internal SPI clock divided by 2 (falling edge) */
+       { "CLKOUT_F", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING },
+       /* Internal SPI clock divided by 2 (rising edge) */
+       { "CLKOUT_R", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING },
+       {},
+};
+
+static int stm32_dfsdm_str2val(const char *str,
+                              const struct stm32_dfsdm_str2field *list)
+{
+       const struct stm32_dfsdm_str2field *p = list;
+
+       for (p = list; p && p->name; p++)
+               if (!strcmp(p->name, str))
+                       return p->val;
+
+       return -EINVAL;
+}
+
+static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+                               unsigned int fast, unsigned int oversamp)
+{
+       unsigned int i, d, fosr, iosr;
+       u64 res;
+       s64 delta;
+       unsigned int m = 1;     /* multiplication factor */
+       unsigned int p = fl->ford;      /* filter order (ford) */
+
+       pr_debug("%s: Requested oversampling: %d\n",  __func__, oversamp);
+       /*
+        * This function tries to compute filter oversampling and integrator
+        * oversampling, based on the oversampling ratio requested by the user.
+        *
+        * Decimation d depends on the filter order and the oversampling ratios.
+        * ford: filter order
+        * fosr: filter over sampling ratio
+        * iosr: integrator over sampling ratio
+        */
+       if (fl->ford == DFSDM_FASTSINC_ORDER) {
+               m = 2;
+               p = 2;
+       }
+
+       /*
+        * Look for filter and integrator oversampling ratios which allow
+        * reaching a 24-bit output data resolution.
+        * Leave as soon as the exact resolution is reached.
+        * Otherwise the highest resolution below 32 bits is kept.
+        */
+       for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
+               for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
+                       if (fast)
+                               d = fosr * iosr;
+                       else if (fl->ford == DFSDM_FASTSINC_ORDER)
+                               d = fosr * (iosr + 3) + 2;
+                       else
+                               d = fosr * (iosr - 1 + p) + p;
+
+                       if (d > oversamp)
+                               break;
+                       else if (d != oversamp)
+                               continue;
+                       /*
+                        * Check resolution (limited to signed 32 bits)
+                        *   res <= 2^31
+                        * Sincx filters:
+                        *   res = m * fosr^p x iosr (with m=1, p=ford)
+                        * FastSinc filter
+                        *   res = m * fosr^p x iosr (with m=2, p=2)
+                        */
+                       res = fosr;
+                       for (i = p - 1; i > 0; i--) {
+                               res = res * (u64)fosr;
+                               if (res > DFSDM_MAX_RES)
+                                       break;
+                       }
+                       if (res > DFSDM_MAX_RES)
+                               continue;
+                       res = res * (u64)m * (u64)iosr;
+                       if (res > DFSDM_MAX_RES)
+                               continue;
+
+                       delta = res - DFSDM_DATA_RES;
+
+                       if (res >= fl->res) {
+                               fl->res = res;
+                               fl->fosr = fosr;
+                               fl->iosr = iosr;
+                               fl->fast = fast;
+                               pr_debug("%s: fosr = %d, iosr = %d\n",
+                                        __func__, fl->fosr, fl->iosr);
+                       }
+
+                       if (!delta)
+                               return 0;
+               }
+       }
+
+       if (!fl->fosr)
+               return -EINVAL;
+
+       return 0;
+}
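To make the fosr/iosr search above concrete, here is a minimal standalone sketch of the same computation, assuming a sinc3 filter (so m = 1, p = 3), fast mode disabled and a requested oversampling ratio of 100. It is illustrative only and not part of the patch.

/*
 * Standalone sketch of the fosr/iosr search performed by
 * stm32_dfsdm_set_osrs(), assuming a sinc3 filter (m = 1, p = 3),
 * fast mode off and a requested oversampling ratio of 100.
 * Illustrative only; not driver code.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int oversamp = 100, p = 3, m = 1;
	uint64_t best_res = 0;
	unsigned int best_fosr = 0, best_iosr = 0;

	for (unsigned int fosr = 1; fosr <= 1024; fosr++) {
		for (unsigned int iosr = 1; iosr <= 256; iosr++) {
			/* decimation for a sincx filter, fast mode off */
			unsigned int d = fosr * (iosr - 1 + p) + p;

			if (d != oversamp)
				continue;
			/* res = m * fosr^p * iosr, limited to 2^31 */
			uint64_t res = (uint64_t)m * fosr * fosr * fosr * iosr;

			if (res <= (1ULL << 31) && res >= best_res) {
				best_res = res;
				best_fosr = fosr;
				best_iosr = iosr;
			}
		}
	}
	printf("fosr=%u iosr=%u res=%llu\n", best_fosr, best_iosr,
	       (unsigned long long)best_res);
	return 0;
}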
+
+static int stm32_dfsdm_start_channel(struct stm32_dfsdm *dfsdm,
+                                    unsigned int ch_id)
+{
+       return regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+                                 DFSDM_CHCFGR1_CHEN_MASK,
+                                 DFSDM_CHCFGR1_CHEN(1));
+}
+
+static void stm32_dfsdm_stop_channel(struct stm32_dfsdm *dfsdm,
+                                    unsigned int ch_id)
+{
+       regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+                          DFSDM_CHCFGR1_CHEN_MASK, DFSDM_CHCFGR1_CHEN(0));
+}
+
+static int stm32_dfsdm_chan_configure(struct stm32_dfsdm *dfsdm,
+                                     struct stm32_dfsdm_channel *ch)
+{
+       unsigned int id = ch->id;
+       struct regmap *regmap = dfsdm->regmap;
+       int ret;
+
+       ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+                                DFSDM_CHCFGR1_SITP_MASK,
+                                DFSDM_CHCFGR1_SITP(ch->type));
+       if (ret < 0)
+               return ret;
+       ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+                                DFSDM_CHCFGR1_SPICKSEL_MASK,
+                                DFSDM_CHCFGR1_SPICKSEL(ch->src));
+       if (ret < 0)
+               return ret;
+       return regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+                                 DFSDM_CHCFGR1_CHINSEL_MASK,
+                                 DFSDM_CHCFGR1_CHINSEL(ch->alt_si));
+}
+
+static int stm32_dfsdm_start_filter(struct stm32_dfsdm *dfsdm,
+                                   unsigned int fl_id)
+{
+       int ret;
+
+       /* Enable filter */
+       ret = regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+                                DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(1));
+       if (ret < 0)
+               return ret;
+
+       /* Start conversion */
+       return regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+                                 DFSDM_CR1_RSWSTART_MASK,
+                                 DFSDM_CR1_RSWSTART(1));
+}
+
+static void stm32_dfsdm_stop_filter(struct stm32_dfsdm *dfsdm, unsigned int fl_id)
+{
+       /* Disable conversion */
+       regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+                          DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(0));
+}
+
+static int stm32_dfsdm_filter_configure(struct stm32_dfsdm *dfsdm,
+                                       unsigned int fl_id, unsigned int ch_id)
+{
+       struct regmap *regmap = dfsdm->regmap;
+       struct stm32_dfsdm_filter *fl = &dfsdm->fl_list[fl_id];
+       int ret;
+
+       /* Average integrator oversampling */
+       ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_IOSR_MASK,
+                                DFSDM_FCR_IOSR(fl->iosr - 1));
+       if (ret)
+               return ret;
+
+       /* Filter order and Oversampling */
+       ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FOSR_MASK,
+                                DFSDM_FCR_FOSR(fl->fosr - 1));
+       if (ret)
+               return ret;
+
+       ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FORD_MASK,
+                                DFSDM_FCR_FORD(fl->ford));
+       if (ret)
+               return ret;
+
+       /* No scan mode supported for the moment */
+       ret = regmap_update_bits(regmap, DFSDM_CR1(fl_id), DFSDM_CR1_RCH_MASK,
+                                DFSDM_CR1_RCH(ch_id));
+       if (ret)
+               return ret;
+
+       return regmap_update_bits(regmap, DFSDM_CR1(fl_id),
+                                 DFSDM_CR1_RSYNC_MASK,
+                                 DFSDM_CR1_RSYNC(fl->sync_mode));
+}
+
+static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm,
+                                       struct iio_dev *indio_dev,
+                                       struct iio_chan_spec *ch)
+{
+       struct stm32_dfsdm_channel *df_ch;
+       const char *of_str;
+       int chan_idx = ch->scan_index;
+       int ret, val;
+
+       ret = of_property_read_u32_index(indio_dev->dev.of_node,
+                                        "st,adc-channels", chan_idx,
+                                        &ch->channel);
+       if (ret < 0) {
+               dev_err(&indio_dev->dev,
+                       "Error parsing 'st,adc-channels' for idx %d\n",
+                       chan_idx);
+               return ret;
+       }
+       if (ch->channel >= dfsdm->num_chs) {
+               dev_err(&indio_dev->dev,
+                       "Bad channel number %d (max = %d)\n",
+                       ch->channel, dfsdm->num_chs);
+               return -EINVAL;
+       }
+
+       ret = of_property_read_string_index(indio_dev->dev.of_node,
+                                           "st,adc-channel-names", chan_idx,
+                                           &ch->datasheet_name);
+       if (ret < 0) {
+               dev_err(&indio_dev->dev,
+                       "Error parsing 'st,adc-channel-names' for idx %d\n",
+                       chan_idx);
+               return ret;
+       }
+
+       df_ch =  &dfsdm->ch_list[ch->channel];
+       df_ch->id = ch->channel;
+
+       ret = of_property_read_string_index(indio_dev->dev.of_node,
+                                           "st,adc-channel-types", chan_idx,
+                                           &of_str);
+       if (!ret) {
+               val  = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_type);
+               if (val < 0)
+                       return val;
+       } else {
+               val = 0;
+       }
+       df_ch->type = val;
+
+       ret = of_property_read_string_index(indio_dev->dev.of_node,
+                                           "st,adc-channel-clk-src", chan_idx,
+                                           &of_str);
+       if (!ret) {
+               val  = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_src);
+               if (val < 0)
+                       return val;
+       } else {
+               val = 0;
+       }
+       df_ch->src = val;
+
+       ret = of_property_read_u32_index(indio_dev->dev.of_node,
+                                        "st,adc-alt-channel", chan_idx,
+                                        &df_ch->alt_si);
+       if (ret < 0)
+               df_ch->alt_si = 0;
+
+       return 0;
+}
+
+static ssize_t dfsdm_adc_audio_get_spiclk(struct iio_dev *indio_dev,
+                                         uintptr_t priv,
+                                         const struct iio_chan_spec *chan,
+                                         char *buf)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", adc->spi_freq);
+}
+
+static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
+                                         uintptr_t priv,
+                                         const struct iio_chan_spec *chan,
+                                         const char *buf, size_t len)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+       struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+       unsigned int sample_freq = adc->sample_freq;
+       unsigned int spi_freq;
+       int ret;
+
+       dev_dbg(&indio_dev->dev, "enter %s\n", __func__);
+       /* If DFSDM is master on SPI, SPI freq cannot be updated */
+       if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+               return -EPERM;
+
+       ret = kstrtouint(buf, 0, &spi_freq);
+       if (ret)
+               return ret;
+
+       if (!spi_freq)
+               return -EINVAL;
+
+       if (sample_freq) {
+               if (spi_freq % sample_freq)
+                       dev_warn(&indio_dev->dev,
+                                "Sampling rate not accurate (%d)\n",
+                                spi_freq / (spi_freq / sample_freq));
+
+               ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / sample_freq));
+               if (ret < 0) {
+                       dev_err(&indio_dev->dev,
+                               "No filter parameters that match!\n");
+                       return ret;
+               }
+       }
+       adc->spi_freq = spi_freq;
+
+       return len;
+}
+
+static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma)
+{
+       struct regmap *regmap = adc->dfsdm->regmap;
+       int ret;
+       unsigned int dma_en = 0, cont_en = 0;
+
+       ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id);
+       if (ret < 0)
+               return ret;
+
+       ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id,
+                                          adc->ch_id);
+       if (ret < 0)
+               goto stop_channels;
+
+       if (dma) {
+               /* Enable DMA transfer */
+               dma_en = DFSDM_CR1_RDMAEN(1);
+               /* Enable conversion triggered by SPI clock */
+               cont_en = DFSDM_CR1_RCONT(1);
+       }
+       /* Enable DMA transfer */
+       ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+                                DFSDM_CR1_RDMAEN_MASK, dma_en);
+       if (ret < 0)
+               goto stop_channels;
+
+       /* Enable conversion triggered by SPI clock */
+       ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+                                DFSDM_CR1_RCONT_MASK, cont_en);
+       if (ret < 0)
+               goto stop_channels;
+
+       ret = stm32_dfsdm_start_filter(adc->dfsdm, adc->fl_id);
+       if (ret < 0)
+               goto stop_channels;
+
+       return 0;
+
+stop_channels:
+       regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+                          DFSDM_CR1_RDMAEN_MASK, 0);
+
+       regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+                          DFSDM_CR1_RCONT_MASK, 0);
+       stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+
+       return ret;
+}
+
+static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
+{
+       struct regmap *regmap = adc->dfsdm->regmap;
+
+       stm32_dfsdm_stop_filter(adc->dfsdm, adc->fl_id);
+
+       /* Clean conversion options */
+       regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+                          DFSDM_CR1_RDMAEN_MASK, 0);
+
+       regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+                          DFSDM_CR1_RCONT_MASK, 0);
+
+       stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+}
+
+static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
+                                    unsigned int val)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       unsigned int watermark = DFSDM_DMA_BUFFER_SIZE / 2;
+
+       /*
+        * DMA cyclic transfers are used, buffer is split into two periods.
+        * There should be:
+        * - always one buffer (period) DMA is working on
+        * - one buffer (period) driver pushed to ASoC side.
+        */
+       watermark = min(watermark, val * (unsigned int)(sizeof(u32)));
+       adc->buf_sz = watermark * 2;
+
+       return 0;
+}
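As a numeric illustration of the sizing above (assuming 4 KiB pages, so DFSDM_DMA_BUFFER_SIZE is 16 KiB): a consumer asking for 1024 samples per period gets a 4 KiB watermark and an 8 KiB cyclic buffer, while requests of 2048 samples or more are capped at the 8 KiB half-buffer. A tiny standalone sketch of that arithmetic, with assumed values:

/* Watermark sizing sketch; PAGE_SIZE = 4096 is an assumption. */
#include <stdio.h>

int main(void)
{
	const unsigned int dma_buffer_size = 4 * 4096;	/* 16 KiB */
	unsigned int requested_samples = 1024;		/* from the consumer */
	unsigned int watermark = dma_buffer_size / 2;	/* 8 KiB cap */
	unsigned int bytes = requested_samples * 4;	/* u32 samples */

	if (bytes < watermark)
		watermark = bytes;			/* 4 KiB period */
	printf("period = %u bytes, cyclic buffer = %u bytes\n",
	       watermark, watermark * 2);
	return 0;
}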
+
+static unsigned int stm32_dfsdm_adc_dma_residue(struct stm32_dfsdm_adc *adc)
+{
+       struct dma_tx_state state;
+       enum dma_status status;
+
+       status = dmaengine_tx_status(adc->dma_chan,
+                                    adc->dma_chan->cookie,
+                                    &state);
+       if (status == DMA_IN_PROGRESS) {
+               /* Residue is size in bytes from end of buffer */
+               unsigned int i = adc->buf_sz - state.residue;
+               unsigned int size;
+
+               /* Return available bytes */
+               if (i >= adc->bufi)
+                       size = i - adc->bufi;
+               else
+                       size = adc->buf_sz + i - adc->bufi;
+
+               return size;
+       }
+
+       return 0;
+}
+
+static void stm32_dfsdm_audio_dma_buffer_done(void *data)
+{
+       struct iio_dev *indio_dev = data;
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       int available = stm32_dfsdm_adc_dma_residue(adc);
+       size_t old_pos;
+
+       /*
+        * FIXME: The IIO kernel interface does not support cyclic DMA buffers
+        * and only offers an interface to push data sample by sample.
+        * For this reason the IIO buffer interface is not used and is
+        * bypassed through a private callback registered by ASoC.
+        * This should be a temporary solution, waiting for cyclic DMA engine
+        * support in IIO.
+        */
+
+       dev_dbg(&indio_dev->dev, "%s: pos = %d, available = %d\n", __func__,
+               adc->bufi, available);
+       old_pos = adc->bufi;
+
+       while (available >= indio_dev->scan_bytes) {
+               u32 *buffer = (u32 *)&adc->rx_buf[adc->bufi];
+
+               /* Mask 8 LSB that contains the channel ID */
+               *buffer = (*buffer & 0xFFFFFF00) << 8;
+               available -= indio_dev->scan_bytes;
+               adc->bufi += indio_dev->scan_bytes;
+               if (adc->bufi >= adc->buf_sz) {
+                       if (adc->cb)
+                               adc->cb(&adc->rx_buf[old_pos],
+                                        adc->buf_sz - old_pos, adc->cb_priv);
+                       adc->bufi = 0;
+                       old_pos = 0;
+               }
+       }
+       if (adc->cb)
+               adc->cb(&adc->rx_buf[old_pos], adc->bufi - old_pos,
+                       adc->cb_priv);
+}
+
+static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       struct dma_async_tx_descriptor *desc;
+       dma_cookie_t cookie;
+       int ret;
+
+       if (!adc->dma_chan)
+               return -EINVAL;
+
+       dev_dbg(&indio_dev->dev, "%s size=%d watermark=%d\n", __func__,
+               adc->buf_sz, adc->buf_sz / 2);
+
+       /* Prepare a DMA cyclic transaction */
+       desc = dmaengine_prep_dma_cyclic(adc->dma_chan,
+                                        adc->dma_buf,
+                                        adc->buf_sz, adc->buf_sz / 2,
+                                        DMA_DEV_TO_MEM,
+                                        DMA_PREP_INTERRUPT);
+       if (!desc)
+               return -EBUSY;
+
+       desc->callback = stm32_dfsdm_audio_dma_buffer_done;
+       desc->callback_param = indio_dev;
+
+       cookie = dmaengine_submit(desc);
+       ret = dma_submit_error(cookie);
+       if (ret) {
+               dmaengine_terminate_all(adc->dma_chan);
+               return ret;
+       }
+
+       /* Issue pending DMA requests */
+       dma_async_issue_pending(adc->dma_chan);
+
+       return 0;
+}
+
+static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       int ret;
+
+       /* Reset adc buffer index */
+       adc->bufi = 0;
+
+       ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+       if (ret < 0)
+               return ret;
+
+       ret = stm32_dfsdm_start_conv(adc, true);
+       if (ret) {
+               dev_err(&indio_dev->dev, "Can't start conversion\n");
+               goto stop_dfsdm;
+       }
+
+       if (adc->dma_chan) {
+               ret = stm32_dfsdm_adc_dma_start(indio_dev);
+               if (ret) {
+                       dev_err(&indio_dev->dev, "Can't start DMA\n");
+                       goto err_stop_conv;
+               }
+       }
+
+       return 0;
+
+err_stop_conv:
+       stm32_dfsdm_stop_conv(adc);
+stop_dfsdm:
+       stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+       return ret;
+}
+
+static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+       if (adc->dma_chan)
+               dmaengine_terminate_all(adc->dma_chan);
+
+       stm32_dfsdm_stop_conv(adc);
+
+       stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+       return 0;
+}
+
+static const struct iio_buffer_setup_ops stm32_dfsdm_buffer_setup_ops = {
+       .postenable = &stm32_dfsdm_postenable,
+       .predisable = &stm32_dfsdm_predisable,
+};
+
+/**
+ * stm32_dfsdm_get_buff_cb() - register a callback that will be called when
+ *                             a DMA transfer period is completed.
+ *
+ * @iio_dev: Handle to IIO device.
+ * @cb: Pointer to callback function:
+ *      - data: pointer to data buffer
+ *      - size: size in bytes of the data buffer
+ *      - private: pointer to consumer private structure.
+ * @private: Pointer to consumer private structure.
+ */
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+                           int (*cb)(const void *data, size_t size,
+                                     void *private),
+                           void *private)
+{
+       struct stm32_dfsdm_adc *adc;
+
+       if (!iio_dev)
+               return -EINVAL;
+       adc = iio_priv(iio_dev);
+
+       adc->cb = cb;
+       adc->cb_priv = private;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_get_buff_cb);
+
+/**
+ * stm32_dfsdm_release_buff_cb - unregister buffer callback
+ *
+ * @iio_dev: Handle to IIO device.
+ */
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev)
+{
+       struct stm32_dfsdm_adc *adc;
+
+       if (!iio_dev)
+               return -EINVAL;
+       adc = iio_priv(iio_dev);
+
+       adc->cb = NULL;
+       adc->cb_priv = NULL;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_release_buff_cb);
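The two exported helpers above form the private interface the ASoC side uses to pull audio periods out of the DMA ring. A hedged sketch of such a consumer follows; how the consumer obtains the struct iio_dev pointer, and all my_* names, are assumptions for illustration only and not part of this patch.

/*
 * Hypothetical consumer of stm32_dfsdm_get_buff_cb() /
 * stm32_dfsdm_release_buff_cb(); assumes the declarations of these
 * helpers are visible and that 'iio' points to a DFSDM audio instance.
 */
struct my_consumer {
	struct iio_dev *iio;	/* obtained elsewhere (assumption) */
};

static int my_period_cb(const void *data, size_t size, void *priv)
{
	/* One DMA period of 32-bit samples is available in 'data'. */
	return 0;
}

static int my_consumer_start(struct my_consumer *c)
{
	/* Register before the DFSDM buffer is enabled. */
	return stm32_dfsdm_get_buff_cb(c->iio, my_period_cb, c);
}

static void my_consumer_stop(struct my_consumer *c)
{
	stm32_dfsdm_release_buff_cb(c->iio);
}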
+
+static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
+                                  const struct iio_chan_spec *chan, int *res)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       long timeout;
+       int ret;
+
+       reinit_completion(&adc->completion);
+
+       adc->buffer = res;
+
+       ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+                                DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(1));
+       if (ret < 0)
+               goto stop_dfsdm;
+
+       ret = stm32_dfsdm_start_conv(adc, false);
+       if (ret < 0) {
+               regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+                                  DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+               goto stop_dfsdm;
+       }
+
+       timeout = wait_for_completion_interruptible_timeout(&adc->completion,
+                                                           DFSDM_TIMEOUT);
+
+       /* Mask the regular end-of-conversion IRQ */
+       regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+                          DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+
+       if (timeout == 0)
+               ret = -ETIMEDOUT;
+       else if (timeout < 0)
+               ret = timeout;
+       else
+               ret = IIO_VAL_INT;
+
+       stm32_dfsdm_stop_conv(adc);
+
+stop_dfsdm:
+       stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+       return ret;
+}
+
+static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
+                                struct iio_chan_spec const *chan,
+                                int val, int val2, long mask)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+       struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+       unsigned int spi_freq = adc->spi_freq;
+       int ret = -EINVAL;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+               ret = stm32_dfsdm_set_osrs(fl, 0, val);
+               if (!ret)
+                       adc->oversamp = val;
+
+               return ret;
+
+       case IIO_CHAN_INFO_SAMP_FREQ:
+               if (!val)
+                       return -EINVAL;
+               if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+                       spi_freq = adc->dfsdm->spi_master_freq;
+
+               if (spi_freq % val)
+                       dev_warn(&indio_dev->dev,
+                                "Sampling rate not accurate (%d)\n",
+                                spi_freq / (spi_freq / val));
+
+               ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / val));
+               if (ret < 0) {
+                       dev_err(&indio_dev->dev,
+                               "Unable to find filter parameters that match!\n");
+                       return ret;
+               }
+               adc->sample_freq = val;
+
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
+                               struct iio_chan_spec const *chan, int *val,
+                               int *val2, long mask)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       int ret;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               ret = iio_hw_consumer_enable(adc->hwc);
+               if (ret < 0) {
+                       dev_err(&indio_dev->dev,
+                               "%s: IIO enable failed (channel %d)\n",
+                               __func__, chan->channel);
+                       return ret;
+               }
+               ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
+               iio_hw_consumer_disable(adc->hwc);
+               if (ret < 0) {
+                       dev_err(&indio_dev->dev,
+                               "%s: Conversion failed (channel %d)\n",
+                               __func__, chan->channel);
+                       return ret;
+               }
+               return IIO_VAL_INT;
+
+       case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+               *val = adc->oversamp;
+
+               return IIO_VAL_INT;
+
+       case IIO_CHAN_INFO_SAMP_FREQ:
+               *val = adc->sample_freq;
+
+               return IIO_VAL_INT;
+       }
+
+       return -EINVAL;
+}
+
+static const struct iio_info stm32_dfsdm_info_audio = {
+       .hwfifo_set_watermark = stm32_dfsdm_set_watermark,
+       .read_raw = stm32_dfsdm_read_raw,
+       .write_raw = stm32_dfsdm_write_raw,
+};
+
+static const struct iio_info stm32_dfsdm_info_adc = {
+       .read_raw = stm32_dfsdm_read_raw,
+       .write_raw = stm32_dfsdm_write_raw,
+};
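From user space, the IIO_CHAN_INFO_RAW path handled in stm32_dfsdm_read_raw() above ends up behind the standard in_voltageY_raw sysfs attribute. A minimal reader is sketched below; the device index and channel number are assumptions, purely for illustration.

/* Read one regular conversion through sysfs (device index assumed). */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/iio/devices/iio:device0/in_voltage0_raw";
	FILE *f = fopen(path, "r");
	long raw;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &raw) == 1)
		printf("raw = %ld\n", raw);
	fclose(f);
	return 0;
}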
+
+static irqreturn_t stm32_dfsdm_irq(int irq, void *arg)
+{
+       struct stm32_dfsdm_adc *adc = arg;
+       struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+       struct regmap *regmap = adc->dfsdm->regmap;
+       unsigned int status, int_en;
+
+       regmap_read(regmap, DFSDM_ISR(adc->fl_id), &status);
+       regmap_read(regmap, DFSDM_CR2(adc->fl_id), &int_en);
+
+       if (status & DFSDM_ISR_REOCF_MASK) {
+               /* Read the data register to clear the IRQ status */
+               regmap_read(regmap, DFSDM_RDATAR(adc->fl_id), adc->buffer);
+               complete(&adc->completion);
+       }
+
+       if (status & DFSDM_ISR_ROVRF_MASK) {
+               if (int_en & DFSDM_CR2_ROVRIE_MASK)
+                       dev_warn(&indio_dev->dev, "Overrun detected\n");
+               regmap_update_bits(regmap, DFSDM_ICR(adc->fl_id),
+                                  DFSDM_ICR_CLRROVRF_MASK,
+                                  DFSDM_ICR_CLRROVRF_MASK);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Define external info for SPI Frequency and audio sampling rate that can be
+ * configured by ASoC driver through consumer.h API
+ */
+static const struct iio_chan_spec_ext_info dfsdm_adc_audio_ext_info[] = {
+       /* spi_clk_freq : clock freq on SPI/manchester bus used by channel */
+       {
+               .name = "spi_clk_freq",
+               .shared = IIO_SHARED_BY_TYPE,
+               .read = dfsdm_adc_audio_get_spiclk,
+               .write = dfsdm_adc_audio_set_spiclk,
+       },
+       {},
+};
+
+static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+       if (adc->dma_chan) {
+               dma_free_coherent(adc->dma_chan->device->dev,
+                                 DFSDM_DMA_BUFFER_SIZE,
+                                 adc->rx_buf, adc->dma_buf);
+               dma_release_channel(adc->dma_chan);
+       }
+}
+
+static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       struct dma_slave_config config = {
+               .src_addr = (dma_addr_t)adc->dfsdm->phys_base +
+                       DFSDM_RDATAR(adc->fl_id),
+               .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+       };
+       int ret;
+
+       adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
+       if (!adc->dma_chan)
+               return -EINVAL;
+
+       adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
+                                        DFSDM_DMA_BUFFER_SIZE,
+                                        &adc->dma_buf, GFP_KERNEL);
+       if (!adc->rx_buf) {
+               ret = -ENOMEM;
+               goto err_release;
+       }
+
+       ret = dmaengine_slave_config(adc->dma_chan, &config);
+       if (ret)
+               goto err_free;
+
+       return 0;
+
+err_free:
+       dma_free_coherent(adc->dma_chan->device->dev, DFSDM_DMA_BUFFER_SIZE,
+                         adc->rx_buf, adc->dma_buf);
+err_release:
+       dma_release_channel(adc->dma_chan);
+
+       return ret;
+}
+
+static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
+                                        struct iio_chan_spec *ch)
+{
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       int ret;
+
+       ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch);
+       if (ret < 0)
+               return ret;
+
+       ch->type = IIO_VOLTAGE;
+       ch->indexed = 1;
+
+       /*
+        * IIO_CHAN_INFO_RAW: used to compute regular conversion
+        * IIO_CHAN_INFO_OVERSAMPLING_RATIO: used to set oversampling
+        */
+       ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+       ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+
+       if (adc->dev_data->type == DFSDM_AUDIO) {
+               ch->scan_type.sign = 's';
+               ch->ext_info = dfsdm_adc_audio_ext_info;
+       } else {
+               ch->scan_type.sign = 'u';
+       }
+       ch->scan_type.realbits = 24;
+       ch->scan_type.storagebits = 32;
+       adc->ch_id = ch->channel;
+
+       return stm32_dfsdm_chan_configure(adc->dfsdm,
+                                         &adc->dfsdm->ch_list[ch->channel]);
+}
+
+static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
+{
+       struct iio_chan_spec *ch;
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       struct stm32_dfsdm_channel *d_ch;
+       int ret;
+
+       indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+       indio_dev->setup_ops = &stm32_dfsdm_buffer_setup_ops;
+
+       ch = devm_kzalloc(&indio_dev->dev, sizeof(*ch), GFP_KERNEL);
+       if (!ch)
+               return -ENOMEM;
+
+       ch->scan_index = 0;
+
+       ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
+       if (ret < 0) {
+               dev_err(&indio_dev->dev, "Channels init failed\n");
+               return ret;
+       }
+       ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ);
+
+       d_ch = &adc->dfsdm->ch_list[adc->ch_id];
+       if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+               adc->spi_freq = adc->dfsdm->spi_master_freq;
+
+       indio_dev->num_channels = 1;
+       indio_dev->channels = ch;
+
+       return stm32_dfsdm_dma_request(indio_dev);
+}
+
+static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+{
+       struct iio_chan_spec *ch;
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+       int num_ch;
+       int ret, chan_idx;
+
+       adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING;
+       ret = stm32_dfsdm_set_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
+                                  adc->oversamp);
+       if (ret < 0)
+               return ret;
+
+       num_ch = of_property_count_u32_elems(indio_dev->dev.of_node,
+                                            "st,adc-channels");
+       if (num_ch < 0 || num_ch > adc->dfsdm->num_chs) {
+               dev_err(&indio_dev->dev, "Bad st,adc-channels\n");
+               return num_ch < 0 ? num_ch : -EINVAL;
+       }
+
+       /* Bind to SD modulator IIO device */
+       adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev);
+       if (IS_ERR(adc->hwc))
+               return -EPROBE_DEFER;
+
+       ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch),
+                         GFP_KERNEL);
+       if (!ch)
+               return -ENOMEM;
+
+       for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
+               ch->scan_index = chan_idx;
+               ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
+               if (ret < 0) {
+                       dev_err(&indio_dev->dev, "Channels init failed\n");
+                       return ret;
+               }
+       }
+
+       indio_dev->num_channels = num_ch;
+       indio_dev->channels = ch;
+
+       init_completion(&adc->completion);
+
+       return 0;
+}
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_adc_data = {
+       .type = DFSDM_IIO,
+       .init = stm32_dfsdm_adc_init,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_audio_data = {
+       .type = DFSDM_AUDIO,
+       .init = stm32_dfsdm_audio_init,
+};
+
+static const struct of_device_id stm32_dfsdm_adc_match[] = {
+       {
+               .compatible = "st,stm32-dfsdm-adc",
+               .data = &stm32h7_dfsdm_adc_data,
+       },
+       {
+               .compatible = "st,stm32-dfsdm-dmic",
+               .data = &stm32h7_dfsdm_audio_data,
+       },
+       {}
+};
+
+static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct stm32_dfsdm_adc *adc;
+       struct device_node *np = dev->of_node;
+       const struct stm32_dfsdm_dev_data *dev_data;
+       struct iio_dev *iio;
+       char *name;
+       int ret, irq, val;
+
+       dev_data = of_device_get_match_data(dev);
+       iio = devm_iio_device_alloc(dev, sizeof(*adc));
+       if (!iio) {
+               dev_err(dev, "%s: Failed to allocate IIO\n", __func__);
+               return -ENOMEM;
+       }
+
+       adc = iio_priv(iio);
+       adc->dfsdm = dev_get_drvdata(dev->parent);
+
+       iio->dev.parent = dev;
+       iio->dev.of_node = np;
+       iio->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+
+       platform_set_drvdata(pdev, adc);
+
+       ret = of_property_read_u32(dev->of_node, "reg", &adc->fl_id);
+       if (ret != 0) {
+               dev_err(dev, "Missing reg property\n");
+               return -EINVAL;
+       }
+
+       name = devm_kzalloc(dev, sizeof("dfsdm-adc0"), GFP_KERNEL);
+       if (!name)
+               return -ENOMEM;
+       if (dev_data->type == DFSDM_AUDIO) {
+               iio->info = &stm32_dfsdm_info_audio;
+               snprintf(name, sizeof("dfsdm-pdm0"), "dfsdm-pdm%d", adc->fl_id);
+       } else {
+               iio->info = &stm32_dfsdm_info_adc;
+               snprintf(name, sizeof("dfsdm-adc0"), "dfsdm-adc%d", adc->fl_id);
+       }
+       iio->name = name;
+
+       /*
+        * In a first step, IRQs generated for channels are not handled.
+        * So the IRQ associated with filter instance 0 is dedicated to filter 0.
+        */
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
+                              0, pdev->name, adc);
+       if (ret < 0) {
+               dev_err(dev, "Failed to request IRQ\n");
+               return ret;
+       }
+
+       ret = of_property_read_u32(dev->of_node, "st,filter-order", &val);
+       if (ret < 0) {
+               dev_err(dev, "Failed to get filter order\n");
+               return ret;
+       }
+
+       adc->dfsdm->fl_list[adc->fl_id].ford = val;
+
+       ret = of_property_read_u32(dev->of_node, "st,filter0-sync", &val);
+       if (!ret)
+               adc->dfsdm->fl_list[adc->fl_id].sync_mode = val;
+
+       adc->dev_data = dev_data;
+       ret = dev_data->init(iio);
+       if (ret < 0)
+               return ret;
+
+       ret = iio_device_register(iio);
+       if (ret < 0)
+               goto err_cleanup;
+
+       if (dev_data->type == DFSDM_AUDIO) {
+               ret = of_platform_populate(np, NULL, NULL, dev);
+               if (ret < 0) {
+                       dev_err(dev, "Failed to find an audio DAI\n");
+                       goto err_unregister;
+               }
+       }
+
+       return 0;
+
+err_unregister:
+       iio_device_unregister(iio);
+err_cleanup:
+       stm32_dfsdm_dma_release(iio);
+
+       return ret;
+}
+
+static int stm32_dfsdm_adc_remove(struct platform_device *pdev)
+{
+       struct stm32_dfsdm_adc *adc = platform_get_drvdata(pdev);
+       struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+
+       if (adc->dev_data->type == DFSDM_AUDIO)
+               of_platform_depopulate(&pdev->dev);
+       iio_device_unregister(indio_dev);
+       stm32_dfsdm_dma_release(indio_dev);
+
+       return 0;
+}
+
+static struct platform_driver stm32_dfsdm_adc_driver = {
+       .driver = {
+               .name = "stm32-dfsdm-adc",
+               .of_match_table = stm32_dfsdm_adc_match,
+       },
+       .probe = stm32_dfsdm_adc_probe,
+       .remove = stm32_dfsdm_adc_remove,
+};
+module_platform_driver(stm32_dfsdm_adc_driver);
+
+MODULE_DESCRIPTION("STM32 sigma delta ADC");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
new file mode 100644 (file)
index 0000000..6290332
--- /dev/null
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the core part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+struct stm32_dfsdm_dev_data {
+       unsigned int num_filters;
+       unsigned int num_channels;
+       const struct regmap_config *regmap_cfg;
+};
+
+#define STM32H7_DFSDM_NUM_FILTERS      4
+#define STM32H7_DFSDM_NUM_CHANNELS     8
+
+static bool stm32_dfsdm_volatile_reg(struct device *dev, unsigned int reg)
+{
+       if (reg < DFSDM_FILTER_BASE_ADR)
+               return false;
+
+       /*
+        * The mask is applied to the register offset to avoid listing the
+        * registers of all filter instances.
+        */
+       switch (reg & DFSDM_FILTER_REG_MASK) {
+       case DFSDM_CR1(0) & DFSDM_FILTER_REG_MASK:
+       case DFSDM_ISR(0) & DFSDM_FILTER_REG_MASK:
+       case DFSDM_JDATAR(0) & DFSDM_FILTER_REG_MASK:
+       case DFSDM_RDATAR(0) & DFSDM_FILTER_REG_MASK:
+               return true;
+       }
+
+       return false;
+}
+
+static const struct regmap_config stm32h7_dfsdm_regmap_cfg = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = sizeof(u32),
+       .max_register = 0x2B8,
+       .volatile_reg = stm32_dfsdm_volatile_reg,
+       .fast_io = true,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_data = {
+       .num_filters = STM32H7_DFSDM_NUM_FILTERS,
+       .num_channels = STM32H7_DFSDM_NUM_CHANNELS,
+       .regmap_cfg = &stm32h7_dfsdm_regmap_cfg,
+};
+
+struct dfsdm_priv {
+       struct platform_device *pdev; /* platform device */
+
+       struct stm32_dfsdm dfsdm; /* common data exported for all instances */
+
+       unsigned int spi_clk_out_div; /* SPI clkout divider value */
+       atomic_t n_active_ch;   /* number of current active channels */
+
+       struct clk *clk; /* DFSDM clock */
+       struct clk *aclk; /* audio clock */
+};
+
+/**
+ * stm32_dfsdm_start_dfsdm - start global dfsdm interface.
+ * @dfsdm: Handle used to retrieve dfsdm context.
+ *
+ * Enable the interface if n_active_ch is not null.
+ */
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+       struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+       struct device *dev = &priv->pdev->dev;
+       unsigned int clk_div = priv->spi_clk_out_div;
+       int ret;
+
+       if (atomic_inc_return(&priv->n_active_ch) == 1) {
+               ret = clk_prepare_enable(priv->clk);
+               if (ret < 0) {
+                       dev_err(dev, "Failed to start clock\n");
+                       goto error_ret;
+               }
+               if (priv->aclk) {
+                       ret = clk_prepare_enable(priv->aclk);
+                       if (ret < 0) {
+                               dev_err(dev, "Failed to start audio clock\n");
+                               goto disable_clk;
+                       }
+               }
+
+               /* Output the SPI CLKOUT (if clk_div == 0, the clock is OFF) */
+               ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+                                        DFSDM_CHCFGR1_CKOUTDIV_MASK,
+                                        DFSDM_CHCFGR1_CKOUTDIV(clk_div));
+               if (ret < 0)
+                       goto disable_aclk;
+
+               /* Global enable of DFSDM interface */
+               ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+                                        DFSDM_CHCFGR1_DFSDMEN_MASK,
+                                        DFSDM_CHCFGR1_DFSDMEN(1));
+               if (ret < 0)
+                       goto disable_aclk;
+       }
+
+       dev_dbg(dev, "%s: n_active_ch %d\n", __func__,
+               atomic_read(&priv->n_active_ch));
+
+       return 0;
+
+disable_aclk:
+       clk_disable_unprepare(priv->aclk);
+disable_clk:
+       clk_disable_unprepare(priv->clk);
+
+error_ret:
+       atomic_dec(&priv->n_active_ch);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_start_dfsdm);
+
+/**
+ * stm32_dfsdm_stop_dfsdm - stop global DFSDM interface.
+ * @dfsdm: Handle used to retrieve dfsdm context.
+ *
+ * Disable the interface if n_active_ch is null.
+ */
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+       struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+       int ret;
+
+       if (atomic_dec_and_test(&priv->n_active_ch)) {
+               /* Global disable of DFSDM interface */
+               ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+                                        DFSDM_CHCFGR1_DFSDMEN_MASK,
+                                        DFSDM_CHCFGR1_DFSDMEN(0));
+               if (ret < 0)
+                       return ret;
+
+               /* Stop SPI CLKOUT */
+               ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+                                        DFSDM_CHCFGR1_CKOUTDIV_MASK,
+                                        DFSDM_CHCFGR1_CKOUTDIV(0));
+               if (ret < 0)
+                       return ret;
+
+               clk_disable_unprepare(priv->clk);
+               if (priv->aclk)
+                       clk_disable_unprepare(priv->aclk);
+       }
+       dev_dbg(&priv->pdev->dev, "%s: n_active_ch %d\n", __func__,
+               atomic_read(&priv->n_active_ch));
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_stop_dfsdm);
+
+static int stm32_dfsdm_parse_of(struct platform_device *pdev,
+                               struct dfsdm_priv *priv)
+{
+       struct device_node *node = pdev->dev.of_node;
+       struct resource *res;
+       unsigned long clk_freq;
+       unsigned int spi_freq, rem;
+       int ret;
+
+       if (!node)
+               return -EINVAL;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "Failed to get memory resource\n");
+               return -ENODEV;
+       }
+       priv->dfsdm.phys_base = res->start;
+       priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->dfsdm.base))
+               return PTR_ERR(priv->dfsdm.base);
+
+       /*
+        * "dfsdm" clock is mandatory for DFSDM peripheral clocking.
+        * "dfsdm" or "audio" clocks can be used as source clock for
+        * the SPI clock out signal and internal processing, depending
+        * on use case.
+        */
+       priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
+       if (IS_ERR(priv->clk)) {
+               dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n");
+               return -EINVAL;
+       }
+
+       priv->aclk = devm_clk_get(&pdev->dev, "audio");
+       if (IS_ERR(priv->aclk))
+               priv->aclk = NULL;
+
+       if (priv->aclk)
+               clk_freq = clk_get_rate(priv->aclk);
+       else
+               clk_freq = clk_get_rate(priv->clk);
+
+       /* SPI clock out frequency */
+       ret = of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+                                  &spi_freq);
+       if (ret < 0) {
+               /* No SPI master mode */
+               return 0;
+       }
+
+       priv->spi_clk_out_div = div_u64_rem(clk_freq, spi_freq, &rem) - 1;
+       priv->dfsdm.spi_master_freq = spi_freq;
+
+       if (rem) {
+               dev_warn(&pdev->dev, "SPI clock not accurate\n");
+               dev_warn(&pdev->dev, "%ld = %d * %d + %d\n",
+                        clk_freq, spi_freq, priv->spi_clk_out_div + 1, rem);
+       }
+
+       return 0;
+}
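For illustration, the CKOUTDIV value derived above is simply clk_freq / spi_freq - 1, and the warning fires whenever the division is not exact. A standalone sketch with assumed clock values:

/*
 * CKOUTDIV sizing sketch; the 49.152 MHz audio clock and the
 * 2.048 MHz spi-max-frequency below are assumed example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long clk_freq = 49152000;	/* "audio" clock */
	unsigned int spi_freq = 2048000;	/* spi-max-frequency */
	unsigned long div = clk_freq / spi_freq;	/* 24 */
	unsigned long rem = clk_freq % spi_freq;	/* 0: exact */

	printf("CKOUTDIV field = %lu, remainder = %lu\n", div - 1, rem);
	if (rem)
		printf("SPI clock not accurate\n");
	return 0;
}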
+
+static const struct of_device_id stm32_dfsdm_of_match[] = {
+       {
+               .compatible = "st,stm32h7-dfsdm",
+               .data = &stm32h7_dfsdm_data,
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_of_match);
+
+static int stm32_dfsdm_probe(struct platform_device *pdev)
+{
+       struct dfsdm_priv *priv;
+       const struct stm32_dfsdm_dev_data *dev_data;
+       struct stm32_dfsdm *dfsdm;
+       int ret;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->pdev = pdev;
+
+       dev_data = of_device_get_match_data(&pdev->dev);
+
+       dfsdm = &priv->dfsdm;
+       dfsdm->fl_list = devm_kcalloc(&pdev->dev, dev_data->num_filters,
+                                     sizeof(*dfsdm->fl_list), GFP_KERNEL);
+       if (!dfsdm->fl_list)
+               return -ENOMEM;
+
+       dfsdm->num_fls = dev_data->num_filters;
+       dfsdm->ch_list = devm_kcalloc(&pdev->dev, dev_data->num_channels,
+                                     sizeof(*dfsdm->ch_list),
+                                     GFP_KERNEL);
+       if (!dfsdm->ch_list)
+               return -ENOMEM;
+       dfsdm->num_chs = dev_data->num_channels;
+
+       ret = stm32_dfsdm_parse_of(pdev, priv);
+       if (ret < 0)
+               return ret;
+
+       dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm",
+                                                 dfsdm->base,
+                                                 &stm32h7_dfsdm_regmap_cfg);
+       if (IS_ERR(dfsdm->regmap)) {
+               ret = PTR_ERR(dfsdm->regmap);
+               dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
+                       __func__, ret);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, dfsdm);
+
+       return devm_of_platform_populate(&pdev->dev);
+}
+
+static struct platform_driver stm32_dfsdm_driver = {
+       .probe = stm32_dfsdm_probe,
+       .driver = {
+               .name = "stm32-dfsdm",
+               .of_match_table = stm32_dfsdm_of_match,
+       },
+};
+
+module_platform_driver(stm32_dfsdm_driver);
+
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 dfsdm driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm.h b/drivers/iio/adc/stm32-dfsdm.h
new file mode 100644 (file)
index 0000000..8708394
--- /dev/null
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is part of STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef MDF_STM32_DFSDM__H
+#define MDF_STM32_DFSDM__H
+
+#include <linux/bitfield.h>
+
+/*
+ * STM32 DFSDM - global register map
+ * ________________________________________________________
+ * | Offset |                 Registers block             |
+ * --------------------------------------------------------
+ * | 0x000  |      CHANNEL 0 + COMMON CHANNEL FIELDS      |
+ * --------------------------------------------------------
+ * | 0x020  |                CHANNEL 1                    |
+ * --------------------------------------------------------
+ * | ...    |                .....                        |
+ * --------------------------------------------------------
+ * | 0x0E0  |                CHANNEL 7                    |
+ * --------------------------------------------------------
+ * | 0x100  |      FILTER  0 + COMMON  FILTER FIELDS      |
+ * --------------------------------------------------------
+ * | 0x180  |                FILTER  1                    |
+ * --------------------------------------------------------
+ * | 0x200  |                FILTER  2                    |
+ * --------------------------------------------------------
+ * | 0x280  |                FILTER  3                    |
+ * --------------------------------------------------------
+ */
+
+/*
+ * Channels register definitions
+ */
+#define DFSDM_CHCFGR1(y)  ((y) * 0x20 + 0x00)
+#define DFSDM_CHCFGR2(y)  ((y) * 0x20 + 0x04)
+#define DFSDM_AWSCDR(y)   ((y) * 0x20 + 0x08)
+#define DFSDM_CHWDATR(y)  ((y) * 0x20 + 0x0C)
+#define DFSDM_CHDATINR(y) ((y) * 0x20 + 0x10)
+
+/* CHCFGR1: Channel configuration register 1 */
+#define DFSDM_CHCFGR1_SITP_MASK     GENMASK(1, 0)
+#define DFSDM_CHCFGR1_SITP(v)       FIELD_PREP(DFSDM_CHCFGR1_SITP_MASK, v)
+#define DFSDM_CHCFGR1_SPICKSEL_MASK GENMASK(3, 2)
+#define DFSDM_CHCFGR1_SPICKSEL(v)   FIELD_PREP(DFSDM_CHCFGR1_SPICKSEL_MASK, v)
+#define DFSDM_CHCFGR1_SCDEN_MASK    BIT(5)
+#define DFSDM_CHCFGR1_SCDEN(v)      FIELD_PREP(DFSDM_CHCFGR1_SCDEN_MASK, v)
+#define DFSDM_CHCFGR1_CKABEN_MASK   BIT(6)
+#define DFSDM_CHCFGR1_CKABEN(v)     FIELD_PREP(DFSDM_CHCFGR1_CKABEN_MASK, v)
+#define DFSDM_CHCFGR1_CHEN_MASK     BIT(7)
+#define DFSDM_CHCFGR1_CHEN(v)       FIELD_PREP(DFSDM_CHCFGR1_CHEN_MASK, v)
+#define DFSDM_CHCFGR1_CHINSEL_MASK  BIT(8)
+#define DFSDM_CHCFGR1_CHINSEL(v)    FIELD_PREP(DFSDM_CHCFGR1_CHINSEL_MASK, v)
+#define DFSDM_CHCFGR1_DATMPX_MASK   GENMASK(13, 12)
+#define DFSDM_CHCFGR1_DATMPX(v)     FIELD_PREP(DFSDM_CHCFGR1_DATMPX_MASK, v)
+#define DFSDM_CHCFGR1_DATPACK_MASK  GENMASK(15, 14)
+#define DFSDM_CHCFGR1_DATPACK(v)    FIELD_PREP(DFSDM_CHCFGR1_DATPACK_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTDIV_MASK GENMASK(23, 16)
+#define DFSDM_CHCFGR1_CKOUTDIV(v)   FIELD_PREP(DFSDM_CHCFGR1_CKOUTDIV_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTSRC_MASK BIT(30)
+#define DFSDM_CHCFGR1_CKOUTSRC(v)   FIELD_PREP(DFSDM_CHCFGR1_CKOUTSRC_MASK, v)
+#define DFSDM_CHCFGR1_DFSDMEN_MASK  BIT(31)
+#define DFSDM_CHCFGR1_DFSDMEN(v)    FIELD_PREP(DFSDM_CHCFGR1_DFSDMEN_MASK, v)
+
+/* CHCFGR2: Channel configuration register 2 */
+#define DFSDM_CHCFGR2_DTRBS_MASK    GENMASK(7, 3)
+#define DFSDM_CHCFGR2_DTRBS(v)      FIELD_PREP(DFSDM_CHCFGR2_DTRBS_MASK, v)
+#define DFSDM_CHCFGR2_OFFSET_MASK   GENMASK(31, 8)
+#define DFSDM_CHCFGR2_OFFSET(v)     FIELD_PREP(DFSDM_CHCFGR2_OFFSET_MASK, v)
+
+/* AWSCDR: Channel analog watchdog and short circuit detector */
+#define DFSDM_AWSCDR_SCDT_MASK    GENMASK(7, 0)
+#define DFSDM_AWSCDR_SCDT(v)      FIELD_PREP(DFSDM_AWSCDR_SCDT_MASK, v)
+#define DFSDM_AWSCDR_BKSCD_MASK   GENMASK(15, 12)
+#define DFSDM_AWSCDR_BKSCD(v)    FIELD_PREP(DFSDM_AWSCDR_BKSCD_MASK, v)
+#define DFSDM_AWSCDR_AWFOSR_MASK  GENMASK(20, 16)
+#define DFSDM_AWSCDR_AWFOSR(v)    FIELD_PREP(DFSDM_AWSCDR_AWFOSR_MASK, v)
+#define DFSDM_AWSCDR_AWFORD_MASK  GENMASK(23, 22)
+#define DFSDM_AWSCDR_AWFORD(v)    FIELD_PREP(DFSDM_AWSCDR_AWFORD_MASK, v)
+
+/*
+ * Filters register definitions
+ */
+#define DFSDM_FILTER_BASE_ADR          0x100
+#define DFSDM_FILTER_REG_MASK          0x7F
+#define DFSDM_FILTER_X_BASE_ADR(x)     ((x) * 0x80 + DFSDM_FILTER_BASE_ADR)
+
+#define DFSDM_CR1(x)     (DFSDM_FILTER_X_BASE_ADR(x)  + 0x00)
+#define DFSDM_CR2(x)     (DFSDM_FILTER_X_BASE_ADR(x)  + 0x04)
+#define DFSDM_ISR(x)     (DFSDM_FILTER_X_BASE_ADR(x)  + 0x08)
+#define DFSDM_ICR(x)     (DFSDM_FILTER_X_BASE_ADR(x)  + 0x0C)
+#define DFSDM_JCHGR(x)   (DFSDM_FILTER_X_BASE_ADR(x)  + 0x10)
+#define DFSDM_FCR(x)     (DFSDM_FILTER_X_BASE_ADR(x)  + 0x14)
+#define DFSDM_JDATAR(x)  (DFSDM_FILTER_X_BASE_ADR(x)  + 0x18)
+#define DFSDM_RDATAR(x)  (DFSDM_FILTER_X_BASE_ADR(x)  + 0x1C)
+#define DFSDM_AWHTR(x)   (DFSDM_FILTER_X_BASE_ADR(x)  + 0x20)
+#define DFSDM_AWLTR(x)   (DFSDM_FILTER_X_BASE_ADR(x)  + 0x24)
+#define DFSDM_AWSR(x)    (DFSDM_FILTER_X_BASE_ADR(x)  + 0x28)
+#define DFSDM_AWCFR(x)   (DFSDM_FILTER_X_BASE_ADR(x)  + 0x2C)
+#define DFSDM_EXMAX(x)   (DFSDM_FILTER_X_BASE_ADR(x)  + 0x30)
+#define DFSDM_EXMIN(x)   (DFSDM_FILTER_X_BASE_ADR(x)  + 0x34)
+#define DFSDM_CNVTIMR(x) (DFSDM_FILTER_X_BASE_ADR(x)  + 0x38)
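A quick standalone check of the offset macros above (illustrative only, not driver code): with a 0x20 channel stride and a 0x80 filter stride, the last filter register DFSDM_CNVTIMR(3) lands at 0x2B8, which is the .max_register value used in the regmap config of stm32-dfsdm-core.c.

/* Offset sanity check for the macros above; standalone, not driver code. */
#include <assert.h>

#define DFSDM_CHCFGR1(y)           ((y) * 0x20 + 0x00)
#define DFSDM_FILTER_BASE_ADR      0x100
#define DFSDM_FILTER_X_BASE_ADR(x) ((x) * 0x80 + DFSDM_FILTER_BASE_ADR)
#define DFSDM_CR1(x)               (DFSDM_FILTER_X_BASE_ADR(x) + 0x00)
#define DFSDM_CNVTIMR(x)           (DFSDM_FILTER_X_BASE_ADR(x) + 0x38)

int main(void)
{
	assert(DFSDM_CHCFGR1(7) == 0x0E0);	/* last channel block */
	assert(DFSDM_CR1(1) == 0x180);		/* filter 1 base */
	assert(DFSDM_CNVTIMR(3) == 0x2B8);	/* regmap max_register */
	return 0;
}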
+
+/* CR1 Control register 1 */
+#define DFSDM_CR1_DFEN_MASK    BIT(0)
+#define DFSDM_CR1_DFEN(v)      FIELD_PREP(DFSDM_CR1_DFEN_MASK, v)
+#define DFSDM_CR1_JSWSTART_MASK        BIT(1)
+#define DFSDM_CR1_JSWSTART(v)  FIELD_PREP(DFSDM_CR1_JSWSTART_MASK, v)
+#define DFSDM_CR1_JSYNC_MASK   BIT(3)
+#define DFSDM_CR1_JSYNC(v)     FIELD_PREP(DFSDM_CR1_JSYNC_MASK, v)
+#define DFSDM_CR1_JSCAN_MASK   BIT(4)
+#define DFSDM_CR1_JSCAN(v)     FIELD_PREP(DFSDM_CR1_JSCAN_MASK, v)
+#define DFSDM_CR1_JDMAEN_MASK  BIT(5)
+#define DFSDM_CR1_JDMAEN(v)    FIELD_PREP(DFSDM_CR1_JDMAEN_MASK, v)
+#define DFSDM_CR1_JEXTSEL_MASK GENMASK(12, 8)
+#define DFSDM_CR1_JEXTSEL(v)   FIELD_PREP(DFSDM_CR1_JEXTSEL_MASK, v)
+#define DFSDM_CR1_JEXTEN_MASK  GENMASK(14, 13)
+#define DFSDM_CR1_JEXTEN(v)    FIELD_PREP(DFSDM_CR1_JEXTEN_MASK, v)
+#define DFSDM_CR1_RSWSTART_MASK        BIT(17)
+#define DFSDM_CR1_RSWSTART(v)  FIELD_PREP(DFSDM_CR1_RSWSTART_MASK, v)
+#define DFSDM_CR1_RCONT_MASK   BIT(18)
+#define DFSDM_CR1_RCONT(v)     FIELD_PREP(DFSDM_CR1_RCONT_MASK, v)
+#define DFSDM_CR1_RSYNC_MASK   BIT(19)
+#define DFSDM_CR1_RSYNC(v)     FIELD_PREP(DFSDM_CR1_RSYNC_MASK, v)
+#define DFSDM_CR1_RDMAEN_MASK  BIT(21)
+#define DFSDM_CR1_RDMAEN(v)    FIELD_PREP(DFSDM_CR1_RDMAEN_MASK, v)
+#define DFSDM_CR1_RCH_MASK     GENMASK(26, 24)
+#define DFSDM_CR1_RCH(v)       FIELD_PREP(DFSDM_CR1_RCH_MASK, v)
+#define DFSDM_CR1_FAST_MASK    BIT(29)
+#define DFSDM_CR1_FAST(v)      FIELD_PREP(DFSDM_CR1_FAST_MASK, v)
+#define DFSDM_CR1_AWFSEL_MASK  BIT(30)
+#define DFSDM_CR1_AWFSEL(v)    FIELD_PREP(DFSDM_CR1_AWFSEL_MASK, v)
+
+/* CR2: Control register 2 */
+#define DFSDM_CR2_IE_MASK      GENMASK(6, 0)
+#define DFSDM_CR2_IE(v)                FIELD_PREP(DFSDM_CR2_IE_MASK, v)
+#define DFSDM_CR2_JEOCIE_MASK  BIT(0)
+#define DFSDM_CR2_JEOCIE(v)    FIELD_PREP(DFSDM_CR2_JEOCIE_MASK, v)
+#define DFSDM_CR2_REOCIE_MASK  BIT(1)
+#define DFSDM_CR2_REOCIE(v)    FIELD_PREP(DFSDM_CR2_REOCIE_MASK, v)
+#define DFSDM_CR2_JOVRIE_MASK  BIT(2)
+#define DFSDM_CR2_JOVRIE(v)    FIELD_PREP(DFSDM_CR2_JOVRIE_MASK, v)
+#define DFSDM_CR2_ROVRIE_MASK  BIT(3)
+#define DFSDM_CR2_ROVRIE(v)    FIELD_PREP(DFSDM_CR2_ROVRIE_MASK, v)
+#define DFSDM_CR2_AWDIE_MASK   BIT(4)
+#define DFSDM_CR2_AWDIE(v)     FIELD_PREP(DFSDM_CR2_AWDIE_MASK, v)
+#define DFSDM_CR2_SCDIE_MASK   BIT(5)
+#define DFSDM_CR2_SCDIE(v)     FIELD_PREP(DFSDM_CR2_SCDIE_MASK, v)
+#define DFSDM_CR2_CKABIE_MASK  BIT(6)
+#define DFSDM_CR2_CKABIE(v)    FIELD_PREP(DFSDM_CR2_CKABIE_MASK, v)
+#define DFSDM_CR2_EXCH_MASK    GENMASK(15, 8)
+#define DFSDM_CR2_EXCH(v)      FIELD_PREP(DFSDM_CR2_EXCH_MASK, v)
+#define DFSDM_CR2_AWDCH_MASK   GENMASK(23, 16)
+#define DFSDM_CR2_AWDCH(v)     FIELD_PREP(DFSDM_CR2_AWDCH_MASK, v)
+
+/* ISR: Interrupt status register */
+#define DFSDM_ISR_JEOCF_MASK   BIT(0)
+#define DFSDM_ISR_JEOCF(v)     FIELD_PREP(DFSDM_ISR_JEOCF_MASK, v)
+#define DFSDM_ISR_REOCF_MASK   BIT(1)
+#define DFSDM_ISR_REOCF(v)     FIELD_PREP(DFSDM_ISR_REOCF_MASK, v)
+#define DFSDM_ISR_JOVRF_MASK   BIT(2)
+#define DFSDM_ISR_JOVRF(v)     FIELD_PREP(DFSDM_ISR_JOVRF_MASK, v)
+#define DFSDM_ISR_ROVRF_MASK   BIT(3)
+#define DFSDM_ISR_ROVRF(v)     FIELD_PREP(DFSDM_ISR_ROVRF_MASK, v)
+#define DFSDM_ISR_AWDF_MASK    BIT(4)
+#define DFSDM_ISR_AWDF(v)      FIELD_PREP(DFSDM_ISR_AWDF_MASK, v)
+#define DFSDM_ISR_JCIP_MASK    BIT(13)
+#define DFSDM_ISR_JCIP(v)      FIELD_PREP(DFSDM_ISR_JCIP_MASK, v)
+#define DFSDM_ISR_RCIP_MASK    BIT(14)
+#define DFSDM_ISR_RCIP(v)      FIELD_PREP(DFSDM_ISR_RCIP_MASK, v)
+#define DFSDM_ISR_CKABF_MASK   GENMASK(23, 16)
+#define DFSDM_ISR_CKABF(v)     FIELD_PREP(DFSDM_ISR_CKABF_MASK, v)
+#define DFSDM_ISR_SCDF_MASK    GENMASK(31, 24)
+#define DFSDM_ISR_SCDF(v)      FIELD_PREP(DFSDM_ISR_SCDF_MASK, v)
+
+/* ICR: Interrupt flag clear register */
+#define DFSDM_ICR_CLRJOVRF_MASK              BIT(2)
+#define DFSDM_ICR_CLRJOVRF(v)        FIELD_PREP(DFSDM_ICR_CLRJOVRF_MASK, v)
+#define DFSDM_ICR_CLRROVRF_MASK              BIT(3)
+#define DFSDM_ICR_CLRROVRF(v)        FIELD_PREP(DFSDM_ICR_CLRROVRF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_MASK              GENMASK(23, 16)
+#define DFSDM_ICR_CLRCKABF(v)        FIELD_PREP(DFSDM_ICR_CLRCKABF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_CH_MASK(y) BIT(16 + (y))
+#define DFSDM_ICR_CLRCKABF_CH(v, y)   \
+                          (((v) << (16 + (y))) & DFSDM_ICR_CLRCKABF_CH_MASK(y))
+#define DFSDM_ICR_CLRSCDF_MASK       GENMASK(31, 24)
+#define DFSDM_ICR_CLRSCDF(v)         FIELD_PREP(DFSDM_ICR_CLRSCDF_MASK, v)
+#define DFSDM_ICR_CLRSCDF_CH_MASK(y)  BIT(24 + (y))
+#define DFSDM_ICR_CLRSCDF_CH(v, y)    \
+                         (((v) << (24 + (y))) & DFSDM_ICR_CLRSCDF_CH_MASK(y))
+
+/* FCR: Filter control register */
+#define DFSDM_FCR_IOSR_MASK    GENMASK(7, 0)
+#define DFSDM_FCR_IOSR(v)      FIELD_PREP(DFSDM_FCR_IOSR_MASK, v)
+#define DFSDM_FCR_FOSR_MASK    GENMASK(25, 16)
+#define DFSDM_FCR_FOSR(v)      FIELD_PREP(DFSDM_FCR_FOSR_MASK, v)
+#define DFSDM_FCR_FORD_MASK    GENMASK(31, 29)
+#define DFSDM_FCR_FORD(v)      FIELD_PREP(DFSDM_FCR_FORD_MASK, v)
+
+/* RDATAR: Filter data register for regular channel */
+#define DFSDM_DATAR_CH_MASK    GENMASK(2, 0)
+#define DFSDM_DATAR_DATA_OFFSET 8
+#define DFSDM_DATAR_DATA_MASK  GENMASK(31, DFSDM_DATAR_DATA_OFFSET)
+
+/* AWLTR: Filter analog watchdog low threshold register */
+#define DFSDM_AWLTR_BKAWL_MASK GENMASK(3, 0)
+#define DFSDM_AWLTR_BKAWL(v)   FIELD_PREP(DFSDM_AWLTR_BKAWL_MASK, v)
+#define DFSDM_AWLTR_AWLT_MASK  GENMASK(31, 8)
+#define DFSDM_AWLTR_AWLT(v)    FIELD_PREP(DFSDM_AWLTR_AWLT_MASK, v)
+
+/* AWHTR: Filter analog watchdog high threshold register */
+#define DFSDM_AWHTR_BKAWH_MASK GENMASK(3, 0)
+#define DFSDM_AWHTR_BKAWH(v)   FIELD_PREP(DFSDM_AWHTR_BKAWH_MASK, v)
+#define DFSDM_AWHTR_AWHT_MASK  GENMASK(31, 8)
+#define DFSDM_AWHTR_AWHT(v)    FIELD_PREP(DFSDM_AWHTR_AWHT_MASK, v)
+
+/* AWSR: Filter watchdog status register */
+#define DFSDM_AWSR_AWLTF_MASK  GENMASK(7, 0)
+#define DFSDM_AWSR_AWLTF(v)    FIELD_PREP(DFSDM_AWSR_AWLTF_MASK, v)
+#define DFSDM_AWSR_AWHTF_MASK  GENMASK(15, 8)
+#define DFSDM_AWSR_AWHTF(v)    FIELD_PREP(DFSDM_AWSR_AWHTF_MASK, v)
+
+/* AWCFR: Filter analog watchdog clear flag register */
+#define DFSDM_AWCFR_AWLTF_MASK GENMASK(7, 0)
+#define DFSDM_AWCFR_AWLTF(v)   FIELD_PREP(DFSDM_AWCFR_AWLTF_MASK, v)
+#define DFSDM_AWCFR_AWHTF_MASK GENMASK(15, 8)
+#define DFSDM_AWCFR_AWHTF(v)   FIELD_PREP(DFSDM_AWCFR_AWHTF_MASK, v)
+
+/* DFSDM filter order  */
+enum stm32_dfsdm_sinc_order {
+       DFSDM_FASTSINC_ORDER, /* FastSinc filter type */
+       DFSDM_SINC1_ORDER,    /* Sinc 1 filter type */
+       DFSDM_SINC2_ORDER,    /* Sinc 2 filter type */
+       DFSDM_SINC3_ORDER,    /* Sinc 3 filter type */
+       DFSDM_SINC4_ORDER,    /* Sinc 4 filter type (N.A. for watchdog) */
+       DFSDM_SINC5_ORDER,    /* Sinc 5 filter type (N.A. for watchdog) */
+       DFSDM_NB_SINC_ORDER,
+};
+
+/**
+ * struct stm32_dfsdm_filter - structure relative to stm32 DFSDM filter
+ * @iosr: integrator oversampling ratio
+ * @fosr: filter oversampling ratio
+ * @ford: filter order
+ * @res: output sample resolution
+ * @sync_mode: filter synchronized with filter 0
+ * @fast: filter fast mode
+ */
+struct stm32_dfsdm_filter {
+       unsigned int iosr;
+       unsigned int fosr;
+       enum stm32_dfsdm_sinc_order ford;
+       u64 res;
+       unsigned int sync_mode;
+       unsigned int fast;
+};
+
+/**
+ * struct stm32_dfsdm_channel - structure relative to stm32 DFSDM channel
+ * @id: id of the channel
+ * @type: interface type linked to stm32_dfsdm_chan_type
+ * @src: channel input source, linked to stm32_dfsdm_chan_src
+ * @alt_si: alternative serial input interface
+ */
+struct stm32_dfsdm_channel {
+       unsigned int id;
+       unsigned int type;
+       unsigned int src;
+       unsigned int alt_si;
+};
+
+/**
+ * struct stm32_dfsdm - stm32 DFSDM driver common data (for all instances)
+ * @base:      control registers base CPU address
+ * @phys_base: DFSDM IP register physical address
+ * @regmap:    regmap for register read/write
+ * @fl_list:   filter resources list
+ * @num_fls:   number of filter resources available
+ * @ch_list:   channel resources list
+ * @num_chs:   number of channel resources available
+ * @spi_master_freq: SPI clock out frequency
+ */
+struct stm32_dfsdm {
+       void __iomem    *base;
+       phys_addr_t     phys_base;
+       struct regmap *regmap;
+       struct stm32_dfsdm_filter *fl_list;
+       unsigned int num_fls;
+       struct stm32_dfsdm_channel *ch_list;
+       unsigned int num_chs;
+       unsigned int spi_master_freq;
+};
+
+/* DFSDM channel serial spi clock source */
+enum stm32_dfsdm_spi_clk_src {
+       DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL,
+       DFSDM_CHANNEL_SPI_CLOCK_INTERNAL,
+       DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING,
+       DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING
+};
+
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm);
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm);
+
+#endif
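
The paired _MASK / FIELD_PREP() helpers above are meant to be combined when programming a filter through the regmap held in struct stm32_dfsdm. Below is a minimal, hypothetical sketch of that pattern; the function name, the fl_id/ch parameters and the chosen bits are illustrative assumptions, not part of this header (it additionally needs <linux/regmap.h> and <linux/bitfield.h>).

static int dfsdm_start_regular_conv(struct stm32_dfsdm *dfsdm,
				    unsigned int fl_id, unsigned int ch)
{
	unsigned int mask, val;
	int ret;

	/* Select the regular channel, continuous mode and DMA requests. */
	mask = DFSDM_CR1_RCH_MASK | DFSDM_CR1_RCONT_MASK | DFSDM_CR1_RDMAEN_MASK;
	val = DFSDM_CR1_RCH(ch) | DFSDM_CR1_RCONT(1) | DFSDM_CR1_RDMAEN(1);
	ret = regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id), mask, val);
	if (ret)
		return ret;

	/* Enable the filter, then request a regular software start. */
	ret = regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
				 DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(1));
	if (ret)
		return ret;

	return regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
				  DFSDM_CR1_RSWSTART_MASK,
				  DFSDM_CR1_RSWSTART(1));
}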
index 4ffd3db..338774c 100644 (file)
@@ -29,6 +29,16 @@ config IIO_BUFFER_DMAENGINE
 
          Should be selected by drivers that want to use this functionality.
 
+config IIO_BUFFER_HW_CONSUMER
+       tristate "Industrial I/O HW buffering"
+       help
+         Provides a way to bond buffers when an IIO device has a direct
+         connection to another device in hardware. In this case, buffers for
+         data transfers are handled by the hardware itself.
+
+         Should be selected by drivers that want to use the generic HW
+         consumer interface.
+
 config IIO_KFIFO_BUF
        tristate "Industrial I/O buffering based on kfifo"
        help
index 95f9f41..1403eb2 100644 (file)
@@ -7,5 +7,6 @@
 obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o
 obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o
 obj-$(CONFIG_IIO_BUFFER_DMAENGINE) += industrialio-buffer-dmaengine.o
+obj-$(CONFIG_IIO_BUFFER_HW_CONSUMER) += industrialio-hw-consumer.o
 obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o
 obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
index 4847534..ea63c83 100644 (file)
@@ -104,6 +104,17 @@ error_free_cb_buff:
 }
 EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
 
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buff,
+                                       size_t watermark)
+{
+       if (!watermark)
+               return -EINVAL;
+       cb_buff->buffer.watermark = watermark;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_set_buffer_watermark);
+
 int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
 {
        return iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer,
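
The new iio_channel_cb_set_buffer_watermark() export lets a callback-buffer consumer choose how much data is queued before its callback runs; it has to be called before iio_channel_start_all_cb(). A hedged sketch of the intended sequence (the callback body, the watermark of 32 and the setup function are illustrative assumptions):

static int my_cb(const void *data, void *private)
{
	/* Consume one chunk of buffered scan data here. */
	return 0;
}

static int my_consumer_setup(struct device *dev)
{
	struct iio_cb_buffer *cb;
	int ret;

	cb = iio_channel_get_all_cb(dev, my_cb, NULL);
	if (IS_ERR(cb))
		return PTR_ERR(cb);

	/* Must be non-zero; 0 is rejected with -EINVAL. */
	ret = iio_channel_cb_set_buffer_watermark(cb, 32);
	if (ret)
		goto err_release;

	return iio_channel_start_all_cb(cb);

err_release:
	iio_channel_release_all_cb(cb);
	return ret;
}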
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
new file mode 100644 (file)
index 0000000..9516569
--- /dev/null
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 Analog Devices Inc.
+ *  Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/buffer_impl.h>
+
+/**
+ * struct iio_hw_consumer - IIO hw consumer block
+ * @buffers: hardware buffers list head.
+ * @channels: IIO provider channels.
+ */
+struct iio_hw_consumer {
+       struct list_head buffers;
+       struct iio_channel *channels;
+};
+
+struct hw_consumer_buffer {
+       struct list_head head;
+       struct iio_dev *indio_dev;
+       struct iio_buffer buffer;
+       long scan_mask[];
+};
+
+static struct hw_consumer_buffer *iio_buffer_to_hw_consumer_buffer(
+       struct iio_buffer *buffer)
+{
+       return container_of(buffer, struct hw_consumer_buffer, buffer);
+}
+
+static void iio_hw_buf_release(struct iio_buffer *buffer)
+{
+       struct hw_consumer_buffer *hw_buf =
+               iio_buffer_to_hw_consumer_buffer(buffer);
+       kfree(hw_buf);
+}
+
+static const struct iio_buffer_access_funcs iio_hw_buf_access = {
+       .release = &iio_hw_buf_release,
+       .modes = INDIO_BUFFER_HARDWARE,
+};
+
+static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
+       struct iio_hw_consumer *hwc, struct iio_dev *indio_dev)
+{
+       size_t mask_size = BITS_TO_LONGS(indio_dev->masklength) * sizeof(long);
+       struct hw_consumer_buffer *buf;
+
+       list_for_each_entry(buf, &hwc->buffers, head) {
+               if (buf->indio_dev == indio_dev)
+                       return buf;
+       }
+
+       buf = kzalloc(sizeof(*buf) + mask_size, GFP_KERNEL);
+       if (!buf)
+               return NULL;
+
+       buf->buffer.access = &iio_hw_buf_access;
+       buf->indio_dev = indio_dev;
+       buf->buffer.scan_mask = buf->scan_mask;
+
+       iio_buffer_init(&buf->buffer);
+       list_add_tail(&buf->head, &hwc->buffers);
+
+       return buf;
+}
+
+/**
+ * iio_hw_consumer_alloc() - Allocate IIO hardware consumer
+ * @dev: Pointer to consumer device.
+ *
+ * Returns a valid iio_hw_consumer on success or an ERR_PTR() on failure.
+ */
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev)
+{
+       struct hw_consumer_buffer *buf;
+       struct iio_hw_consumer *hwc;
+       struct iio_channel *chan;
+       int ret;
+
+       hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+       if (!hwc)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&hwc->buffers);
+
+       hwc->channels = iio_channel_get_all(dev);
+       if (IS_ERR(hwc->channels)) {
+               ret = PTR_ERR(hwc->channels);
+               goto err_free_hwc;
+       }
+
+       chan = &hwc->channels[0];
+       while (chan->indio_dev) {
+               buf = iio_hw_consumer_get_buffer(hwc, chan->indio_dev);
+               if (!buf) {
+                       ret = -ENOMEM;
+                       goto err_put_buffers;
+               }
+               set_bit(chan->channel->scan_index, buf->buffer.scan_mask);
+               chan++;
+       }
+
+       return hwc;
+
+err_put_buffers:
+       list_for_each_entry(buf, &hwc->buffers, head)
+               iio_buffer_put(&buf->buffer);
+       iio_channel_release_all(hwc->channels);
+err_free_hwc:
+       kfree(hwc);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_alloc);
+
+/**
+ * iio_hw_consumer_free() - Free IIO hardware consumer
+ * @hwc: hw consumer to free.
+ */
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc)
+{
+       struct hw_consumer_buffer *buf, *n;
+
+       iio_channel_release_all(hwc->channels);
+       list_for_each_entry_safe(buf, n, &hwc->buffers, head)
+               iio_buffer_put(&buf->buffer);
+       kfree(hwc);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_free);
+
+static void devm_iio_hw_consumer_release(struct device *dev, void *res)
+{
+       iio_hw_consumer_free(*(struct iio_hw_consumer **)res);
+}
+
+static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
+{
+       struct iio_hw_consumer **r = res;
+
+       if (!r || !*r) {
+               WARN_ON(!r || !*r);
+               return 0;
+       }
+       return *r == data;
+}
+
+/**
+ * devm_iio_hw_consumer_alloc - Resource-managed iio_hw_consumer_alloc()
+ * @dev: Pointer to consumer device.
+ *
+ * Managed iio_hw_consumer_alloc. iio_hw_consumer allocated with this function
+ * is automatically freed on driver detach.
+ *
+ * If an iio_hw_consumer allocated with this function needs to be freed
+ * separately, devm_iio_hw_consumer_free() must be used.
+ *
+ * Returns a pointer to the allocated iio_hw_consumer on success, NULL or an
+ * ERR_PTR() on failure.
+ */
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
+{
+       struct iio_hw_consumer **ptr, *iio_hwc;
+
+       ptr = devres_alloc(devm_iio_hw_consumer_release, sizeof(*ptr),
+                          GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       iio_hwc = iio_hw_consumer_alloc(dev);
+       if (IS_ERR(iio_hwc)) {
+               devres_free(ptr);
+       } else {
+               *ptr = iio_hwc;
+               devres_add(dev, ptr);
+       }
+
+       return iio_hwc;
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc);
+
+/**
+ * devm_iio_hw_consumer_free - Resource-managed iio_hw_consumer_free()
+ * @dev: Pointer to consumer device.
+ * @hwc: iio_hw_consumer to free.
+ *
+ * Free iio_hw_consumer allocated with devm_iio_hw_consumer_alloc().
+ */
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc)
+{
+       int rc;
+
+       rc = devres_release(dev, devm_iio_hw_consumer_release,
+                           devm_iio_hw_consumer_match, hwc);
+       WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_free);
+
+/**
+ * iio_hw_consumer_enable() - Enable IIO hardware consumer
+ * @hwc: iio_hw_consumer to enable.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc)
+{
+       struct hw_consumer_buffer *buf;
+       int ret;
+
+       list_for_each_entry(buf, &hwc->buffers, head) {
+               ret = iio_update_buffers(buf->indio_dev, &buf->buffer, NULL);
+               if (ret)
+                       goto err_disable_buffers;
+       }
+
+       return 0;
+
+err_disable_buffers:
+       list_for_each_entry_continue_reverse(buf, &hwc->buffers, head)
+               iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_enable);
+
+/**
+ * iio_hw_consumer_disable() - Disable IIO hardware consumer
+ * @hwc: iio_hw_consumer to disable.
+ */
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc)
+{
+       struct hw_consumer_buffer *buf;
+
+       list_for_each_entry(buf, &hwc->buffers, head)
+               iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_disable);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Hardware consumer buffer the IIO framework");
+MODULE_LICENSE("GPL v2");
index 203ffb9..147a8c1 100644 (file)
@@ -371,7 +371,7 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
                mutex_unlock(&indio_dev->mlock);
                break;
        case IIO_CHAN_INFO_SCALE:
-               *val = 1;  /* 0.0625 */
+               *val = 1000;  /* 62.5 */
                *val2 = 16;
                ret = IIO_VAL_FRACTIONAL;
                break;
index 9c4cfd1..2f0998e 100644 (file)
@@ -631,7 +631,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
  * iio_format_value() - Formats a IIO value into its string representation
  * @buf:       The buffer to which the formatted value gets written
  *             which is assumed to be big enough (i.e. PAGE_SIZE).
- * @type:      One of the IIO_VAL_... constants. This decides how the val
+ * @type:      One of the IIO_VAL_* constants. This decides how the val
  *             and val2 parameters are formatted.
  * @size:      Number of IIO value entries contained in vals
  * @vals:      Pointer to the values, exact meaning depends on the
@@ -639,7 +639,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
  *
  * Return: 0 by default, a negative number on failure or the
  *        total number of characters written for a type that belongs
- *        to the IIO_VAL_... constant.
+ *        to the IIO_VAL_* constant.
  */
 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
 {
index 069defc..ec98790 100644 (file)
@@ -664,9 +664,8 @@ err_unlock:
 }
 EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
 
-static int iio_read_channel_attribute(struct iio_channel *chan,
-                                     int *val, int *val2,
-                                     enum iio_chan_info_enum attribute)
+int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
+                              enum iio_chan_info_enum attribute)
 {
        int ret;
 
@@ -682,6 +681,7 @@ err_unlock:
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
 
 int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
 {
@@ -850,7 +850,8 @@ static int iio_channel_write(struct iio_channel *chan, int val, int val2,
                                                chan->channel, val, val2, info);
 }
 
-int iio_write_channel_raw(struct iio_channel *chan, int val)
+int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
+                               enum iio_chan_info_enum attribute)
 {
        int ret;
 
@@ -860,12 +861,18 @@ int iio_write_channel_raw(struct iio_channel *chan, int val)
                goto err_unlock;
        }
 
-       ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_RAW);
+       ret = iio_channel_write(chan, val, val2, attribute);
 err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
+
+int iio_write_channel_raw(struct iio_channel *chan, int val)
+{
+       return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
+}
 EXPORT_SYMBOL_GPL(iio_write_channel_raw);
 
 unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
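
With iio_read_channel_attribute() and iio_write_channel_attribute() now exported, in-kernel consumers can reach any IIO_CHAN_INFO_* attribute of a mapped channel, and iio_write_channel_raw() above becomes a thin wrapper around the write variant. A hedged sketch (the channel lookup and the chosen attributes are illustrative):

static int my_consumer_use(struct iio_channel *chan)
{
	int val, val2, ret;

	/* Read the channel scale; many devices report it as val/val2. */
	ret = iio_read_channel_attribute(chan, &val, &val2,
					 IIO_CHAN_INFO_SCALE);
	if (ret < 0)
		return ret;

	/* Equivalent to iio_write_channel_raw(chan, 1). */
	return iio_write_channel_attribute(chan, 1, 0, IIO_CHAN_INFO_RAW);
}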
index 53c5d65..df23dbc 100644 (file)
@@ -869,6 +869,7 @@ static int sx9500_init_device(struct iio_dev *indio_dev)
 static void sx9500_gpio_probe(struct i2c_client *client,
                              struct sx9500_data *data)
 {
+       struct gpio_desc *gpiod_int;
        struct device *dev;
 
        if (!client)
@@ -876,6 +877,14 @@ static void sx9500_gpio_probe(struct i2c_client *client,
 
        dev = &client->dev;
 
+       if (client->irq <= 0) {
+               gpiod_int = devm_gpiod_get(dev, SX9500_GPIO_INT, GPIOD_IN);
+               if (IS_ERR(gpiod_int))
+                       dev_err(dev, "gpio get irq failed\n");
+               else
+                       client->irq = gpiod_to_irq(gpiod_int);
+       }
+
        data->gpiod_rst = devm_gpiod_get(dev, SX9500_GPIO_RESET, GPIOD_OUT_HIGH);
        if (IS_ERR(data->gpiod_rst)) {
                dev_warn(dev, "gpio get reset pin failed\n");
index 98ac46e..cbf1865 100644 (file)
@@ -1,6 +1,6 @@
 menuconfig INFINIBAND
        tristate "InfiniBand support"
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && HAS_DMA
        depends on NET
        depends on INET
        depends on m || IPV6 != m
index 1fdb473..6294a70 100644 (file)
@@ -801,6 +801,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net,
        INIT_LIST_HEAD(&id_priv->mc_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
        id_priv->id.route.addr.dev_addr.net = get_net(net);
+       id_priv->seq_num &= 0x00ffffff;
 
        return &id_priv->id;
 }
@@ -4457,7 +4458,7 @@ out:
        return skb->len;
 }
 
-static const struct rdma_nl_cbs cma_cb_table[] = {
+static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
        [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
 };
 
index a1d687a..66f0268 100644 (file)
@@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
 }
 #endif
 
-struct ib_device *__ib_device_get_by_index(u32 ifindex);
+struct ib_device *ib_device_get_by_index(u32 ifindex);
 /* RDMA device netlink */
 void nldev_init(void);
 void nldev_exit(void);
index 84fc32a..4655206 100644 (file)
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
        return 0;
 }
 
-struct ib_device *__ib_device_get_by_index(u32 index)
+static struct ib_device *__ib_device_get_by_index(u32 index)
 {
        struct ib_device *device;
 
@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
        return NULL;
 }
 
+/*
+ * The caller is responsible for dropping the reference by calling put_device().
+ */
+struct ib_device *ib_device_get_by_index(u32 index)
+{
+       struct ib_device *device;
+
+       down_read(&lists_rwsem);
+       device = __ib_device_get_by_index(index);
+       if (device)
+               get_device(&device->dev);
+
+       up_read(&lists_rwsem);
+       return device;
+}
+
 static struct ib_device *__ib_device_get_by_name(const char *name)
 {
        struct ib_device *device;
@@ -1146,7 +1162,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
 }
 EXPORT_SYMBOL(ib_get_net_dev_by_params);
 
-static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
+static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
        [RDMA_NL_LS_OP_RESOLVE] = {
                .doit = ib_nl_handle_resolve_resp,
                .flags = RDMA_NL_ADMIN_PERM,
@@ -1253,5 +1269,5 @@ static void __exit ib_core_cleanup(void)
 
 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
 
-module_init(ib_core_init);
+subsys_initcall(ib_core_init);
 module_exit(ib_core_cleanup);
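
The reference-counted lookup introduced here must always be balanced with put_device(), as the nldev conversions below do. A minimal sketch of a caller (the surrounding function and the work done on the device are placeholders):

static int my_query_by_index(u32 index)
{
	struct ib_device *device;
	int ret = 0;

	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	/* ... use 'device' while the reference is held ... */

	put_device(&device->dev);	/* drop the reference taken above */
	return ret;
}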
index e9e189e..5d676cf 100644 (file)
@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
 }
 EXPORT_SYMBOL(iwcm_reject_msg);
 
-static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
+static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
        [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
        [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
        [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
index 2fae850..0dcd1aa 100644 (file)
@@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
 
-       device = __ib_device_get_by_index(index);
+       device = ib_device_get_by_index(index);
        if (!device)
                return -EINVAL;
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (!msg)
-               return -ENOMEM;
+       if (!msg) {
+               err = -ENOMEM;
+               goto err;
+       }
 
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, 0);
 
        err = fill_dev_info(msg, device);
-       if (err) {
-               nlmsg_free(msg);
-               return err;
-       }
+       if (err)
+               goto err_free;
 
        nlmsg_end(msg, nlh);
 
+       put_device(&device->dev);
        return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+       nlmsg_free(msg);
+err:
+       put_device(&device->dev);
+       return err;
 }
 
 static int _nldev_get_dumpit(struct ib_device *device,
@@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                return -EINVAL;
 
        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-       device = __ib_device_get_by_index(index);
+       device = ib_device_get_by_index(index);
        if (!device)
                return -EINVAL;
 
        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
-       if (!rdma_is_port_valid(device, port))
-               return -EINVAL;
+       if (!rdma_is_port_valid(device, port)) {
+               err = -EINVAL;
+               goto err;
+       }
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (!msg)
-               return -ENOMEM;
+       if (!msg) {
+               err = -ENOMEM;
+               goto err;
+       }
 
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, 0);
 
        err = fill_port_info(msg, device, port);
-       if (err) {
-               nlmsg_free(msg);
-               return err;
-       }
+       if (err)
+               goto err_free;
 
        nlmsg_end(msg, nlh);
+       put_device(&device->dev);
 
        return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+       nlmsg_free(msg);
+err:
+       put_device(&device->dev);
+       return err;
 }
 
 static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
                return -EINVAL;
 
        ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-       device = __ib_device_get_by_index(ifindex);
+       device = ib_device_get_by_index(ifindex);
        if (!device)
                return -EINVAL;
 
@@ -299,11 +315,13 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
                nlmsg_end(skb, nlh);
        }
 
-out:   cb->args[0] = idx;
+out:
+       put_device(&device->dev);
+       cb->args[0] = idx;
        return skb->len;
 }
 
-static const struct rdma_nl_cbs nldev_cb_table[] = {
+static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
        [RDMA_NLDEV_CMD_GET] = {
                .doit = nldev_get_doit,
                .dump = nldev_get_dumpit,
index 23278ed..59b2f96 100644 (file)
@@ -386,6 +386,9 @@ int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
        if (ret)
                return ret;
 
+       if (!qp->qp_sec)
+               return 0;
+
        mutex_lock(&real_qp->qp_sec->mutex);
        ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
                                          qp->qp_sec);
@@ -417,8 +420,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)
 
 int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
 {
+       u8 i = rdma_start_port(dev);
+       bool is_ib = false;
        int ret;
 
+       while (i <= rdma_end_port(dev) && !is_ib)
+               is_ib = rdma_protocol_ib(dev, i++);
+
+       /* If this isn't an IB device don't create the security context */
+       if (!is_ib)
+               return 0;
+
        qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
        if (!qp->qp_sec)
                return -ENOMEM;
@@ -441,6 +453,10 @@ EXPORT_SYMBOL(ib_create_qp_security);
 
 void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
 {
+       /* Return if not IB */
+       if (!sec)
+               return;
+
        mutex_lock(&sec->mutex);
 
        /* Remove the QP from the lists so it won't get added to
@@ -470,6 +486,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
        int ret;
        int i;
 
+       /* Return if not IB */
+       if (!sec)
+               return;
+
        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
@@ -505,6 +525,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
 {
        int i;
 
+       /* Return if not IB */
+       if (!sec)
+               return;
+
        /* If a concurrent cache update is occurring we must
         * wait until this QP security structure is processed
         * in the QP to error flow before destroying it because
@@ -557,7 +581,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
 {
        int ret = 0;
        struct ib_ports_pkeys *tmp_pps;
-       struct ib_ports_pkeys *new_pps;
+       struct ib_ports_pkeys *new_pps = NULL;
        struct ib_qp *real_qp = qp->real_qp;
        bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
                           real_qp->qp_type == IB_QPT_GSI ||
@@ -565,18 +589,27 @@ int ib_security_modify_qp(struct ib_qp *qp,
        bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
                           (qp_attr_mask & IB_QP_ALT_PATH));
 
+       WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
+                  rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
+                  !real_qp->qp_sec),
+                  "%s: QP security is not initialized for IB QP: %d\n",
+                  __func__, real_qp->qp_num);
+
        /* The port/pkey settings are maintained only for the real QP. Open
         * handles on the real QP will be in the shared_qp_list. When
         * enforcing security on the real QP all the shared QPs will be
         * checked as well.
         */
 
-       if (pps_change && !special_qp) {
+       if (pps_change && !special_qp && real_qp->qp_sec) {
                mutex_lock(&real_qp->qp_sec->mutex);
                new_pps = get_new_pps(real_qp,
                                      qp_attr,
                                      qp_attr_mask);
-
+               if (!new_pps) {
+                       mutex_unlock(&real_qp->qp_sec->mutex);
+                       return -ENOMEM;
+               }
                /* Add this QP to the lists for the new port
                 * and pkey settings before checking for permission
                 * in case there is a concurrent cache update
@@ -600,7 +633,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
                                                 qp_attr_mask,
                                                 udata);
 
-       if (pps_change && !special_qp) {
+       if (new_pps) {
                /* Clean up the lists and free the appropriate
                 * ports_pkeys structure.
                 */
@@ -631,6 +664,9 @@ int ib_security_pkey_access(struct ib_device *dev,
        u16 pkey;
        int ret;
 
+       if (!rdma_protocol_ib(dev, port_num))
+               return 0;
+
        ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
        if (ret)
                return ret;
@@ -665,6 +701,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 {
        int ret;
 
+       if (!rdma_protocol_ib(agent->device, agent->port_num))
+               return 0;
+
        ret = security_ib_alloc_security(&agent->security);
        if (ret)
                return ret;
@@ -690,6 +729,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 
 void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 {
+       if (!rdma_protocol_ib(agent->device, agent->port_num))
+               return;
+
        security_ib_free_security(agent->security);
        if (agent->lsm_nb_reg)
                unregister_lsm_notifier(&agent->lsm_nb);
@@ -697,8 +739,14 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 
 int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
 {
-       if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
-               return -EACCES;
+       if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
+               return 0;
+
+       if (map->agent.qp->qp_type == IB_QPT_SMI) {
+               if (!map->agent.smp_allowed)
+                       return -EACCES;
+               return 0;
+       }
 
        return ib_security_pkey_access(map->agent.device,
                                       map->agent.port_num,
index 21e60b1..130606c 100644 (file)
@@ -191,7 +191,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        sg_list_start = umem->sg_head.sgl;
 
        while (npages) {
-               ret = get_user_pages(cur_base,
+               ret = get_user_pages_longterm(cur_base,
                                     min_t(unsigned long, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
                                     gup_flags, page_list, vma_list);
index 16d5571..840b240 100644 (file)
@@ -1971,6 +1971,12 @@ static int modify_qp(struct ib_uverbs_file *file,
                goto release_qp;
        }
 
+       if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
+           !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
+               ret = -EINVAL;
+               goto release_qp;
+       }
+
        attr->qp_state            = cmd->base.qp_state;
        attr->cur_qp_state        = cmd->base.cur_qp_state;
        attr->path_mtu            = cmd->base.path_mtu;
@@ -2068,8 +2074,8 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
                return -EOPNOTSUPP;
 
        if (ucore->inlen > sizeof(cmd)) {
-               if (ib_is_udata_cleared(ucore, sizeof(cmd),
-                                       ucore->inlen - sizeof(cmd)))
+               if (!ib_is_udata_cleared(ucore, sizeof(cmd),
+                                        ucore->inlen - sizeof(cmd)))
                        return -EOPNOTSUPP;
        }
 
index 3fb8fb6..e36d27e 100644 (file)
@@ -1438,7 +1438,8 @@ int ib_close_qp(struct ib_qp *qp)
        spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
 
        atomic_dec(&real_qp->usecnt);
-       ib_close_shared_qp_security(qp->qp_sec);
+       if (qp->qp_sec)
+               ib_close_shared_qp_security(qp->qp_sec);
        kfree(qp);
 
        return 0;
index ea55e95..6f2b261 100644 (file)
@@ -395,6 +395,11 @@ next_cqe:
 
 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
 {
+       if (DRAIN_CQE(cqe)) {
+               WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
+               return 0;
+       }
+
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;
 
@@ -489,7 +494,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
        /*
         * Special cqe for drain WR completions...
         */
-       if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+       if (DRAIN_CQE(hw_cqe)) {
                *cookie = CQE_DRAIN_COOKIE(hw_cqe);
                *cqe = *hw_cqe;
                goto skip_cqe;
@@ -566,10 +571,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
-               if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+               if (unlikely(!CQE_STATUS(hw_cqe) &&
+                            CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
                        t4_set_wq_in_error(wq);
-                       hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
-                       goto proc_cqe;
+                       hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
                }
                goto proc_cqe;
        }
@@ -743,9 +748,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
-               case C4IW_DRAIN_OPCODE:
-                       wc->opcode = IB_WC_SEND;
-                       break;
                default:
                        pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
index 470f97a..65dd372 100644 (file)
@@ -693,8 +693,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
        return IB_QPS_ERR;
 }
 
-#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
-
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
index 5ee7fe4..d5c92fc 100644 (file)
@@ -790,21 +790,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
        return 0;
 }
 
-static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+static int ib_to_fw_opcode(int ib_opcode)
+{
+       int opcode;
+
+       switch (ib_opcode) {
+       case IB_WR_SEND_WITH_INV:
+               opcode = FW_RI_SEND_WITH_INV;
+               break;
+       case IB_WR_SEND:
+               opcode = FW_RI_SEND;
+               break;
+       case IB_WR_RDMA_WRITE:
+               opcode = FW_RI_RDMA_WRITE;
+               break;
+       case IB_WR_RDMA_READ:
+       case IB_WR_RDMA_READ_WITH_INV:
+               opcode = FW_RI_READ_REQ;
+               break;
+       case IB_WR_REG_MR:
+               opcode = FW_RI_FAST_REGISTER;
+               break;
+       case IB_WR_LOCAL_INV:
+               opcode = FW_RI_LOCAL_INV;
+               break;
+       default:
+               opcode = -EINVAL;
+       }
+       return opcode;
+}
+
+static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
 {
        struct t4_cqe cqe = {};
        struct c4iw_cq *schp;
        unsigned long flag;
        struct t4_cq *cq;
+       int opcode;
 
        schp = to_c4iw_cq(qhp->ibqp.send_cq);
        cq = &schp->cq;
 
+       opcode = ib_to_fw_opcode(wr->opcode);
+       if (opcode < 0)
+               return opcode;
+
        cqe.u.drain_cookie = wr->wr_id;
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
-                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_OPCODE_V(opcode) |
                                 CQE_TYPE_V(1) |
                                 CQE_SWCQE_V(1) |
+                                CQE_DRAIN_V(1) |
                                 CQE_QPID_V(qhp->wq.sq.qid));
 
        spin_lock_irqsave(&schp->lock, flag);
@@ -819,6 +855,23 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
                                           schp->ibcq.cq_context);
                spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
        }
+       return 0;
+}
+
+static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
+                               struct ib_send_wr **bad_wr)
+{
+       int ret = 0;
+
+       while (wr) {
+               ret = complete_sq_drain_wr(qhp, wr);
+               if (ret) {
+                       *bad_wr = wr;
+                       break;
+               }
+               wr = wr->next;
+       }
+       return ret;
 }
 
 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -833,9 +886,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
 
        cqe.u.drain_cookie = wr->wr_id;
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
-                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_OPCODE_V(FW_RI_SEND) |
                                 CQE_TYPE_V(0) |
                                 CQE_SWCQE_V(1) |
+                                CQE_DRAIN_V(1) |
                                 CQE_QPID_V(qhp->wq.sq.qid));
 
        spin_lock_irqsave(&rchp->lock, flag);
@@ -852,6 +906,14 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
        }
 }
 
+static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+       while (wr) {
+               complete_rq_drain_wr(qhp, wr);
+               wr = wr->next;
+       }
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
 {
@@ -868,9 +930,14 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
        qhp = to_c4iw_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
-       if (t4_wq_in_error(&qhp->wq)) {
+
+       /*
+        * If the qp has been flushed, then just insert a special
+        * drain cqe.
+        */
+       if (qhp->wq.flushed) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               complete_sq_drain_wr(qhp, wr);
+               err = complete_sq_drain_wrs(qhp, wr, bad_wr);
                return err;
        }
        num_wrs = t4_sq_avail(&qhp->wq);
@@ -1011,9 +1078,14 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
        qhp = to_c4iw_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
-       if (t4_wq_in_error(&qhp->wq)) {
+
+       /*
+        * If the qp has been flushed, then just insert a special
+        * drain cqe.
+        */
+       if (qhp->wq.flushed) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               complete_rq_drain_wr(qhp, wr);
+               complete_rq_drain_wrs(qhp, wr);
                return err;
        }
        num_wrs = t4_rq_avail(&qhp->wq);
@@ -1285,21 +1357,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
        spin_unlock_irqrestore(&rchp->lock, flag);
 
        if (schp == rchp) {
-               if (t4_clear_cq_armed(&rchp->cq) &&
-                   (rq_flushed || sq_flushed)) {
+               if ((rq_flushed || sq_flushed) &&
+                   t4_clear_cq_armed(&rchp->cq)) {
                        spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                        (*rchp->ibcq.comp_handler)(&rchp->ibcq,
                                                   rchp->ibcq.cq_context);
                        spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
                }
        } else {
-               if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+               if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
                        spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                        (*rchp->ibcq.comp_handler)(&rchp->ibcq,
                                                   rchp->ibcq.cq_context);
                        spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
                }
-               if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+               if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
                        spin_lock_irqsave(&schp->comp_handler_lock, flag);
                        (*schp->ibcq.comp_handler)(&schp->ibcq,
                                                   schp->ibcq.cq_context);
index e9ea942..79e8ee1 100644 (file)
@@ -197,6 +197,11 @@ struct t4_cqe {
 #define CQE_SWCQE_G(x)    ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
 #define CQE_SWCQE_V(x)   ((x)<<CQE_SWCQE_S)
 
+#define CQE_DRAIN_S       10
+#define CQE_DRAIN_M       0x1
+#define CQE_DRAIN_G(x)    ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
+#define CQE_DRAIN_V(x)   ((x)<<CQE_DRAIN_S)
+
 #define CQE_STATUS_S      5
 #define CQE_STATUS_M      0x1F
 #define CQE_STATUS_G(x)   ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
@@ -213,6 +218,7 @@ struct t4_cqe {
 #define CQE_OPCODE_V(x)   ((x)<<CQE_OPCODE_S)
 
 #define SW_CQE(x)         (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define DRAIN_CQE(x)      (CQE_DRAIN_G(be32_to_cpu((x)->header)))
 #define CQE_QPID(x)       (CQE_QPID_G(be32_to_cpu((x)->header)))
 #define CQE_TYPE(x)       (CQE_TYPE_G(be32_to_cpu((x)->header)))
 #define SQ_TYPE(x)       (CQE_TYPE((x)))
index 7750a9c..1df7da4 100644 (file)
@@ -763,11 +763,11 @@ static int complete_subctxt(struct hfi1_filedata *fd)
        }
 
        if (ret) {
-               hfi1_rcd_put(fd->uctxt);
-               fd->uctxt = NULL;
                spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
                __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
                spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
+               hfi1_rcd_put(fd->uctxt);
+               fd->uctxt = NULL;
        }
 
        return ret;
index 4a9b4d7..8ce9118 100644 (file)
@@ -1131,7 +1131,6 @@ struct hfi1_devdata {
        u16 pcie_lnkctl;
        u16 pcie_devctl2;
        u32 pci_msix0;
-       u32 pci_lnkctl3;
        u32 pci_tph2;
 
        /*
index 09e50fd..8c7e7a6 100644 (file)
@@ -411,15 +411,12 @@ int restore_pci_variables(struct hfi1_devdata *dd)
        if (ret)
                goto error;
 
-       ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
-                                    dd->pci_lnkctl3);
-       if (ret)
-               goto error;
-
-       ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
-       if (ret)
-               goto error;
-
+       if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+               ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+                                            dd->pci_tph2);
+               if (ret)
+                       goto error;
+       }
        return 0;
 
 error:
@@ -469,15 +466,12 @@ int save_pci_variables(struct hfi1_devdata *dd)
        if (ret)
                goto error;
 
-       ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
-                                   &dd->pci_lnkctl3);
-       if (ret)
-               goto error;
-
-       ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
-       if (ret)
-               goto error;
-
+       if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+               ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+                                           &dd->pci_tph2);
+               if (ret)
+                       goto error;
+       }
        return 0;
 
 error:
index fd01a76..af5f793 100644 (file)
@@ -814,7 +814,7 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        struct hfi1_16b_header *hdr = &opa_hdr->opah;
        struct ib_other_headers *ohdr;
-       u32 bth0, bth1;
+       u32 bth0, bth1 = 0;
        u16 len, pkey;
        u8 becn = !!is_fecn;
        u8 l4 = OPA_16B_L4_IB_LOCAL;
index 3e4c525..a40ec93 100644 (file)
@@ -162,14 +162,10 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 {
        int i;
        struct device *dev = hr_dev->dev;
-       u32 bits_per_long = BITS_PER_LONG;
 
        if (buf->nbufs == 1) {
                dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
        } else {
-               if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
-                       vunmap(buf->direct.buf);
-
                for (i = 0; i < buf->nbufs; ++i)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(dev, 1 << buf->page_shift,
@@ -185,9 +181,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 {
        int i = 0;
        dma_addr_t t;
-       struct page **pages;
        struct device *dev = hr_dev->dev;
-       u32 bits_per_long = BITS_PER_LONG;
        u32 page_size = 1 << page_shift;
        u32 order;
 
@@ -236,23 +230,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
                        buf->page_list[i].map = t;
                        memset(buf->page_list[i].buf, 0, page_size);
                }
-               if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
-                       pages = kmalloc_array(buf->nbufs, sizeof(*pages),
-                                             GFP_KERNEL);
-                       if (!pages)
-                               goto err_free;
-
-                       for (i = 0; i < buf->nbufs; ++i)
-                               pages[i] = virt_to_page(buf->page_list[i].buf);
-
-                       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
-                                              PAGE_KERNEL);
-                       kfree(pages);
-                       if (!buf->direct.buf)
-                               goto err_free;
-               } else {
-                       buf->direct.buf = NULL;
-               }
        }
 
        return 0;
index 01d3d69..b154ce4 100644 (file)
@@ -726,11 +726,9 @@ static inline struct hns_roce_qp
 
 static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
 {
-       u32 bits_per_long_val = BITS_PER_LONG;
        u32 page_size = 1 << buf->page_shift;
 
-       if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
-           buf->nbufs == 1)
+       if (buf->nbufs == 1)
                return (char *)(buf->direct.buf) + offset;
        else
                return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
index 8b733a6..0eeabfb 100644 (file)
@@ -224,6 +224,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
                        sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg = 0;
+                       memset(chunk->buf, 0, sizeof(chunk->buf));
                        list_add_tail(&chunk->list, &hem->chunk_list);
                }
 
@@ -240,8 +241,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
                if (!buf)
                        goto fail;
 
-               sg_set_buf(mem, buf, PAGE_SIZE << order);
-               WARN_ON(mem->offset);
+               chunk->buf[chunk->npages] = buf;
                sg_dma_len(mem) = PAGE_SIZE << order;
 
                ++chunk->npages;
@@ -267,8 +267,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
        list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i)
                        dma_free_coherent(hr_dev->dev,
-                                  chunk->mem[i].length,
-                                  lowmem_page_address(sg_page(&chunk->mem[i])),
+                                  sg_dma_len(&chunk->mem[i]),
+                                  chunk->buf[i],
                                   sg_dma_address(&chunk->mem[i]));
                kfree(chunk);
        }
@@ -722,11 +722,12 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
        struct hns_roce_hem_chunk *chunk;
        struct hns_roce_hem_mhop mhop;
        struct hns_roce_hem *hem;
-       struct page *page = NULL;
+       void *addr = NULL;
        unsigned long mhop_obj = obj;
        unsigned long obj_per_chunk;
        unsigned long idx_offset;
        int offset, dma_offset;
+       int length;
        int i, j;
        u32 hem_idx = 0;
 
@@ -763,25 +764,25 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 
        list_for_each_entry(chunk, &hem->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
+                       length = sg_dma_len(&chunk->mem[i]);
                        if (dma_handle && dma_offset >= 0) {
-                               if (sg_dma_len(&chunk->mem[i]) >
-                                   (u32)dma_offset)
+                               if (length > (u32)dma_offset)
                                        *dma_handle = sg_dma_address(
                                                &chunk->mem[i]) + dma_offset;
-                               dma_offset -= sg_dma_len(&chunk->mem[i]);
+                               dma_offset -= length;
                        }
 
-                       if (chunk->mem[i].length > (u32)offset) {
-                               page = sg_page(&chunk->mem[i]);
+                       if (length > (u32)offset) {
+                               addr = chunk->buf[i] + offset;
                                goto out;
                        }
-                       offset -= chunk->mem[i].length;
+                       offset -= length;
                }
        }
 
 out:
        mutex_unlock(&table->mutex);
-       return page ? lowmem_page_address(page) + offset : NULL;
+       return addr;
 }
 EXPORT_SYMBOL_GPL(hns_roce_table_find);
 
index db66db1..e8850d5 100644 (file)
@@ -78,6 +78,7 @@ struct hns_roce_hem_chunk {
        int                      npages;
        int                      nsg;
        struct scatterlist       mem[HNS_ROCE_HEM_CHUNK_LEN];
+       void                     *buf[HNS_ROCE_HEM_CHUNK_LEN];
 };
 
 struct hns_roce_hem {
index 8f719c0..8e18445 100644 (file)
@@ -1126,9 +1126,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 {
        struct hns_roce_v2_mpt_entry *mpt_entry;
        struct scatterlist *sg;
+       u64 page_addr;
        u64 *pages;
+       int i, j;
+       int len;
        int entry;
-       int i;
 
        mpt_entry = mb_buf;
        memset(mpt_entry, 0, sizeof(*mpt_entry));
@@ -1186,14 +1188,20 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 
        i = 0;
        for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
-               pages[i] = ((u64)sg_dma_address(sg)) >> 6;
-
-               /* Record the first 2 entry directly to MTPT table */
-               if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
-                       break;
-               i++;
+               len = sg_dma_len(sg) >> PAGE_SHIFT;
+               for (j = 0; j < len; ++j) {
+                       page_addr = sg_dma_address(sg) +
+                                   (j << mr->umem->page_shift);
+                       pages[i] = page_addr >> 6;
+
+                       /* Record the first 2 entry directly to MTPT table */
+                       if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+                               goto found;
+                       i++;
+               }
        }
 
+found:
        mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
        roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
                       V2_MPT_BYTE_56_PA0_H_S,
index 493d6ef..77870f9 100644 (file)
@@ -1043,7 +1043,7 @@ negotiate_done:
  * i40iw_schedule_cm_timer
  * @@cm_node: connection's node
  * @sqbuf: buffer to send
- * @type: if it es send ot close
+ * @type: if it is send or close
  * @send_retrans: if rexmits to be done
  * @close_when_complete: is cm_node to be removed
  *
@@ -1067,7 +1067,8 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
 
        new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
        if (!new_send) {
-               i40iw_free_sqbuf(vsi, (void *)sqbuf);
+               if (type != I40IW_TIMER_TYPE_CLOSE)
+                       i40iw_free_sqbuf(vsi, (void *)sqbuf);
                return -ENOMEM;
        }
        new_send->retrycount = I40IW_DEFAULT_RETRYS;
@@ -1082,7 +1083,6 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
                new_send->timetosend += (HZ / 10);
                if (cm_node->close_entry) {
                        kfree(new_send);
-                       i40iw_free_sqbuf(vsi, (void *)sqbuf);
                        i40iw_pr_err("already close entry\n");
                        return -EINVAL;
                }
@@ -2947,8 +2947,6 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
                        loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
                        cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
                        loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
-                       loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
-                       i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
                }
                return cm_node;
        }
@@ -3689,11 +3687,16 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        cm_id->add_ref(cm_id);
        i40iw_add_ref(&iwqp->ibqp);
 
-       i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
-
        attr.qp_state = IB_QPS_RTS;
        cm_node->qhash_set = false;
        i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+       cm_node->accelerated = 1;
+       status =
+               i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+       if (status)
+               i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - ESTABLISHED\n");
+
        if (cm_node->loopbackpartner) {
                cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
 
@@ -3704,7 +3707,6 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
        }
 
-       cm_node->accelerated = 1;
        if (cm_node->accept_pend) {
                atomic_dec(&cm_node->listener->pend_accepts_cnt);
                cm_node->accept_pend = 0;
@@ -3864,6 +3866,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                        goto err;
        }
 
+       if (cm_node->loopbackpartner) {
+               cm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD;
+               i40iw_create_event(cm_node->loopbackpartner,
+                                  I40IW_CM_EVENT_MPA_REQ);
+       }
+
        i40iw_debug(cm_node->dev,
                    I40IW_DEBUG_CM,
                    "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
@@ -4044,9 +4052,6 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
        dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
        if (iwqp->page)
                kunmap(iwqp->page);
-       status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
-       if (status)
-               i40iw_pr_err("send cm event\n");
 
        memset(&attr, 0, sizeof(attr));
        attr.qp_state = IB_QPS_RTS;
@@ -4054,6 +4059,10 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
        i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
 
        cm_node->accelerated = 1;
+       status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+                                    0);
+       if (status)
+               i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - CONNECT_REPLY\n");
 
        return;
 
index d88c6cf..da9821a 100644 (file)
@@ -513,7 +513,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
 
        ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
                                          &cqp->sdbuf,
-                                         128,
+                                         I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
                                          I40IW_SD_BUF_ALIGNMENT);
 
        if (ret_code)
@@ -596,14 +596,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
 }
 
 /**
- * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
- * @cqp: struct for cqp hw
- * @wqe_idx: we index of cqp ring
+ * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
+ * @cqp: pointer to CQP structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index for next WQE on CQP SQ
  */
-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
+                                              u64 scratch, u32 *wqe_idx)
 {
        u64 *wqe = NULL;
-       u32     wqe_idx;
        enum i40iw_status_code ret_code;
 
        if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
@@ -616,20 +617,32 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
                            cqp->sq_ring.size);
                return NULL;
        }
-       I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+       I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
        cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
        if (ret_code)
                return NULL;
-       if (!wqe_idx)
+       if (!*wqe_idx)
                cqp->polarity = !cqp->polarity;
 
-       wqe = cqp->sq_base[wqe_idx].elem;
-       cqp->scratch_array[wqe_idx] = scratch;
+       wqe = cqp->sq_base[*wqe_idx].elem;
+       cqp->scratch_array[*wqe_idx] = scratch;
        I40IW_CQP_INIT_WQE(wqe);
 
        return wqe;
 }
 
+/**
+ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private data for CQP WQE
+ */
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+{
+       u32 wqe_idx;
+
+       return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+}
+
 /**
  * i40iw_sc_cqp_destroy - destroy cqp during close
  * @cqp: struct for cqp hw
@@ -3587,8 +3600,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
        u64 *wqe;
        int mem_entries, wqe_entries;
        struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
+       u64 offset;
+       u32 wqe_idx;
 
-       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
 
@@ -3601,8 +3616,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
                 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
 
        if (mem_entries) {
-               memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
-               data = sdbuf->pa;
+               offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
+               memcpy((char *)sdbuf->va + offset, &info->entry[3],
+                      mem_entries << 4);
+               data = (u64)sdbuf->pa + offset;
        } else {
                data = 0;
        }
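
The cqp_sds_wqe_fill() change above stops concurrent CQP commands from sharing a single 128-byte update-SD bounce buffer: the buffer is now allocated as I40IW_UPDATE_SD_BUF_SIZE * sq_size, and each WQE copies its payload into its own slice at wqe_idx * I40IW_UPDATE_SD_BUF_SIZE. A minimal sketch of that per-entry-slice pattern, using hypothetical names (foo_ring, SLICE_SIZE) and dma_alloc_coherent() in place of the driver's own allocator:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/types.h>

#define SLICE_SIZE      128                     /* illustrative slice size */

struct foo_ring {
        struct device *dev;
        u32 depth;                              /* number of WQEs in the ring */
        void *buf_va;                           /* CPU address of the bounce buffer */
        dma_addr_t buf_pa;                      /* DMA address handed to hardware */
};

/* One slice per ring entry, so in-flight commands never overwrite each other. */
static int foo_ring_alloc_buf(struct foo_ring *ring)
{
        ring->buf_va = dma_alloc_coherent(ring->dev,
                                          (size_t)SLICE_SIZE * ring->depth,
                                          &ring->buf_pa, GFP_KERNEL);
        return ring->buf_va ? 0 : -ENOMEM;
}

/* Copy the payload into the slice owned by wqe_idx and return its DMA address. */
static dma_addr_t foo_ring_fill_slice(struct foo_ring *ring, u32 wqe_idx,
                                      const void *payload, size_t len)
{
        size_t offset = (size_t)wqe_idx * SLICE_SIZE;

        memcpy((char *)ring->buf_va + offset, payload, len);
        return ring->buf_pa + offset;
}
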
index 65ec39e..029083c 100644 (file)
 #define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
 
 #define I40IWQPC_ARPIDX_SHIFT 48
-#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT)
+#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT)
 
 #define I40IWQPC_FLOWLABEL_SHIFT 0
 #define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)
@@ -1526,7 +1526,7 @@ enum i40iw_alignment {
        I40IW_AEQ_ALIGNMENT =           0x100,
        I40IW_CEQ_ALIGNMENT =           0x100,
        I40IW_CQ0_ALIGNMENT =           0x100,
-       I40IW_SD_BUF_ALIGNMENT =        0x100
+       I40IW_SD_BUF_ALIGNMENT =        0x80
 };
 
 #define I40IW_WQE_SIZE_64      64
@@ -1534,6 +1534,8 @@ enum i40iw_alignment {
 #define I40IW_QP_WQE_MIN_SIZE  32
 #define I40IW_QP_WQE_MAX_SIZE  128
 
+#define I40IW_UPDATE_SD_BUF_SIZE 128
+
 #define I40IW_CQE_QTYPE_RQ 0
 #define I40IW_CQE_QTYPE_SQ 1
 
index 313bfb9..4975f3e 100644 (file)
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
                goto err_free_mr;
 
        mr->max_pages = max_num_sg;
-
        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
        return &mr->ibmr;
 
 err_free_pl:
+       mr->ibmr.device = pd->device;
        mlx4_free_priv_pages(mr);
 err_free_mr:
        (void) mlx4_mr_free(dev->dev, &mr->mmr);
index 013049b..caf490a 100644 (file)
@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
                return (-EOPNOTSUPP);
        }
 
+       if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4      |
+                                         MLX4_IB_RX_HASH_DST_IPV4      |
+                                         MLX4_IB_RX_HASH_SRC_IPV6      |
+                                         MLX4_IB_RX_HASH_DST_IPV6      |
+                                         MLX4_IB_RX_HASH_SRC_PORT_TCP  |
+                                         MLX4_IB_RX_HASH_DST_PORT_TCP  |
+                                         MLX4_IB_RX_HASH_SRC_PORT_UDP  |
+                                         MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+               pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
+                        ucmd->rx_hash_fields_mask);
+               return (-EOPNOTSUPP);
+       }
+
        if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
            (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
                rss_ctx->flags = MLX4_RSS_IPV4;
@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
                        return (-EOPNOTSUPP);
                }
 
-               if (rss_ctx->flags & MLX4_RSS_IPV4) {
+               if (rss_ctx->flags & MLX4_RSS_IPV4)
                        rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
-               } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+               if (rss_ctx->flags & MLX4_RSS_IPV6)
                        rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
-               } else {
+               if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
                        pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
                        return (-EOPNOTSUPP);
                }
@@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
 
        if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
            (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
-               if (rss_ctx->flags & MLX4_RSS_IPV4) {
+               if (rss_ctx->flags & MLX4_RSS_IPV4)
                        rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
-               } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+               if (rss_ctx->flags & MLX4_RSS_IPV6)
                        rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
-               } else {
+               if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
                        pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
                        return (-EOPNOTSUPP);
                }
-
        } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
                   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
                pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
index 470995f..6f6712f 100644 (file)
@@ -47,17 +47,6 @@ int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
        return err;
 }
 
-int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
-                               bool reset, void *out, int out_size)
-{
-       u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
-
-       MLX5_SET(query_cong_statistics_in, in, opcode,
-                MLX5_CMD_OP_QUERY_CONG_STATISTICS);
-       MLX5_SET(query_cong_statistics_in, in, clear, reset);
-       return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
                               void *out, int out_size)
 {
index af4c245..78ffded 100644 (file)
@@ -37,8 +37,6 @@
 #include <linux/mlx5/driver.h>
 
 int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
-int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
-                               bool reset, void *out, int out_size);
 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
                               void *out, int out_size);
 int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
index 543d0a4..262c1aa 100644 (file)
@@ -1324,7 +1324,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
                return err;
 
        if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
-           !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+           (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+            !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
                return err;
 
        mutex_lock(&dev->lb_mutex);
@@ -1342,7 +1343,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
        mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
 
        if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
-           !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+           (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+            !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
                return;
 
        mutex_lock(&dev->lb_mutex);
@@ -1463,6 +1465,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        }
 
        INIT_LIST_HEAD(&context->vma_private_list);
+       mutex_init(&context->vma_private_list_mutex);
        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);
 
@@ -1624,7 +1627,9 @@ static void  mlx5_ib_vma_close(struct vm_area_struct *area)
         * mlx5_ib_disassociate_ucontext().
         */
        mlx5_ib_vma_priv_data->vma = NULL;
+       mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
        list_del(&mlx5_ib_vma_priv_data->list);
+       mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
        kfree(mlx5_ib_vma_priv_data);
 }
 
@@ -1644,10 +1649,13 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
                return -ENOMEM;
 
        vma_prv->vma = vma;
+       vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
        vma->vm_private_data = vma_prv;
        vma->vm_ops =  &mlx5_ib_vm_ops;
 
+       mutex_lock(&ctx->vma_private_list_mutex);
        list_add(&vma_prv->list, vma_head);
+       mutex_unlock(&ctx->vma_private_list_mutex);
 
        return 0;
 }
@@ -1690,6 +1698,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
         * mlx5_ib_vma_close.
         */
        down_write(&owning_mm->mmap_sem);
+       mutex_lock(&context->vma_private_list_mutex);
        list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
                                 list) {
                vma = vma_private->vma;
@@ -1704,6 +1713,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
                list_del(&vma_private->list);
                kfree(vma_private);
        }
+       mutex_unlock(&context->vma_private_list_mutex);
        up_write(&owning_mm->mmap_sem);
        mmput(owning_mm);
        put_task_struct(owning_process);
@@ -3737,34 +3747,6 @@ free:
        return ret;
 }
 
-static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev,
-                                      struct mlx5_ib_port *port,
-                                      struct rdma_hw_stats *stats)
-{
-       int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
-       void *out;
-       int ret, i;
-       int offset = port->cnts.num_q_counters;
-
-       out = kvzalloc(outlen, GFP_KERNEL);
-       if (!out)
-               return -ENOMEM;
-
-       ret = mlx5_cmd_query_cong_counter(dev->mdev, false, out, outlen);
-       if (ret)
-               goto free;
-
-       for (i = 0; i < port->cnts.num_cong_counters; i++) {
-               stats->value[i + offset] =
-                       be64_to_cpup((__be64 *)(out +
-                                    port->cnts.offsets[i + offset]));
-       }
-
-free:
-       kvfree(out);
-       return ret;
-}
-
 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
                                struct rdma_hw_stats *stats,
                                u8 port_num, int index)
@@ -3782,7 +3764,12 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
        num_counters = port->cnts.num_q_counters;
 
        if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
-               ret = mlx5_ib_query_cong_counters(dev, port, stats);
+               ret = mlx5_lag_query_cong_counters(dev->mdev,
+                                                  stats->value +
+                                                  port->cnts.num_q_counters,
+                                                  port->cnts.num_cong_counters,
+                                                  port->cnts.offsets +
+                                                  port->cnts.num_q_counters);
                if (ret)
                        return ret;
                num_counters += port->cnts.num_cong_counters;
@@ -4173,7 +4160,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                goto err_cnt;
 
        dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
-       if (!dev->mdev->priv.uar)
+       if (IS_ERR(dev->mdev->priv.uar))
                goto err_cong;
 
        err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
@@ -4202,7 +4189,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        }
 
        if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
-           MLX5_CAP_GEN(mdev, disable_local_lb))
+           (MLX5_CAP_GEN(mdev, disable_local_lb_uc) ||
+            MLX5_CAP_GEN(mdev, disable_local_lb_mc)))
                mutex_init(&dev->lb_mutex);
 
        dev->ib_active = true;
index 6dd8cac..2c5f353 100644 (file)
@@ -115,6 +115,8 @@ enum {
 struct mlx5_ib_vma_private_data {
        struct list_head list;
        struct vm_area_struct *vma;
+       /* protect vma_private_list add/del */
+       struct mutex *vma_private_list_mutex;
 };
 
 struct mlx5_ib_ucontext {
@@ -129,6 +131,8 @@ struct mlx5_ib_ucontext {
        /* Transport Domain number */
        u32                     tdn;
        struct list_head        vma_private_list;
+       /* protect vma_private_list add/del */
+       struct mutex            vma_private_list_mutex;
 
        unsigned long           upd_xlt_page;
        /* protect ODP/KSM */
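
The mlx5 hunks above serialize every add and delete on vma_private_list with a new per-context mutex, so the mmap, vma-close and disassociate-ucontext paths can no longer modify or walk the list concurrently. A minimal sketch of that locking pattern, with hypothetical foo_ctx/foo_entry names standing in for the ucontext and its vma bookkeeping:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo_ctx {
        struct list_head entries;
        struct mutex entries_mutex;     /* protects 'entries' add/del */
};

struct foo_entry {
        struct list_head list;
};

static void foo_ctx_init(struct foo_ctx *ctx)
{
        INIT_LIST_HEAD(&ctx->entries);
        mutex_init(&ctx->entries_mutex);
}

static struct foo_entry *foo_ctx_add(struct foo_ctx *ctx)
{
        struct foo_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

        if (!e)
                return NULL;

        mutex_lock(&ctx->entries_mutex);
        list_add(&e->list, &ctx->entries);
        mutex_unlock(&ctx->entries_mutex);
        return e;
}

static void foo_ctx_del(struct foo_ctx *ctx, struct foo_entry *e)
{
        mutex_lock(&ctx->entries_mutex);
        list_del(&e->list);
        mutex_unlock(&ctx->entries_mutex);
        kfree(e);
}
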
index ee0ee1f..d109fe8 100644 (file)
@@ -1637,6 +1637,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
        MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
        MLX5_SET(mkc, mkc, umr_en, 1);
 
+       mr->ibmr.device = pd->device;
        err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
        if (err)
                goto err_destroy_psv;
index 31ad288..cffe596 100644 (file)
@@ -4362,12 +4362,11 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
 
        memset(ah_attr, 0, sizeof(*ah_attr));
 
-       ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
-       rdma_ah_set_port_num(ah_attr, path->port);
-       if (rdma_ah_get_port_num(ah_attr) == 0 ||
-           rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports))
+       if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports))
                return;
 
+       ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
+
        rdma_ah_set_port_num(ah_attr, path->port);
        rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
 
index 63bc2ef..4f7bd3b 100644 (file)
@@ -94,7 +94,7 @@ struct pvrdma_cq {
        u32 cq_handle;
        bool is_kernel;
        atomic_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
 
 struct pvrdma_id_table {
@@ -175,7 +175,7 @@ struct pvrdma_srq {
        u32 srq_handle;
        int npages;
        refcount_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
 
 struct pvrdma_qp {
@@ -197,7 +197,7 @@ struct pvrdma_qp {
        bool is_kernel;
        struct mutex mutex; /* QP state mutex. */
        atomic_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
 
 struct pvrdma_dev {
index 3562c0c..e529622 100644 (file)
@@ -179,7 +179,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
                pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
 
        atomic_set(&cq->refcnt, 1);
-       init_waitqueue_head(&cq->wait);
+       init_completion(&cq->free);
        spin_lock_init(&cq->cq_lock);
 
        memset(cmd, 0, sizeof(*cmd));
@@ -230,8 +230,9 @@ err_cq:
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-       atomic_dec(&cq->refcnt);
-       wait_event(cq->wait, !atomic_read(&cq->refcnt));
+       if (atomic_dec_and_test(&cq->refcnt))
+               complete(&cq->free);
+       wait_for_completion(&cq->free);
 
        if (!cq->is_kernel)
                ib_umem_release(cq->umem);
index 1f4e187..e926818 100644 (file)
@@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
                ibqp->event_handler(&e, ibqp->qp_context);
        }
        if (qp) {
-               atomic_dec(&qp->refcnt);
-               if (atomic_read(&qp->refcnt) == 0)
-                       wake_up(&qp->wait);
+               if (atomic_dec_and_test(&qp->refcnt))
+                       complete(&qp->free);
        }
 }
 
@@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
                ibcq->event_handler(&e, ibcq->cq_context);
        }
        if (cq) {
-               atomic_dec(&cq->refcnt);
-               if (atomic_read(&cq->refcnt) == 0)
-                       wake_up(&cq->wait);
+               if (atomic_dec_and_test(&cq->refcnt))
+                       complete(&cq->free);
        }
 }
 
@@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
        }
        if (srq) {
                if (refcount_dec_and_test(&srq->refcnt))
-                       wake_up(&srq->wait);
+                       complete(&srq->free);
        }
 }
 
@@ -539,9 +537,8 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
                if (cq && cq->ibcq.comp_handler)
                        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
                if (cq) {
-                       atomic_dec(&cq->refcnt);
-                       if (atomic_read(&cq->refcnt))
-                               wake_up(&cq->wait);
+                       if (atomic_dec_and_test(&cq->refcnt))
+                               complete(&cq->free);
                }
                pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
        }
index 10420a1..4059308 100644 (file)
@@ -246,7 +246,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
                spin_lock_init(&qp->rq.lock);
                mutex_init(&qp->mutex);
                atomic_set(&qp->refcnt, 1);
-               init_waitqueue_head(&qp->wait);
+               init_completion(&qp->free);
 
                qp->state = IB_QPS_RESET;
 
@@ -428,8 +428,16 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
 
        pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-       atomic_dec(&qp->refcnt);
-       wait_event(qp->wait, !atomic_read(&qp->refcnt));
+       if (atomic_dec_and_test(&qp->refcnt))
+               complete(&qp->free);
+       wait_for_completion(&qp->free);
+
+       if (!qp->is_kernel) {
+               if (qp->rumem)
+                       ib_umem_release(qp->rumem);
+               if (qp->sumem)
+                       ib_umem_release(qp->sumem);
+       }
 
        pvrdma_page_dir_cleanup(dev, &qp->pdir);
 
index 826ccb8..5acebb1 100644 (file)
@@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 
        spin_lock_init(&srq->lock);
        refcount_set(&srq->refcnt, 1);
-       init_waitqueue_head(&srq->wait);
+       init_completion(&srq->free);
 
        dev_dbg(&dev->pdev->dev,
                "create shared receive queue from user space\n");
@@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
        dev->srq_tbl[srq->srq_handle] = NULL;
        spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
-       refcount_dec(&srq->refcnt);
-       wait_event(srq->wait, !refcount_read(&srq->refcnt));
+       if (refcount_dec_and_test(&srq->refcnt))
+               complete(&srq->free);
+       wait_for_completion(&srq->free);
 
        /* There is no support for kernel clients, so this is safe. */
        ib_umem_release(srq->umem);
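
The pvrdma hunks above replace the open-coded "atomic_dec() then wait_event(!atomic_read())" teardown with a struct completion: the final reference drop and the wake-up become one atomic_dec_and_test()/complete() pair, so a wake-up can no longer be lost between the racy read and the sleep. A minimal sketch of that refcount-to-completion pattern, using a hypothetical foo_obj rather than the driver's CQ/QP/SRQ structures:

#include <linux/atomic.h>
#include <linux/completion.h>

struct foo_obj {
        atomic_t refcnt;
        struct completion free;
};

static void foo_obj_init(struct foo_obj *obj)
{
        atomic_set(&obj->refcnt, 1);    /* creator holds the initial reference */
        init_completion(&obj->free);
}

/* Drop a reference; only the final holder signals the waiter, exactly once. */
static void foo_obj_put(struct foo_obj *obj)
{
        if (atomic_dec_and_test(&obj->refcnt))
                complete(&obj->free);
}

/* Teardown: drop the initial reference, then wait until all users are gone. */
static void foo_obj_destroy(struct foo_obj *obj)
{
        foo_obj_put(obj);
        wait_for_completion(&obj->free);
        /* No concurrent users remain; it is now safe to release resources. */
}
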
index 87f4bd9..71ea9e2 100644 (file)
@@ -1145,6 +1145,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
        noio_flag = memalloc_noio_save();
        p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
        if (!p->tx_ring) {
+               memalloc_noio_restore(noio_flag);
                ret = -ENOMEM;
                goto err_tx;
        }
@@ -1455,8 +1456,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int e = skb_queue_empty(&priv->cm.skb_queue);
 
-       if (skb_dst(skb))
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+       skb_dst_update_pmtu(skb, mtu);
 
        skb_queue_tail(&priv->cm.skb_queue, skb);
        if (e)
index 3b96cda..e6151a2 100644 (file)
@@ -1236,13 +1236,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
                ipoib_ib_dev_down(dev);
 
        if (level == IPOIB_FLUSH_HEAVY) {
-               rtnl_lock();
                if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                        ipoib_ib_dev_stop(dev);
 
-               result = ipoib_ib_dev_open(dev);
-               rtnl_unlock();
-               if (result)
+               if (ipoib_ib_dev_open(dev))
                        return;
 
                if (netif_queue_stopped(dev))
@@ -1282,7 +1279,9 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_heavy);
 
+       rtnl_lock();
        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
+       rtnl_unlock();
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
index 12b7f91..8880351 100644 (file)
@@ -902,8 +902,8 @@ static int path_rec_start(struct net_device *dev,
        return 0;
 }
 
-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
-                          struct net_device *dev)
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+                                         struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct rdma_netdev *rn = netdev_priv(dev);
@@ -917,7 +917,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
                spin_unlock_irqrestore(&priv->lock, flags);
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
-               return;
+               return NULL;
+       }
+
+       /* To avoid race condition, make sure that the
+        * neigh will be added only once.
+        */
+       if (unlikely(!list_empty(&neigh->list))) {
+               spin_unlock_irqrestore(&priv->lock, flags);
+               return neigh;
        }
 
        path = __path_find(dev, daddr + 4);
@@ -956,7 +964,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
                        path->ah->last_send = rn->send(dev, skb, path->ah->ah,
                                                       IPOIB_QPN(daddr));
                        ipoib_neigh_put(neigh);
-                       return;
+                       return NULL;
                }
        } else {
                neigh->ah  = NULL;
@@ -973,7 +981,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 
        spin_unlock_irqrestore(&priv->lock, flags);
        ipoib_neigh_put(neigh);
-       return;
+       return NULL;
 
 err_path:
        ipoib_neigh_free(neigh);
@@ -983,6 +991,8 @@ err_drop:
 
        spin_unlock_irqrestore(&priv->lock, flags);
        ipoib_neigh_put(neigh);
+
+       return NULL;
 }
 
 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1091,8 +1101,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
        case htons(ETH_P_TIPC):
                neigh = ipoib_neigh_get(dev, phdr->hwaddr);
                if (unlikely(!neigh)) {
-                       neigh_add_path(skb, phdr->hwaddr, dev);
-                       return NETDEV_TX_OK;
+                       neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+                       if (likely(!neigh))
+                               return NETDEV_TX_OK;
                }
                break;
        case htons(ETH_P_ARP):
index 93e149e..9b3f47a 100644 (file)
@@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
                spin_lock_irqsave(&priv->lock, flags);
                if (!neigh) {
                        neigh = ipoib_neigh_alloc(daddr, dev);
-                       if (neigh) {
+                       /* Make sure that the neigh will be added only
+                        * once to mcast list.
+                        */
+                       if (neigh && list_empty(&neigh->list)) {
                                kref_get(&mcast->ah->ref);
                                neigh->ah       = mcast->ah;
                                list_add_tail(&neigh->list, &mcast->neigh_list);
index 720dfb3..1b02283 100644 (file)
@@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
 {
        struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
+       ib_drain_qp(isert_conn->qp);
        list_del_init(&isert_conn->node);
        isert_conn->cm_id = NULL;
        isert_put_conn(isert_conn);
index 8a1bd35..bfa576a 100644 (file)
@@ -1013,8 +1013,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
                return -ENOMEM;
 
        attr->qp_state = IB_QPS_INIT;
-       attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
-           IB_ACCESS_REMOTE_WRITE;
+       attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
        attr->port_num = ch->sport->port;
        attr->pkey_index = 0;
 
@@ -2078,7 +2077,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
                goto destroy_ib;
        }
 
-       guid = (__be16 *)&param->primary_path->sgid.global.interface_id;
+       guid = (__be16 *)&param->primary_path->dgid.global.interface_id;
        snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
                 be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
                 be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
index 3d8ff09..c868a87 100644 (file)
@@ -163,7 +163,7 @@ static unsigned int get_time_pit(void)
 #define GET_TIME(x)    do { x = (unsigned int)rdtsc(); } while (0)
 #define DELTA(x,y)     ((y)-(x))
 #define TIME_NAME      "TSC"
-#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
+#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV) || defined(CONFIG_TILE)
 #define GET_TIME(x)    do { x = get_cycles(); } while (0)
 #define DELTA(x,y)     ((y)-(x))
 #define TIME_NAME      "get_cycles"
index d86e595..d88d3e0 100644 (file)
@@ -229,6 +229,7 @@ static const struct xpad_device {
        { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
        { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
        { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
        { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -475,6 +476,22 @@ static const u8 xboxone_hori_init[] = {
        0x00, 0x00, 0x00, 0x80, 0x00
 };
 
+/*
+ * This packet is required for some of the PDP pads to start
+ * sending input reports. One of those pads is (0x0e6f:0x02ab).
+ */
+static const u8 xboxone_pdp_init1[] = {
+       0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
+};
+
+/*
+ * This packet is required for some of the PDP pads to start
+ * sending input reports. One of those pads is (0x0e6f:0x02ab).
+ */
+static const u8 xboxone_pdp_init2[] = {
+       0x06, 0x20, 0x00, 0x02, 0x01, 0x00
+};
+
 /*
  * A specific rumble packet is required for some PowerA pads to start
  * sending input reports. One of those pads is (0x24c6:0x543a).
@@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
        XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
        XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
        XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+       XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
+       XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
        XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
index ae47312..3d51175 100644 (file)
@@ -1651,7 +1651,7 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
                                return union_desc;
 
                        dev_err(&intf->dev,
-                               "Union descriptor to short (%d vs %zd\n)",
+                               "Union descriptor too short (%d vs %zd)\n",
                                union_desc->bLength, sizeof(*union_desc));
                        return NULL;
                }
index 6c51d40..c37aea9 100644 (file)
@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
                         twl4030_vibra_suspend, twl4030_vibra_resume);
 
 static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
-                             struct device_node *node)
+                             struct device_node *parent)
 {
+       struct device_node *node;
+
        if (pdata && pdata->coexist)
                return true;
 
-       node = of_find_node_by_name(node, "codec");
+       node = of_get_child_by_name(parent, "codec");
        if (node) {
                of_node_put(node);
                return true;
index 5690eb7..15e0d35 100644 (file)
@@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
        int vddvibr_uV = 0;
        int error;
 
-       of_node_get(twl6040_core_dev->of_node);
-       twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
+       twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
                                                 "vibra");
        if (!twl6040_core_node) {
                dev_err(&pdev->dev, "parent of node is missing?\n");
index 6bf56bb..d91f3b1 100644 (file)
@@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev,
                                     0, width, 0, 0);
                input_set_abs_params(mtouch, ABS_MT_POSITION_Y,
                                     0, height, 0, 0);
-               input_set_abs_params(mtouch, ABS_MT_PRESSURE,
-                                    0, 255, 0, 0);
 
                ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT);
                if (ret) {
index 579b899..dbe57da 100644 (file)
@@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
        case SS4_PACKET_ID_MULTI:
                if (priv->flags & ALPS_BUTTONPAD) {
                        if (IS_SS4PLUS_DEV(priv->dev_id)) {
-                               f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
-                               f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+                               f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+                               f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+                               no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL;
                        } else {
                                f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
                                f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+                               no_data_x = SS4_MFPACKET_NO_AX_BL;
                        }
+                       no_data_y = SS4_MFPACKET_NO_AY_BL;
 
                        f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
                        f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
-                       no_data_x = SS4_MFPACKET_NO_AX_BL;
-                       no_data_y = SS4_MFPACKET_NO_AY_BL;
                } else {
                        if (IS_SS4PLUS_DEV(priv->dev_id)) {
-                               f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
-                               f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+                               f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+                               f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+                               no_data_x = SS4_PLUS_MFPACKET_NO_AX;
                        } else {
-                               f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
-                               f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+                               f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+                               f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+                               no_data_x = SS4_MFPACKET_NO_AX;
                        }
+                       no_data_y = SS4_MFPACKET_NO_AY;
+
                        f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
                        f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
-                       no_data_x = SS4_MFPACKET_NO_AX;
-                       no_data_y = SS4_MFPACKET_NO_AY;
                }
 
                f->first_mp = 0;
index c80a7c7..79b6d69 100644 (file)
@@ -141,10 +141,12 @@ enum SS4_PACKET_ID {
 #define SS4_TS_Z_V2(_b)                (s8)(_b[4] & 0x7F)
 
 
-#define SS4_MFPACKET_NO_AX     8160    /* X-Coordinate value */
-#define SS4_MFPACKET_NO_AY     4080    /* Y-Coordinate value */
-#define SS4_MFPACKET_NO_AX_BL  8176    /* Buttonless X-Coordinate value */
-#define SS4_MFPACKET_NO_AY_BL  4088    /* Buttonless Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX             8160    /* X-Coordinate value */
+#define SS4_MFPACKET_NO_AY             4080    /* Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX_BL          8176    /* Buttonless X-Coord value */
+#define SS4_MFPACKET_NO_AY_BL          4088    /* Buttonless Y-Coord value */
+#define SS4_PLUS_MFPACKET_NO_AX                4080    /* SS4 PLUS, X */
+#define SS4_PLUS_MFPACKET_NO_AX_BL     4088    /* Buttonless SS4 PLUS, X */
 
 /*
  * enum V7_PACKET_ID - defines the packet type for V7
index b84cd97..a4aaa74 100644 (file)
@@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
                case 5:
                        etd->hw_version = 3;
                        break;
-               case 6 ... 14:
+               case 6 ... 15:
                        etd->hw_version = 4;
                        break;
                default:
index ee5466a..cd9f61c 100644 (file)
@@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0046", /* X250 */
        "LEN004a", /* W541 */
        "LEN200f", /* T450s */
+       "LEN2018", /* T460p */
        NULL
 };
 
index 0871010..bbd2922 100644 (file)
 #include "psmouse.h"
 #include "trackpoint.h"
 
+static const char * const trackpoint_variants[] = {
+       [TP_VARIANT_IBM]        = "IBM",
+       [TP_VARIANT_ALPS]       = "ALPS",
+       [TP_VARIANT_ELAN]       = "Elan",
+       [TP_VARIANT_NXP]        = "NXP",
+};
+
 /*
  * Power-on Reset: Resets all trackpoint parameters, including RAM values,
  * to defaults.
@@ -26,7 +33,7 @@
  */
 static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
 {
-       unsigned char results[2];
+       u8 results[2];
        int tries = 0;
 
        /* Issue POR command, and repeat up to once if 0xFC00 received */
@@ -38,7 +45,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
 
        /* Check for success response -- 0xAA00 */
        if (results[0] != 0xAA || results[1] != 0x00)
-               return -1;
+               return -ENODEV;
 
        return 0;
 }
@@ -46,8 +53,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
 /*
  * Device IO: read, write and toggle bit
  */
-static int trackpoint_read(struct ps2dev *ps2dev,
-                          unsigned char loc, unsigned char *results)
+static int trackpoint_read(struct ps2dev *ps2dev, u8 loc, u8 *results)
 {
        if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
            ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) {
@@ -57,8 +63,7 @@ static int trackpoint_read(struct ps2dev *ps2dev,
        return 0;
 }
 
-static int trackpoint_write(struct ps2dev *ps2dev,
-                           unsigned char loc, unsigned char val)
+static int trackpoint_write(struct ps2dev *ps2dev, u8 loc, u8 val)
 {
        if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
            ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) ||
@@ -70,8 +75,7 @@ static int trackpoint_write(struct ps2dev *ps2dev,
        return 0;
 }
 
-static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
-                                unsigned char loc, unsigned char mask)
+static int trackpoint_toggle_bit(struct ps2dev *ps2dev, u8 loc, u8 mask)
 {
        /* Bad things will happen if the loc param isn't in this range */
        if (loc < 0x20 || loc >= 0x2F)
@@ -87,11 +91,11 @@ static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
        return 0;
 }
 
-static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
-                                unsigned char mask, unsigned char value)
+static int trackpoint_update_bit(struct ps2dev *ps2dev,
+                                u8 loc, u8 mask, u8 value)
 {
        int retval = 0;
-       unsigned char data;
+       u8 data;
 
        trackpoint_read(ps2dev, loc, &data);
        if (((data & mask) == mask) != !!value)
@@ -105,17 +109,18 @@ static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
  */
 struct trackpoint_attr_data {
        size_t field_offset;
-       unsigned char command;
-       unsigned char mask;
-       unsigned char inverted;
-       unsigned char power_on_default;
+       u8 command;
+       u8 mask;
+       bool inverted;
+       u8 power_on_default;
 };
 
-static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf)
+static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse,
+                                       void *data, char *buf)
 {
        struct trackpoint_data *tp = psmouse->private;
        struct trackpoint_attr_data *attr = data;
-       unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset);
+       u8 value = *(u8 *)((void *)tp + attr->field_offset);
 
        if (attr->inverted)
                value = !value;
@@ -128,8 +133,8 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
 {
        struct trackpoint_data *tp = psmouse->private;
        struct trackpoint_attr_data *attr = data;
-       unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
-       unsigned char value;
+       u8 *field = (void *)tp + attr->field_offset;
+       u8 value;
        int err;
 
        err = kstrtou8(buf, 10, &value);
@@ -157,17 +162,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
 {
        struct trackpoint_data *tp = psmouse->private;
        struct trackpoint_attr_data *attr = data;
-       unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
-       unsigned int value;
+       bool *field = (void *)tp + attr->field_offset;
+       bool value;
        int err;
 
-       err = kstrtouint(buf, 10, &value);
+       err = kstrtobool(buf, &value);
        if (err)
                return err;
 
-       if (value > 1)
-               return -EINVAL;
-
        if (attr->inverted)
                value = !value;
 
@@ -193,30 +195,6 @@ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO,                              \
                    &trackpoint_attr_##_name,                           \
                    trackpoint_show_int_attr, trackpoint_set_bit_attr)
 
-#define TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name)                    \
-do {                                                                   \
-       struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name;  \
-                                                                       \
-       trackpoint_update_bit(&_psmouse->ps2dev,                        \
-                       _attr->command, _attr->mask, _tp->_name);       \
-} while (0)
-
-#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name)             \
-do {                                                                   \
-       if (!_power_on ||                                               \
-           _tp->_name != trackpoint_attr_##_name.power_on_default) {   \
-               if (!trackpoint_attr_##_name.mask)                      \
-                       trackpoint_write(&_psmouse->ps2dev,             \
-                                trackpoint_attr_##_name.command,       \
-                                _tp->_name);                           \
-               else                                                    \
-                       TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name);    \
-       }                                                               \
-} while (0)
-
-#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name)                            \
-       (_tp->_name = trackpoint_attr_##_name.power_on_default)
-
 TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS);
 TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED);
 TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA);
@@ -229,13 +207,33 @@ TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
 TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
 TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
 
-TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
+TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, false,
                    TP_DEF_PTSON);
-TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0,
+TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, false,
                    TP_DEF_SKIPBACK);
-TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1,
+TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, true,
                    TP_DEF_EXT_DEV);
 
+static bool trackpoint_is_attr_available(struct psmouse *psmouse,
+                                        struct attribute *attr)
+{
+       struct trackpoint_data *tp = psmouse->private;
+
+       return tp->variant_id == TP_VARIANT_IBM ||
+               attr == &psmouse_attr_sensitivity.dattr.attr ||
+               attr == &psmouse_attr_press_to_select.dattr.attr;
+}
+
+static umode_t trackpoint_is_attr_visible(struct kobject *kobj,
+                                         struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct serio *serio = to_serio_port(dev);
+       struct psmouse *psmouse = serio_get_drvdata(serio);
+
+       return trackpoint_is_attr_available(psmouse, attr) ? attr->mode : 0;
+}
+
 static struct attribute *trackpoint_attrs[] = {
        &psmouse_attr_sensitivity.dattr.attr,
        &psmouse_attr_speed.dattr.attr,
@@ -255,24 +253,56 @@ static struct attribute *trackpoint_attrs[] = {
 };
 
 static struct attribute_group trackpoint_attr_group = {
-       .attrs = trackpoint_attrs,
+       .is_visible     = trackpoint_is_attr_visible,
+       .attrs          = trackpoint_attrs,
 };
 
-static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id)
-{
-       unsigned char param[2] = { 0 };
+#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name)             \
+do {                                                                   \
+       struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name;  \
+                                                                       \
+       if ((!_power_on || _tp->_name != _attr->power_on_default) &&    \
+           trackpoint_is_attr_available(_psmouse,                      \
+                               &psmouse_attr_##_name.dattr.attr)) {    \
+               if (!_attr->mask)                                       \
+                       trackpoint_write(&_psmouse->ps2dev,             \
+                                        _attr->command, _tp->_name);   \
+               else                                                    \
+                       trackpoint_update_bit(&_psmouse->ps2dev,        \
+                                       _attr->command, _attr->mask,    \
+                                       _tp->_name);                    \
+       }                                                               \
+} while (0)
 
-       if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
-               return -1;
+#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name)                    \
+do {                                                                   \
+       _tp->_name = trackpoint_attr_##_name.power_on_default;          \
+} while (0)
 
-       /* add new TP ID. */
-       if (!(param[0] & TP_MAGIC_IDENT))
-               return -1;
+static int trackpoint_start_protocol(struct psmouse *psmouse,
+                                    u8 *variant_id, u8 *firmware_id)
+{
+       u8 param[2] = { 0 };
+       int error;
 
-       if (firmware_id)
-               *firmware_id = param[1];
+       error = ps2_command(&psmouse->ps2dev,
+                           param, MAKE_PS2_CMD(0, 2, TP_READ_ID));
+       if (error)
+               return error;
+
+       switch (param[0]) {
+       case TP_VARIANT_IBM:
+       case TP_VARIANT_ALPS:
+       case TP_VARIANT_ELAN:
+       case TP_VARIANT_NXP:
+               if (variant_id)
+                       *variant_id = param[0];
+               if (firmware_id)
+                       *firmware_id = param[1];
+               return 0;
+       }
 
-       return 0;
+       return -ENODEV;
 }
 
 /*
@@ -285,7 +315,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
 {
        struct trackpoint_data *tp = psmouse->private;
 
-       if (!in_power_on_state) {
+       if (!in_power_on_state && tp->variant_id == TP_VARIANT_IBM) {
                /*
                 * Disable features that may make device unusable
                 * with this driver.
@@ -347,7 +377,8 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
 
 static void trackpoint_disconnect(struct psmouse *psmouse)
 {
-       sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group);
+       device_remove_group(&psmouse->ps2dev.serio->dev,
+                           &trackpoint_attr_group);
 
        kfree(psmouse->private);
        psmouse->private = NULL;
@@ -355,14 +386,20 @@ static void trackpoint_disconnect(struct psmouse *psmouse)
 
 static int trackpoint_reconnect(struct psmouse *psmouse)
 {
-       int reset_fail;
+       struct trackpoint_data *tp = psmouse->private;
+       int error;
+       bool was_reset;
 
-       if (trackpoint_start_protocol(psmouse, NULL))
-               return -1;
+       error = trackpoint_start_protocol(psmouse, NULL, NULL);
+       if (error)
+               return error;
 
-       reset_fail = trackpoint_power_on_reset(&psmouse->ps2dev);
-       if (trackpoint_sync(psmouse, !reset_fail))
-               return -1;
+       was_reset = tp->variant_id == TP_VARIANT_IBM &&
+                   trackpoint_power_on_reset(&psmouse->ps2dev) == 0;
+
+       error = trackpoint_sync(psmouse, was_reset);
+       if (error)
+               return error;
 
        return 0;
 }
@@ -370,46 +407,66 @@ static int trackpoint_reconnect(struct psmouse *psmouse)
 int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
 {
        struct ps2dev *ps2dev = &psmouse->ps2dev;
-       unsigned char firmware_id;
-       unsigned char button_info;
+       struct trackpoint_data *tp;
+       u8 variant_id;
+       u8 firmware_id;
+       u8 button_info;
        int error;
 
-       if (trackpoint_start_protocol(psmouse, &firmware_id))
-               return -1;
+       error = trackpoint_start_protocol(psmouse, &variant_id, &firmware_id);
+       if (error)
+               return error;
 
        if (!set_properties)
                return 0;
 
-       if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
-               psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
-               button_info = 0x33;
-       }
-
-       psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
-       if (!psmouse->private)
+       tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+       if (!tp)
                return -ENOMEM;
 
-       psmouse->vendor = "IBM";
+       trackpoint_defaults(tp);
+       tp->variant_id = variant_id;
+       tp->firmware_id = firmware_id;
+
+       psmouse->private = tp;
+
+       psmouse->vendor = trackpoint_variants[variant_id];
        psmouse->name = "TrackPoint";
 
        psmouse->reconnect = trackpoint_reconnect;
        psmouse->disconnect = trackpoint_disconnect;
 
+       if (variant_id != TP_VARIANT_IBM) {
+               /* Newer variants do not support extended button query. */
+               button_info = 0x33;
+       } else {
+               error = trackpoint_read(ps2dev, TP_EXT_BTN, &button_info);
+               if (error) {
+                       psmouse_warn(psmouse,
+                                    "failed to get extended button data, assuming 3 buttons\n");
+                       button_info = 0x33;
+               } else if (!button_info) {
+                       psmouse_warn(psmouse,
+                                    "got 0 in extended button data, assuming 3 buttons\n");
+                       button_info = 0x33;
+               }
+       }
+
        if ((button_info & 0x0f) >= 3)
-               __set_bit(BTN_MIDDLE, psmouse->dev->keybit);
+               input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE);
 
        __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
        __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
 
-       trackpoint_defaults(psmouse->private);
-
-       error = trackpoint_power_on_reset(ps2dev);
-
-       /* Write defaults to TP only if reset fails. */
-       if (error)
+       if (variant_id != TP_VARIANT_IBM ||
+           trackpoint_power_on_reset(ps2dev) != 0) {
+               /*
+                * Write defaults to TP if we did not reset the trackpoint.
+                */
                trackpoint_sync(psmouse, false);
+       }
 
-       error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
+       error = device_add_group(&ps2dev->serio->dev, &trackpoint_attr_group);
        if (error) {
                psmouse_err(psmouse,
                            "failed to create sysfs attributes, error: %d\n",
@@ -420,8 +477,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
        }
 
        psmouse_info(psmouse,
-                    "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
-                    firmware_id,
+                    "%s TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
+                    psmouse->vendor, firmware_id,
                     (button_info & 0xf0) >> 4, button_info & 0x0f);
 
        return 0;
index 8805575..10a0391 100644 (file)
 #define TP_COMMAND             0xE2    /* Commands start with this */
 
 #define TP_READ_ID             0xE1    /* Sent for device identification */
-#define TP_MAGIC_IDENT         0x03    /* Sent after a TP_READ_ID followed */
-                                       /* by the firmware ID */
-                                       /* Firmware ID includes 0x1, 0x2, 0x3 */
 
+/*
+ * Valid first byte responses to the "Read Secondary ID" (0xE1) command.
+ * 0x01 was the original IBM trackpoint, others implement very limited
+ * subset of trackpoint features.
+ */
+#define TP_VARIANT_IBM         0x01
+#define TP_VARIANT_ALPS                0x02
+#define TP_VARIANT_ELAN                0x03
+#define TP_VARIANT_NXP         0x04
 
 /*
  * Commands
 
 #define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
 
-struct trackpoint_data
-{
-       unsigned char sensitivity, speed, inertia, reach;
-       unsigned char draghys, mindrag;
-       unsigned char thresh, upthresh;
-       unsigned char ztime, jenks;
-       unsigned char drift_time;
+struct trackpoint_data {
+       u8 variant_id;
+       u8 firmware_id;
+
+       u8 sensitivity, speed, inertia, reach;
+       u8 draghys, mindrag;
+       u8 thresh, upthresh;
+       u8 ztime, jenks;
+       u8 drift_time;
 
        /* toggles */
-       unsigned char press_to_select;
-       unsigned char skipback;
-       unsigned char ext_dev;
+       bool press_to_select;
+       bool skipback;
+       bool ext_dev;
 };
 
 #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
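
The trackpoint conversion above keeps one static attribute list and relies on the attribute group's .is_visible() callback to hide sysfs controls that the non-IBM variants do not implement, instead of registering a different group per variant. A minimal sketch of that idiom with hypothetical foo_* names; the real driver resolves its private data through serio_get_drvdata(), while this sketch simply assumes dev_get_drvdata():

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct foo_dev {
        bool has_pressure;              /* illustrative capability flag */
};

static ssize_t speed_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(speed);

static ssize_t pressure_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(pressure);

static struct attribute *foo_attrs[] = {
        &dev_attr_speed.attr,
        &dev_attr_pressure.attr,
        NULL
};

/* Called once per attribute when the group is registered. */
static umode_t foo_attr_is_visible(struct kobject *kobj,
                                   struct attribute *attr, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct foo_dev *foo = dev_get_drvdata(dev);

        if (attr == &dev_attr_pressure.attr && !foo->has_pressure)
                return 0;               /* hide the unsupported control */

        return attr->mode;
}

static const struct attribute_group foo_attr_group = {
        .is_visible     = foo_attr_is_visible,
        .attrs          = foo_attrs,
};

/* Registered with device_add_group(dev, &foo_attr_group) during probe. */
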
index 4f2bb59..141ea22 100644 (file)
@@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
                rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
                        "Failed to process interrupt request: %d\n", ret);
 
-       if (count)
+       if (count) {
                kfree(attn_data.data);
+               attn_data.data = NULL;
+       }
 
        if (!kfifo_is_empty(&drvdata->attn_fifo))
                return rmi_irq_fn(irq, dev_id);
index ae966e3..8a07ae1 100644 (file)
@@ -570,14 +570,19 @@ static int rmi_f01_probe(struct rmi_function *fn)
 
        dev_set_drvdata(&fn->dev, f01);
 
-       error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group);
+       error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
        if (error)
-               dev_warn(&fn->dev,
-                        "Failed to create attribute group: %d\n", error);
+               dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error);
 
        return 0;
 }
 
+static void rmi_f01_remove(struct rmi_function *fn)
+{
+       /* Note that the bus device is used, not the F01 device */
+       sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
+}
+
 static int rmi_f01_config(struct rmi_function *fn)
 {
        struct f01_data *f01 = dev_get_drvdata(&fn->dev);
@@ -717,6 +722,7 @@ struct rmi_function_handler rmi_f01_handler = {
        },
        .func           = 0x01,
        .probe          = rmi_f01_probe,
+       .remove         = rmi_f01_remove,
        .config         = rmi_f01_config,
        .attention      = rmi_f01_attention,
        .suspend        = rmi_f01_suspend,
index 7ed828a..3486d94 100644 (file)
@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
        int data, n, ret;
        if (!np)
                return -ENODEV;
-       np = of_find_node_by_name(np, "touch");
+       np = of_get_child_by_name(np, "touch");
        if (!np) {
                dev_err(&pdev->dev, "Can't find touch node\n");
                return -EINVAL;
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
        if (data) {
                ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
                if (ret < 0)
-                       return -EINVAL;
+                       goto err_put_node;
        }
        /* set tsi prebias time */
        if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
                ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
                if (ret < 0)
-                       return -EINVAL;
+                       goto err_put_node;
        }
        /* set prebias & prechg time of pen detect */
        data = 0;
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
        if (data) {
                ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
                if (ret < 0)
-                       return -EINVAL;
+                       goto err_put_node;
        }
        of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
+
+       of_node_put(np);
+
        return 0;
+
+err_put_node:
+       of_node_put(np);
+
+       return -EINVAL;
 }
 #else
 #define pm860x_touch_dt_init(x, y, z)  (-1)
index e102d77..a458e5e 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/platform_device.h>
 #include <linux/async.h>
 #include <linux/i2c.h>
@@ -1261,10 +1262,13 @@ static int elants_i2c_probe(struct i2c_client *client,
        }
 
        /*
-        * Systems using device tree should set up interrupt via DTS,
-        * the rest will use the default falling edge interrupts.
+        * Platform code (ACPI, DTS) should normally set up the interrupt
+        * for us, but in case it did not, fall back to falling-edge
+        * triggering to stay compatible with older Chromebooks.
         */
-       irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING;
+       irqflags = irq_get_trigger_type(client->irq);
+       if (!irqflags)
+               irqflags = IRQF_TRIGGER_FALLING;
 
        error = devm_request_threaded_irq(&client->dev, client->irq,
                                          NULL, elants_i2c_irq,
index fc080a7..f1cd4dd 100644 (file)
@@ -10,8 +10,7 @@
 #include <linux/of.h>
 #include <linux/firmware.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/acpi.h>
 #include <linux/interrupt.h>
index 8d7f9c8..9642f10 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/input.h>
 #include <linux/input/mt.h>
 #include <linux/input/touchscreen.h>
+#include <linux/module.h>
 
 static bool touchscreen_get_prop_u32(struct device *dev,
                                     const char *property,
@@ -185,3 +186,6 @@ void touchscreen_report_pos(struct input_dev *input,
        input_report_abs(input, multitouch ? ABS_MT_POSITION_Y : ABS_Y, y);
 }
 EXPORT_SYMBOL(touchscreen_report_pos);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device-tree helpers functions for touchscreen devices");
index 26b1cb8..675efa9 100644 (file)
@@ -1,13 +1,8 @@
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Samsung S6SY761 Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// Samsung S6SY761 Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
 
 #include <asm/unaligned.h>
 #include <linux/delay.h>
index c12d018..2a123e2 100644 (file)
@@ -1,13 +1,8 @@
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * STMicroelectronics FTS Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// STMicroelectronics FTS Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
 
 #include <linux/delay.h>
 #include <linux/i2c.h>
index 7d5eb00..97baf88 100644 (file)
@@ -4184,7 +4184,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
                               struct irq_cfg *cfg);
 
 static int irq_remapping_activate(struct irq_domain *domain,
-                                 struct irq_data *irq_data, bool early)
+                                 struct irq_data *irq_data, bool reserve)
 {
        struct amd_ir_data *data = irq_data->chip_data;
        struct irq_2_irte *irte_info = &data->irq_2_irte;
index f122071..744592d 100644 (file)
@@ -1698,13 +1698,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
        domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
        domain->geometry.aperture_end = (1UL << ias) - 1;
        domain->geometry.force_aperture = true;
-       smmu_domain->pgtbl_ops = pgtbl_ops;
 
        ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
-       if (ret < 0)
+       if (ret < 0) {
                free_io_pgtable_ops(pgtbl_ops);
+               return ret;
+       }
 
-       return ret;
+       smmu_domain->pgtbl_ops = pgtbl_ops;
+       return 0;
 }
 
 static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -1731,7 +1733,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
 
 static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
 {
-       int i;
+       int i, j;
        struct arm_smmu_master_data *master = fwspec->iommu_priv;
        struct arm_smmu_device *smmu = master->smmu;
 
@@ -1739,6 +1741,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
                u32 sid = fwspec->ids[i];
                __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
 
+               /* Bridged PCI devices may end up with duplicated IDs */
+               for (j = 0; j < i; j++)
+                       if (fwspec->ids[j] == sid)
+                               break;
+               if (j < i)
+                       continue;
+
                arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
        }
 }
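
The added j < i scan skips stream IDs that already occurred earlier in the array, so each ID is installed only once; a self-contained sketch of that pattern with invented IDs:

#include <stdio.h>

/* Install each stream ID only once, skipping values already seen earlier
 * in the array -- the same j < i scan used in the hunk above. */
static void install_ids(const unsigned int *ids, int num)
{
        int i, j;

        for (i = 0; i < num; i++) {
                for (j = 0; j < i; j++)
                        if (ids[j] == ids[i])
                                break;
                if (j < i)
                        continue;       /* duplicate: already installed */

                printf("install 0x%x\n", ids[i]);
        }
}

int main(void)
{
        const unsigned int ids[] = { 0x10, 0x11, 0x10, 0x12, 0x11 };

        install_ids(ids, 5);    /* prints 0x10, 0x11, 0x12 once each */
        return 0;
}
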
index a0babdb..4a2de34 100644 (file)
@@ -2250,10 +2250,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                uint64_t tmp;
 
                if (!sg_res) {
+                       unsigned int pgoff = sg->offset & ~PAGE_MASK;
+
                        sg_res = aligned_nrpages(sg->offset, sg->length);
-                       sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+                       sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
                        sg->dma_length = sg->length;
-                       pteval = page_to_phys(sg_page(sg)) | prot;
+                       pteval = (sg_phys(sg) - pgoff) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }
 
@@ -3787,7 +3789,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
        for_each_sg(sglist, sg, nelems, i) {
                BUG_ON(!sg_page(sg));
-               sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+               sg->dma_address = sg_phys(sg);
                sg->dma_length = sg->length;
        }
        return nelems;
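
The __domain_mapping() change splits the in-page offset out of the physical address: the PTE value must be page-aligned, while the DMA address handed back keeps the offset. A toy calculation of that split, assuming a 4 KiB page size and made-up addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        uint64_t phys     = 0x12345678;         /* like sg_phys(sg): page + offset */
        uint64_t pgoff    = phys & ~PAGE_MASK;  /* offset within the page          */
        uint64_t iov_pfn  = 0x800;              /* made-up IOVA page frame number  */
        uint64_t dma_addr = (iov_pfn << PAGE_SHIFT) + pgoff;
        uint64_t pteval   = phys - pgoff;       /* page-aligned, ready to OR prot  */

        printf("pte base 0x%llx, dma address 0x%llx\n",
               (unsigned long long)pteval, (unsigned long long)dma_addr);
        return 0;
}
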
index 76a193c..66f69af 100644 (file)
@@ -1397,7 +1397,7 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
 }
 
 static int intel_irq_remapping_activate(struct irq_domain *domain,
-                                       struct irq_data *irq_data, bool early)
+                                       struct irq_data *irq_data, bool reserve)
 {
        intel_ir_reconfigure_irte(irq_data, true);
        return 0;
index 4039e64..06f025f 100644 (file)
@@ -2303,7 +2303,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 }
 
 static int its_irq_domain_activate(struct irq_domain *domain,
-                                  struct irq_data *d, bool early)
+                                  struct irq_data *d, bool reserve)
 {
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
@@ -2818,7 +2818,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 }
 
 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-                                      struct irq_data *d, bool early)
+                                      struct irq_data *d, bool reserve)
 {
        struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
        struct its_node *its;
index 06f29cf..cee59fe 100644 (file)
@@ -342,6 +342,9 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
  */
 static struct lock_class_key intc_irqpin_irq_lock_class;
 
+/* And this is for the request mutex */
+static struct lock_class_key intc_irqpin_irq_request_class;
+
 static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
                                      irq_hw_number_t hw)
 {
@@ -352,7 +355,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
 
        intc_irqpin_dbg(&p->irq[hw], "map");
        irq_set_chip_data(virq, h->host_data);
-       irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class);
+       irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class,
+                             &intc_irqpin_irq_request_class);
        irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
        return 0;
 }
index fd83c7f..ede4fa0 100644 (file)
@@ -188,6 +188,7 @@ void led_blink_set(struct led_classdev *led_cdev,
 {
        del_timer_sync(&led_cdev->blink_timer);
 
+       clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
        clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
        clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
 
index a526743..8988ba3 100644 (file)
@@ -106,7 +106,7 @@ static int pm8058_led_probe(struct platform_device *pdev)
        if (!led)
                return -ENOMEM;
 
-       led->ledtype = (u32)of_device_get_match_data(&pdev->dev);
+       led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev);
 
        map = dev_get_regmap(pdev->dev.parent, NULL);
        if (!map) {
index a27d852..a0cc1bc 100644 (file)
@@ -490,7 +490,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                if (b == -1)
                        goto err;
 
-               k->ptr[i] = PTR(ca->buckets[b].gen,
+               k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
                                bucket_to_sector(c, b),
                                ca->sb.nr_this_dev);
 
index 11c5503..81e8dc3 100644 (file)
@@ -807,7 +807,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
        c->shrink.scan_objects = bch_mca_scan;
        c->shrink.seeks = 4;
        c->shrink.batch = c->btree_pages * 2;
-       register_shrinker(&c->shrink);
+
+       if (register_shrinker(&c->shrink))
+               pr_warn("bcache: %s: could not register shrinker",
+                               __func__);
 
        return 0;
 }
index 41c238f..f9d3917 100644 (file)
@@ -585,7 +585,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
                return false;
 
        for (i = 0; i < KEY_PTRS(l); i++)
-               if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+               if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
                    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
                        return false;
 
index 02a98dd..a87165c 100644 (file)
@@ -170,6 +170,11 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
                 * find a sequence of buckets with valid journal entries
                 */
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
+                       /*
+                        * We must try index l == 0 first for correctness,
+                        * because the journal buckets form a circular buffer
+                        * which might have wrapped around.
+                        */
                        l = (i * 2654435769U) % ca->sb.njournal_buckets;
 
                        if (test_bit(l, bitmap))
@@ -507,7 +512,7 @@ static void journal_reclaim(struct cache_set *c)
                        continue;
 
                ja->cur_idx = next;
-               k->ptr[n++] = PTR(0,
+               k->ptr[n++] = MAKE_PTR(0,
                                  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                                  ca->sb.nr_this_dev);
        }
index 3a7aed7..643c302 100644 (file)
@@ -708,16 +708,15 @@ static void cached_dev_read_error(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;
-       struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
        /*
-        * If cache device is dirty (dc->has_dirty is non-zero), then
-        * recovery a failed read request from cached device may get a
-        * stale data back. So read failure recovery is only permitted
-        * when cache device is clean.
+        * If the read request hit dirty data (s->read_dirty_data is true),
+        * then recovering a failed read request from the cached device may
+        * return stale data. So read failure recovery is only permitted
+        * when the read request hit clean data in the cache device, or
+        * when a cache read race happened.
         */
-       if (s->recoverable &&
-           (dc && !atomic_read(&dc->has_dirty))) {
+       if (s->recoverable && !s->read_dirty_data) {
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);
 
index b8ac591..c546b56 100644 (file)
@@ -1611,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
        int l;
        struct dm_buffer *b, *tmp;
        unsigned long freed = 0;
-       unsigned long count = nr_to_scan;
+       unsigned long count = c->n_buffers[LIST_CLEAN] +
+                             c->n_buffers[LIST_DIRTY];
        unsigned long retain_target = get_retain_buffers(c);
 
        for (l = 0; l < LIST_SIZE; l++) {
@@ -1647,8 +1648,11 @@ static unsigned long
 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
        struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
+       unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
+                             READ_ONCE(c->n_buffers[LIST_DIRTY]);
+       unsigned long retain_target = get_retain_buffers(c);
 
-       return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
+       return (count < retain_target) ? 0 : (count - retain_target);
 }
 
 /*
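
dm_bufio_shrink_count() now reports only the buffers above the retain target, so the shrinker is not asked to scan when nothing can be freed; a standalone sketch of that accounting with invented numbers:

#include <stdio.h>

/* Only buffers above the retain threshold are advertised to the shrinker;
 * a plain count would invite scans that can never free anything. */
static unsigned long shrinkable(unsigned long clean, unsigned long dirty,
                                unsigned long retain_target)
{
        unsigned long count = clean + dirty;

        return (count < retain_target) ? 0 : (count - retain_target);
}

int main(void)
{
        printf("%lu\n", shrinkable(100, 20, 200));  /* 0: below retain target */
        printf("%lu\n", shrinkable(300, 50, 200));  /* 150: only the excess   */
        return 0;
}
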
index cf23a14..47407e4 100644 (file)
@@ -3472,18 +3472,18 @@ static int __init dm_cache_init(void)
 {
        int r;
 
-       r = dm_register_target(&cache_target);
-       if (r) {
-               DMERR("cache target registration failed: %d", r);
-               return r;
-       }
-
        migration_cache = KMEM_CACHE(dm_cache_migration, 0);
        if (!migration_cache) {
                dm_unregister_target(&cache_target);
                return -ENOMEM;
        }
 
+       r = dm_register_target(&cache_target);
+       if (r) {
+               DMERR("cache target registration failed: %d", r);
+               return r;
+       }
+
        return 0;
 }
 
index 9fc12f5..554d603 100644 (file)
@@ -1954,10 +1954,15 @@ static int crypt_setkey(struct crypt_config *cc)
        /* Ignore extra keys (which are used for IV etc) */
        subkey_size = crypt_subkey_size(cc);
 
-       if (crypt_integrity_hmac(cc))
+       if (crypt_integrity_hmac(cc)) {
+               if (subkey_size < cc->key_mac_size)
+                       return -EINVAL;
+
                crypt_copy_authenckey(cc->authenc_key, cc->key,
                                      subkey_size - cc->key_mac_size,
                                      cc->key_mac_size);
+       }
+
        for (i = 0; i < cc->tfms_count; i++) {
                if (crypt_integrity_hmac(cc))
                        r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
@@ -2053,9 +2058,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
 
        ret = crypt_setkey(cc);
 
-       /* wipe the kernel key payload copy in each case */
-       memset(cc->key, 0, cc->key_size * sizeof(u8));
-
        if (!ret) {
                set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
                kzfree(cc->key_string);
@@ -2523,6 +2525,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
                }
        }
 
+       /* wipe the kernel key payload copy */
+       if (cc->key_string)
+               memset(cc->key, 0, cc->key_size * sizeof(u8));
+
        return ret;
 }
 
@@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                        cc->tag_pool_max_sectors * cc->on_disk_tag_size);
                if (!cc->tag_pool) {
                        ti->error = "Cannot allocate integrity tags mempool";
+                       ret = -ENOMEM;
                        goto bad;
                }
 
@@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
                                return ret;
                        if (cc->iv_gen_ops && cc->iv_gen_ops->init)
                                ret = cc->iv_gen_ops->init(cc);
+                       /* wipe the kernel key payload copy */
+                       if (cc->key_string)
+                               memset(cc->key, 0, cc->key_size * sizeof(u8));
                        return ret;
                }
                if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
@@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type crypt_target = {
        .name   = "crypt",
-       .version = {1, 18, 0},
+       .version = {1, 18, 1},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
index 05c7bfd..46d7c87 100644 (file)
@@ -2559,7 +2559,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
        int r = 0;
        unsigned i;
        __u64 journal_pages, journal_desc_size, journal_tree_size;
-       unsigned char *crypt_data = NULL;
+       unsigned char *crypt_data = NULL, *crypt_iv = NULL;
+       struct skcipher_request *req = NULL;
 
        ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
        ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
@@ -2617,9 +2618,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
 
                if (blocksize == 1) {
                        struct scatterlist *sg;
-                       SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-                       unsigned char iv[ivsize];
-                       skcipher_request_set_tfm(req, ic->journal_crypt);
+
+                       req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+                       if (!req) {
+                               *error = "Could not allocate crypt request";
+                               r = -ENOMEM;
+                               goto bad;
+                       }
+
+                       crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+                       if (!crypt_iv) {
+                               *error = "Could not allocate iv";
+                               r = -ENOMEM;
+                               goto bad;
+                       }
 
                        ic->journal_xor = dm_integrity_alloc_page_list(ic);
                        if (!ic->journal_xor) {
@@ -2641,9 +2653,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                sg_set_buf(&sg[i], va, PAGE_SIZE);
                        }
                        sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
-                       memset(iv, 0x00, ivsize);
+                       memset(crypt_iv, 0x00, ivsize);
 
-                       skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
+                       skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
                        init_completion(&comp.comp);
                        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                        if (do_crypt(true, req, &comp))
@@ -2659,10 +2671,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                        crypto_free_skcipher(ic->journal_crypt);
                        ic->journal_crypt = NULL;
                } else {
-                       SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-                       unsigned char iv[ivsize];
                        unsigned crypt_len = roundup(ivsize, blocksize);
 
+                       req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+                       if (!req) {
+                               *error = "Could not allocate crypt request";
+                               r = -ENOMEM;
+                               goto bad;
+                       }
+
+                       crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+                       if (!crypt_iv) {
+                               *error = "Could not allocate iv";
+                               r = -ENOMEM;
+                               goto bad;
+                       }
+
                        crypt_data = kmalloc(crypt_len, GFP_KERNEL);
                        if (!crypt_data) {
                                *error = "Unable to allocate crypt data";
@@ -2670,8 +2694,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                goto bad;
                        }
 
-                       skcipher_request_set_tfm(req, ic->journal_crypt);
-
                        ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
                        if (!ic->journal_scatterlist) {
                                *error = "Unable to allocate sg list";
@@ -2695,12 +2717,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                struct skcipher_request *section_req;
                                __u32 section_le = cpu_to_le32(i);
 
-                               memset(iv, 0x00, ivsize);
+                               memset(crypt_iv, 0x00, ivsize);
                                memset(crypt_data, 0x00, crypt_len);
                                memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
 
                                sg_init_one(&sg, crypt_data, crypt_len);
-                               skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
+                               skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
                                init_completion(&comp.comp);
                                comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                                if (do_crypt(true, req, &comp))
@@ -2758,6 +2780,9 @@ retest_commit_id:
        }
 bad:
        kfree(crypt_data);
+       kfree(crypt_iv);
+       skcipher_request_free(req);
+
        return r;
 }
 
index c8faa2b..f7810cc 100644 (file)
@@ -457,6 +457,38 @@ do {                                                                       \
                 dm_noflush_suspending((m)->ti));                       \
 } while (0)
 
+/*
+ * Check whether bios must be queued in the device-mapper core rather
+ * than here in the target.
+ *
+ * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
+ * the same value then we are not between multipath_presuspend()
+ * and multipath_resume() calls and we have no need to check
+ * for the DMF_NOFLUSH_SUSPENDING flag.
+ */
+static bool __must_push_back(struct multipath *m, unsigned long flags)
+{
+       return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
+                test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
+               dm_noflush_suspending(m->ti));
+}
+
+/*
+ * The following functions use READ_ONCE to get atomic access to
+ * all of m->flags without taking the spinlock.
+ */
+static bool must_push_back_rq(struct multipath *m)
+{
+       unsigned long flags = READ_ONCE(m->flags);
+       return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
+}
+
+static bool must_push_back_bio(struct multipath *m)
+{
+       unsigned long flags = READ_ONCE(m->flags);
+       return __must_push_back(m, flags);
+}
+
 /*
  * Map cloned requests (request-based multipath)
  */
@@ -478,7 +510,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
                pgpath = choose_pgpath(m, nr_bytes);
 
        if (!pgpath) {
-               if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+               if (must_push_back_rq(m))
                        return DM_MAPIO_DELAY_REQUEUE;
                dm_report_EIO(m);       /* Failed */
                return DM_MAPIO_KILL;
@@ -553,7 +585,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
        }
 
        if (!pgpath) {
-               if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+               if (must_push_back_bio(m))
                        return DM_MAPIO_REQUEUE;
                dm_report_EIO(m);
                return DM_MAPIO_KILL;
@@ -651,8 +683,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
        assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
                   (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
                   (!save_old_value && queue_if_no_path));
-       assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
-                  queue_if_no_path || dm_noflush_suspending(m->ti));
+       assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
        spin_unlock_irqrestore(&m->lock, flags);
 
        if (!queue_if_no_path) {
@@ -1486,7 +1517,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
                        fail_path(pgpath);
 
                if (atomic_read(&m->nr_valid_paths) == 0 &&
-                   !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+                   !must_push_back_rq(m)) {
                        if (error == BLK_STS_IOERR)
                                dm_report_EIO(m);
                        /* complete with the original error */
@@ -1521,8 +1552,12 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
 
        if (atomic_read(&m->nr_valid_paths) == 0 &&
            !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-               dm_report_EIO(m);
-               *error = BLK_STS_IOERR;
+               if (must_push_back_bio(m)) {
+                       r = DM_ENDIO_REQUEUE;
+               } else {
+                       dm_report_EIO(m);
+                       *error = BLK_STS_IOERR;
+               }
                goto done;
        }
 
@@ -1957,13 +1992,6 @@ static int __init dm_multipath_init(void)
 {
        int r;
 
-       r = dm_register_target(&multipath_target);
-       if (r < 0) {
-               DMERR("request-based register failed %d", r);
-               r = -EINVAL;
-               goto bad_register_target;
-       }
-
        kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
        if (!kmultipathd) {
                DMERR("failed to create workqueue kmpathd");
@@ -1985,13 +2013,20 @@ static int __init dm_multipath_init(void)
                goto bad_alloc_kmpath_handlerd;
        }
 
+       r = dm_register_target(&multipath_target);
+       if (r < 0) {
+               DMERR("request-based register failed %d", r);
+               r = -EINVAL;
+               goto bad_register_target;
+       }
+
        return 0;
 
+bad_register_target:
+       destroy_workqueue(kmpath_handlerd);
 bad_alloc_kmpath_handlerd:
        destroy_workqueue(kmultipathd);
 bad_alloc_kmultipathd:
-       dm_unregister_target(&multipath_target);
-bad_register_target:
        return r;
 }
 
index 1113b42..a0613bd 100644 (file)
@@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void)
                return r;
        }
 
-       r = dm_register_target(&snapshot_target);
-       if (r < 0) {
-               DMERR("snapshot target register failed %d", r);
-               goto bad_register_snapshot_target;
-       }
-
-       r = dm_register_target(&origin_target);
-       if (r < 0) {
-               DMERR("Origin target register failed %d", r);
-               goto bad_register_origin_target;
-       }
-
-       r = dm_register_target(&merge_target);
-       if (r < 0) {
-               DMERR("Merge target register failed %d", r);
-               goto bad_register_merge_target;
-       }
-
        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
@@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void)
                goto bad_pending_cache;
        }
 
+       r = dm_register_target(&snapshot_target);
+       if (r < 0) {
+               DMERR("snapshot target register failed %d", r);
+               goto bad_register_snapshot_target;
+       }
+
+       r = dm_register_target(&origin_target);
+       if (r < 0) {
+               DMERR("Origin target register failed %d", r);
+               goto bad_register_origin_target;
+       }
+
+       r = dm_register_target(&merge_target);
+       if (r < 0) {
+               DMERR("Merge target register failed %d", r);
+               goto bad_register_merge_target;
+       }
+
        return 0;
 
-bad_pending_cache:
-       kmem_cache_destroy(exception_cache);
-bad_exception_cache:
-       exit_origin_hash();
-bad_origin_hash:
-       dm_unregister_target(&merge_target);
 bad_register_merge_target:
        dm_unregister_target(&origin_target);
 bad_register_origin_target:
        dm_unregister_target(&snapshot_target);
 bad_register_snapshot_target:
+       kmem_cache_destroy(pending_cache);
+bad_pending_cache:
+       kmem_cache_destroy(exception_cache);
+bad_exception_cache:
+       exit_origin_hash();
+bad_origin_hash:
        dm_exception_store_exit();
 
        return r;
index 88130b5..aaffd0c 100644 (file)
@@ -453,14 +453,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 
                refcount_set(&dd->count, 1);
                list_add(&dd->list, &t->devices);
+               goto out;
 
        } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
-               refcount_inc(&dd->count);
        }
-
+       refcount_inc(&dd->count);
+out:
        *result = dd->dm_dev;
        return 0;
 }
index d31d18d..36ef284 100644 (file)
 #define SECTOR_TO_BLOCK_SHIFT 3
 
 /*
+ * For btree insert:
  *  3 for btree insert +
  *  2 for btree lookup used within space map
+ * For btree remove:
+ *  2 for shadow spine +
+ *  4 for rebalance 3 child node
  */
-#define THIN_MAX_CONCURRENT_LOCKS 5
+#define THIN_MAX_CONCURRENT_LOCKS 6
 
 /* This should be plenty */
 #define SPACE_MAP_ROOT_SIZE 128
index 89e5dff..f91d771 100644 (file)
@@ -4355,30 +4355,28 @@ static struct target_type thin_target = {
 
 static int __init dm_thin_init(void)
 {
-       int r;
+       int r = -ENOMEM;
 
        pool_table_init();
 
+       _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+       if (!_new_mapping_cache)
+               return r;
+
        r = dm_register_target(&thin_target);
        if (r)
-               return r;
+               goto bad_new_mapping_cache;
 
        r = dm_register_target(&pool_target);
        if (r)
-               goto bad_pool_target;
-
-       r = -ENOMEM;
-
-       _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
-       if (!_new_mapping_cache)
-               goto bad_new_mapping_cache;
+               goto bad_thin_target;
 
        return 0;
 
-bad_new_mapping_cache:
-       dm_unregister_target(&pool_target);
-bad_pool_target:
+bad_thin_target:
        dm_unregister_target(&thin_target);
+bad_new_mapping_cache:
+       kmem_cache_destroy(_new_mapping_cache);
 
        return r;
 }
index 41c050b..4e4dee0 100644 (file)
@@ -7605,7 +7605,9 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
                if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
                        /* Still cleaning up */
                        resync = max_sectors;
-       } else
+       } else if (resync > max_sectors)
+               resync = max_sectors;
+       else
                resync -= atomic_read(&mddev->recovery_active);
 
        if (resync == 0) {
index f21ce6a..58b3197 100644 (file)
@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
        pn->keys[1] = rn->keys[0];
        memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
 
-       /*
-        * rejig the spine.  This is ugly, since it knows too
-        * much about the spine
-        */
-       if (s->nodes[0] != new_parent) {
-               unlock_block(s->info, s->nodes[0]);
-               s->nodes[0] = new_parent;
-       }
-       if (key < le64_to_cpu(rn->keys[0])) {
-               unlock_block(s->info, right);
-               s->nodes[1] = left;
-       } else {
-               unlock_block(s->info, left);
-               s->nodes[1] = right;
-       }
-       s->count = 2;
-
+       unlock_block(s->info, left);
+       unlock_block(s->info, right);
        return 0;
 }
 
index cc9d337..6df398e 100644 (file)
@@ -809,11 +809,15 @@ static void flush_pending_writes(struct r1conf *conf)
        spin_lock_irq(&conf->device_lock);
 
        if (conf->pending_bio_list.head) {
+               struct blk_plug plug;
                struct bio *bio;
+
                bio = bio_list_get(&conf->pending_bio_list);
                conf->pending_count = 0;
                spin_unlock_irq(&conf->device_lock);
+               blk_start_plug(&plug);
                flush_bio_list(conf, bio);
+               blk_finish_plug(&plug);
        } else
                spin_unlock_irq(&conf->device_lock);
 }
index b9edbc7..c131835 100644 (file)
@@ -894,10 +894,13 @@ static void flush_pending_writes(struct r10conf *conf)
        spin_lock_irq(&conf->device_lock);
 
        if (conf->pending_bio_list.head) {
+               struct blk_plug plug;
                struct bio *bio;
+
                bio = bio_list_get(&conf->pending_bio_list);
                conf->pending_count = 0;
                spin_unlock_irq(&conf->device_lock);
+               blk_start_plug(&plug);
                /* flush any pending bitmap writes to disk
                 * before proceeding w/ I/O */
                bitmap_unplug(conf->mddev->bitmap);
@@ -918,6 +921,7 @@ static void flush_pending_writes(struct r10conf *conf)
                                generic_make_request(bio);
                        bio = next;
                }
+               blk_finish_plug(&plug);
        } else
                spin_unlock_irq(&conf->device_lock);
 }
index f1c86d9..39f31f0 100644 (file)
@@ -2577,31 +2577,22 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
 int r5c_journal_mode_set(struct mddev *mddev, int mode)
 {
        struct r5conf *conf;
-       int err;
 
        if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
            mode > R5C_JOURNAL_MODE_WRITE_BACK)
                return -EINVAL;
 
-       err = mddev_lock(mddev);
-       if (err)
-               return err;
        conf = mddev->private;
-       if (!conf || !conf->log) {
-               mddev_unlock(mddev);
+       if (!conf || !conf->log)
                return -ENODEV;
-       }
 
        if (raid5_calc_degraded(conf) > 0 &&
-           mode == R5C_JOURNAL_MODE_WRITE_BACK) {
-               mddev_unlock(mddev);
+           mode == R5C_JOURNAL_MODE_WRITE_BACK)
                return -EINVAL;
-       }
 
        mddev_suspend(mddev);
        conf->log->r5c_journal_mode = mode;
        mddev_resume(mddev);
-       mddev_unlock(mddev);
 
        pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
                 mdname(mddev), mode, r5c_journal_mode_str[mode]);
@@ -2614,6 +2605,7 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
 {
        int mode = ARRAY_SIZE(r5c_journal_mode_str);
        size_t len = length;
+       int ret;
 
        if (len < 2)
                return -EINVAL;
@@ -2625,8 +2617,12 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
                if (strlen(r5c_journal_mode_str[mode]) == len &&
                    !strncmp(page, r5c_journal_mode_str[mode], len))
                        break;
-
-       return r5c_journal_mode_set(mddev, mode) ?: length;
+       ret = mddev_lock(mddev);
+       if (ret)
+               return ret;
+       ret = r5c_journal_mode_set(mddev, mode);
+       mddev_unlock(mddev);
+       return ret ?: length;
 }
 
 struct md_sysfs_entry
index 31dc25e..98ce427 100644 (file)
@@ -2677,13 +2677,13 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
        pr_debug("raid456: error called\n");
 
        spin_lock_irqsave(&conf->device_lock, flags);
+       set_bit(Faulty, &rdev->flags);
        clear_bit(In_sync, &rdev->flags);
        mddev->degraded = raid5_calc_degraded(conf);
        spin_unlock_irqrestore(&conf->device_lock, flags);
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 
        set_bit(Blocked, &rdev->flags);
-       set_bit(Faulty, &rdev->flags);
        set_mask_bits(&mddev->sb_flags, 0,
                      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
        pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
index e4ea2a0..c5c827e 100644 (file)
@@ -521,13 +521,13 @@ static void list_add_locked(struct list_head *new, struct list_head *head,
        spin_unlock_irqrestore(lock, flags);
 }
 
-/**
+/*
  * register a client callback that called when device plugged in/unplugged
  * NOTE: if devices exist callback is called immediately for each device
  *
  * @param hotplug callback
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smscore_register_hotplug(hotplug_t hotplug)
 {
@@ -562,7 +562,7 @@ int smscore_register_hotplug(hotplug_t hotplug)
 }
 EXPORT_SYMBOL_GPL(smscore_register_hotplug);
 
-/**
+/*
  * unregister a client callback that called when device plugged in/unplugged
  *
  * @param hotplug callback
@@ -636,7 +636,7 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
        return cb;
 }
 
-/**
+/*
  * creates coredev object for a device, prepares buffers,
  * creates buffer mappings, notifies registered hotplugs about new device.
  *
@@ -644,7 +644,7 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
  *               and handlers
  * @param coredev pointer to a value that receives created coredev object
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smscore_register_device(struct smsdevice_params_t *params,
                            struct smscore_device_t **coredev,
@@ -764,10 +764,10 @@ static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
                        0 : -ETIME;
 }
 
-/**
+/*
  * Starts & enables IR operations
  *
- * @return 0 on success, < 0 on error.
+ * return: 0 on success, < 0 on error.
  */
 static int smscore_init_ir(struct smscore_device_t *coredev)
 {
@@ -812,13 +812,13 @@ static int smscore_init_ir(struct smscore_device_t *coredev)
        return 0;
 }
 
-/**
+/*
  * configures device features according to board configuration structure.
  *
  * @param coredev pointer to a coredev object returned by
  *                smscore_register_device
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static int smscore_configure_board(struct smscore_device_t *coredev)
 {
@@ -861,13 +861,13 @@ static int smscore_configure_board(struct smscore_device_t *coredev)
        return 0;
 }
 
-/**
+/*
  * sets initial device mode and notifies client hotplugs that device is ready
  *
  * @param coredev pointer to a coredev object returned by
  *               smscore_register_device
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smscore_start_device(struct smscore_device_t *coredev)
 {
@@ -1087,7 +1087,7 @@ static char *smscore_fw_lkup[][DEVICE_MODE_MAX] = {
        },
 };
 
-/**
+/*
  * get firmware file name from one of the two mechanisms : sms_boards or
  * smscore_fw_lkup.
  * @param coredev pointer to a coredev object returned by
@@ -1096,7 +1096,7 @@ static char *smscore_fw_lkup[][DEVICE_MODE_MAX] = {
  * @param lookup if 1, always get the fw filename from smscore_fw_lkup
  *      table. if 0, try first to get from sms_boards
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
                              int mode)
@@ -1125,7 +1125,7 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
        return fw[mode];
 }
 
-/**
+/*
  * loads specified firmware into a buffer and calls device loadfirmware_handler
  *
  * @param coredev pointer to a coredev object returned by
@@ -1133,7 +1133,7 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
  * @param filename null-terminated string specifies firmware file name
  * @param loadfirmware_handler device handler that loads firmware
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
                                           int mode,
@@ -1182,14 +1182,14 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
        return rc;
 }
 
-/**
+/*
  * notifies all clients registered with the device, notifies hotplugs,
  * frees all buffers and coredev object
  *
  * @param coredev pointer to a coredev object returned by
  *                smscore_register_device
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 void smscore_unregister_device(struct smscore_device_t *coredev)
 {
@@ -1282,14 +1282,14 @@ static int smscore_detect_mode(struct smscore_device_t *coredev)
        return rc;
 }
 
-/**
+/*
  * send init device request and wait for response
  *
  * @param coredev pointer to a coredev object returned by
  *                smscore_register_device
  * @param mode requested mode of operation
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static int smscore_init_device(struct smscore_device_t *coredev, int mode)
 {
@@ -1315,7 +1315,7 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
        return rc;
 }
 
-/**
+/*
  * calls device handler to change mode of operation
  * NOTE: stellar/usb may disconnect when changing mode
  *
@@ -1323,7 +1323,7 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
  *                smscore_register_device
  * @param mode requested mode of operation
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
 {
@@ -1411,13 +1411,13 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
        return rc;
 }
 
-/**
+/*
  * calls device handler to get current mode of operation
  *
  * @param coredev pointer to a coredev object returned by
  *                smscore_register_device
  *
- * @return current mode
+ * return: current mode
  */
 int smscore_get_device_mode(struct smscore_device_t *coredev)
 {
@@ -1425,7 +1425,7 @@ int smscore_get_device_mode(struct smscore_device_t *coredev)
 }
 EXPORT_SYMBOL_GPL(smscore_get_device_mode);
 
-/**
+/*
  * find client by response id & type within the clients list.
  * return client handle or NULL.
  *
@@ -1462,7 +1462,7 @@ found:
        return client;
 }
 
-/**
+/*
  * find client by response id/type, call clients onresponse handler
  * return buffer to pool on error
  *
@@ -1615,13 +1615,13 @@ void smscore_onresponse(struct smscore_device_t *coredev,
 }
 EXPORT_SYMBOL_GPL(smscore_onresponse);
 
-/**
+/*
  * return pointer to next free buffer descriptor from core pool
  *
  * @param coredev pointer to a coredev object returned by
  *                smscore_register_device
  *
- * @return pointer to descriptor on success, NULL on error.
+ * return: pointer to descriptor on success, NULL on error.
  */
 
 static struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
@@ -1648,7 +1648,7 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
 }
 EXPORT_SYMBOL_GPL(smscore_getbuffer);
 
-/**
+/*
  * return buffer descriptor to a pool
  *
  * @param coredev pointer to a coredev object returned by
@@ -1693,7 +1693,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
        return 0;
 }
 
-/**
+/*
  * creates smsclient object, check that id is taken by another client
  *
  * @param coredev pointer to a coredev object from clients hotplug
@@ -1705,7 +1705,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
  * @param context client-specific context
  * @param client pointer to a value that receives created smsclient object
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smscore_register_client(struct smscore_device_t *coredev,
                            struct smsclient_params_t *params,
@@ -1740,7 +1740,7 @@ int smscore_register_client(struct smscore_device_t *coredev,
 }
 EXPORT_SYMBOL_GPL(smscore_register_client);
 
-/**
+/*
  * frees smsclient object and all subclients associated with it
  *
  * @param client pointer to smsclient object returned by
@@ -1771,7 +1771,7 @@ void smscore_unregister_client(struct smscore_client_t *client)
 }
 EXPORT_SYMBOL_GPL(smscore_unregister_client);
 
-/**
+/*
  * verifies that source id is not taken by another client,
  * calls device handler to send requests to the device
  *
@@ -1780,7 +1780,7 @@ EXPORT_SYMBOL_GPL(smscore_unregister_client);
  * @param buffer pointer to a request buffer
  * @param size size (in bytes) of request buffer
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smsclient_sendrequest(struct smscore_client_t *client,
                          void *buffer, size_t size)
index 95b3723..d48b61e 100644 (file)
@@ -206,7 +206,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
  * @hlen: Number of bytes in haystack.
  * @needle: Buffer to find.
  * @nlen: Number of bytes in needle.
- * @return Pointer into haystack needle was found at, or NULL if not found.
+ * return: Pointer into haystack needle was found at, or NULL if not found.
  */
 static char *findstr(char *haystack, int hlen, char *needle, int nlen)
 {
@@ -226,7 +226,7 @@ static char *findstr(char *haystack, int hlen, char *needle, int nlen)
 /* ************************************************************************** */
 /* EN50221 physical interface functions */
 
-/**
+/*
  * dvb_ca_en50221_check_camstatus - Check CAM status.
  */
 static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
@@ -275,9 +275,9 @@ static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
  * @ca: CA instance.
  * @slot: Slot on interface.
  * @waitfor: Flags to wait for.
- * @timeout_ms: Timeout in milliseconds.
+ * @timeout_hz: Timeout in milliseconds.
  *
- * @return 0 on success, nonzero on error.
+ * return: 0 on success, nonzero on error.
  */
 static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
                                         u8 waitfor, int timeout_hz)
@@ -325,7 +325,7 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
  * @ca: CA instance.
  * @slot: Slot id.
  *
- * @return 0 on success, nonzero on failure.
+ * return: 0 on success, nonzero on failure.
  */
 static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
 {
@@ -397,11 +397,11 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
  * @ca: CA instance.
  * @slot: Slot id.
  * @address: Address to read from. Updated.
- * @tupleType: Tuple id byte. Updated.
- * @tupleLength: Tuple length. Updated.
+ * @tuple_type: Tuple id byte. Updated.
+ * @tuple_length: Tuple length. Updated.
  * @tuple: Dest buffer for tuple (must be 256 bytes). Updated.
  *
- * @return 0 on success, nonzero on error.
+ * return: 0 on success, nonzero on error.
  */
 static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
                                     int *address, int *tuple_type,
@@ -455,7 +455,7 @@ static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
  * @ca: CA instance.
  * @slot: Slot id.
  *
- * @return 0 on success, <0 on failure.
+ * return: 0 on success, <0 on failure.
  */
 static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
 {
@@ -632,10 +632,11 @@ static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
  * @ca: CA instance.
  * @slot: Slot to read from.
  * @ebuf: If non-NULL, the data will be written to this buffer. If NULL,
- * the data will be added into the buffering system as a normal fragment.
+ *       the data will be added into the buffering system as a normal
+ *       fragment.
  * @ecount: Size of ebuf. Ignored if ebuf is NULL.
  *
- * @return Number of bytes read, or < 0 on error
+ * return: Number of bytes read, or < 0 on error
  */
 static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
                                    u8 *ebuf, int ecount)
@@ -784,11 +785,11 @@ exit:
  *
  * @ca: CA instance.
  * @slot: Slot to write to.
- * @ebuf: The data in this buffer is treated as a complete link-level packet to
- * be written.
- * @count: Size of ebuf.
+ * @buf: The data in this buffer is treated as a complete link-level packet to
+ *      be written.
+ * @bytes_write: Size of buf.
  *
- * @return Number of bytes written, or < 0 on error.
+ * return: Number of bytes written, or < 0 on error.
  */
 static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
                                     u8 *buf, int bytes_write)
@@ -933,7 +934,7 @@ static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
 /**
  * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
  *
- * @ca: CA instance.
+ * @pubca: CA instance.
  * @slot: Slot concerned.
  * @change_type: One of the DVB_CA_CAMCHANGE_* values.
  */
@@ -963,7 +964,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq);
 /**
  * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
  *
- * @ca: CA instance.
+ * @pubca: CA instance.
  * @slot: Slot concerned.
  */
 void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot)
@@ -983,7 +984,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_camready_irq);
 /**
  * dvb_ca_en50221_frda_irq - An FR or DA IRQ has occurred.
  *
- * @ca: CA instance.
+ * @pubca: CA instance.
  * @slot: Slot concerned.
  */
 void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot)
@@ -1091,7 +1092,7 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
  *
  * @ca: CA instance.
  * @slot: Slot to process.
- * @return: 0 .. no change
+ * return: 0 .. no change
  *          1 .. CAM state changed
  */
 
@@ -1296,7 +1297,7 @@ static void dvb_ca_en50221_thread_state_machine(struct dvb_ca_private *ca,
        mutex_unlock(&sl->slot_lock);
 }
 
-/**
+/*
  * Kernel thread which monitors CA slots for CAM changes, and performs data
  * transfers.
  */
@@ -1336,12 +1337,11 @@ static int dvb_ca_en50221_thread(void *data)
  * Real ioctl implementation.
  * NOTE: CA_SEND_MSG/CA_GET_MSG ioctls have userspace buffers passed to them.
  *
- * @inode: Inode concerned.
  * @file: File concerned.
  * @cmd: IOCTL command.
- * @arg: Associated argument.
+ * @parg: Associated argument.
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static int dvb_ca_en50221_io_do_ioctl(struct file *file,
                                      unsigned int cmd, void *parg)
@@ -1420,12 +1420,11 @@ out_unlock:
 /**
  * Wrapper for ioctl implementation.
  *
- * @inode: Inode concerned.
  * @file: File concerned.
  * @cmd: IOCTL command.
  * @arg: Associated argument.
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static long dvb_ca_en50221_io_ioctl(struct file *file,
                                    unsigned int cmd, unsigned long arg)
@@ -1441,7 +1440,7 @@ static long dvb_ca_en50221_io_ioctl(struct file *file,
  * @count: Size of source buffer.
  * @ppos: Position in file (ignored).
  *
- * @return Number of bytes read, or <0 on error.
+ * return: Number of bytes read, or <0 on error.
  */
 static ssize_t dvb_ca_en50221_io_write(struct file *file,
                                       const char __user *buf, size_t count,
@@ -1536,7 +1535,7 @@ exit:
        return status;
 }
 
-/**
+/*
  * Condition for waking up in dvb_ca_en50221_io_read_condition
  */
 static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca,
@@ -1593,7 +1592,7 @@ nextslot:
  * @count: Size of destination buffer.
  * @ppos: Position in file (ignored).
  *
- * @return Number of bytes read, or <0 on error.
+ * return: Number of bytes read, or <0 on error.
  */
 static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf,
                                      size_t count, loff_t *ppos)
@@ -1702,7 +1701,7 @@ exit:
  * @inode: Inode concerned.
  * @file: File concerned.
  *
- * @return 0 on success, <0 on failure.
+ * return: 0 on success, <0 on failure.
  */
 static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
 {
@@ -1752,7 +1751,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
  * @inode: Inode concerned.
  * @file: File concerned.
  *
- * @return 0 on success, <0 on failure.
+ * return: 0 on success, <0 on failure.
  */
 static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
 {
@@ -1781,7 +1780,7 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
  * @file: File concerned.
  * @wait: poll wait table.
  *
- * @return Standard poll mask.
+ * return: Standard poll mask.
  */
 static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
 {
@@ -1838,11 +1837,11 @@ static const struct dvb_device dvbdev_ca = {
  * Initialise a new DVB CA EN50221 interface device.
  *
  * @dvb_adapter: DVB adapter to attach the new CA device to.
- * @ca: The dvb_ca instance.
+ * @pubca: The dvb_ca instance.
  * @flags: Flags describing the CA device (DVB_CA_FLAG_*).
  * @slot_count: Number of slots supported.
  *
- * @return 0 on success, nonzero on failure
+ * return: 0 on success, nonzero on failure
  */
 int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
                        struct dvb_ca_en50221 *pubca, int flags, int slot_count)
@@ -1929,8 +1928,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_init);
 /**
  * Release a DVB CA EN50221 interface device.
  *
- * @ca_dev: The dvb_device_t instance for the CA device.
- * @ca: The associated dvb_ca instance.
+ * @pubca: The associated dvb_ca instance.
  */
 void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
 {
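
A minimal usage sketch (editor's addition, not part of the patch): the init/release pair documented above is what a bridge driver calls to expose a CA interface. The my_ca_* callbacks and my_bridge_* names are hypothetical; the field names follow dvb_ca_en50221.h and the two exported calls match the prototypes shown in this diff.

static struct dvb_ca_en50221 my_ca = {
	.owner               = THIS_MODULE,
	.read_attribute_mem  = my_ca_read_attribute_mem,	/* hypothetical callbacks */
	.write_attribute_mem = my_ca_write_attribute_mem,
	.read_cam_control    = my_ca_read_cam_control,
	.write_cam_control   = my_ca_write_cam_control,
	.slot_reset          = my_ca_slot_reset,
	.slot_shutdown       = my_ca_slot_shutdown,
	.slot_ts_enable      = my_ca_slot_ts_enable,
	.poll_slot_status    = my_ca_poll_slot_status,
};

static int my_bridge_ca_register(struct dvb_adapter *adap)
{
	/* flags = 0: plain polling mode; slot_count = 1: a single CAM slot */
	return dvb_ca_en50221_init(adap, &my_ca, 0, 1);
}

static void my_bridge_ca_unregister(void)
{
	dvb_ca_en50221_release(&my_ca);
}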
index 3ad8335..2afaa82 100644
@@ -369,11 +369,14 @@ static void dvb_frontend_swzigzag_update_delay(struct dvb_frontend_private *fepr
 }
 
 /**
- * Performs automatic twiddling of frontend parameters.
+ * dvb_frontend_swzigzag_autotune - Performs automatic twiddling of frontend
+ *     parameters.
  *
- * @param fe The frontend concerned.
- * @param check_wrapped Checks if an iteration has completed. DO NOT SET ON THE FIRST ATTEMPT
- * @returns Number of complete iterations that have been performed.
+ * @fe: The frontend concerned.
+ * @check_wrapped: Checks if an iteration has completed.
+ *                DO NOT SET ON THE FIRST ATTEMPT.
+ *
+ * return: Number of complete iterations that have been performed.
  */
 static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wrapped)
 {
@@ -1253,7 +1256,7 @@ dtv_property_legacy_params_sync(struct dvb_frontend *fe,
  * dtv_get_frontend - calls a callback for retrieving DTV parameters
  * @fe:                struct dvb_frontend pointer
  * @c:         struct dtv_frontend_properties pointer (DVBv5 cache)
- * @p_out      struct dvb_frontend_parameters pointer (DVBv3 FE struct)
+ * @p_out:     struct dvb_frontend_parameters pointer (DVBv3 FE struct)
  *
  * This routine calls either the DVBv3 or DVBv5 get_frontend call.
  * If c is not null, it will update the DVBv5 cache struct pointed by it.
index 06b0dcc..c018e3c 100644
@@ -125,7 +125,7 @@ struct dvb_net_priv {
 };
 
 
-/**
+/*
  *     Determine the packet's protocol ID. The rule here is that we
  *     assume 802.3 if the type field is short enough to be a length.
  *     This is normal practice and works for any 'now in use' protocol.
@@ -155,7 +155,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
 
        rawp = skb->data;
 
-       /**
+       /*
         *      This is a magic hack to spot IPX packets. Older Novell breaks
         *      the protocol design and runs IPX over 802.3 without an 802.2 LLC
         *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
@@ -164,7 +164,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
        if (*(unsigned short *)rawp == 0xFFFF)
                return htons(ETH_P_802_3);
 
-       /**
+       /*
         *      Real 802.2 LLC
         */
        return htons(ETH_P_802_2);
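
A short illustration of the length-vs-EtherType rule described in the comments above (editor's sketch, not from the patch): a value below ETH_P_802_3_MIN (0x0600) in the type field is an 802.3 frame length, anything larger is a real protocol ID. The helper below is hypothetical and ignores the IPX special case handled by the function itself.

static __be16 classify_ethertype(const struct ethhdr *eth)
{
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;		/* e.g. 0x0800 for IPv4 */
	return htons(ETH_P_802_3);		/* a length field: plain 802.3 */
}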
@@ -215,7 +215,8 @@ static int ule_exthdr_padding(struct dvb_net_priv *p)
        return 0;
 }
 
-/** Handle ULE extension headers.
+/*
+ * Handle ULE extension headers.
  *  Function is called after a successful CRC32 verification of an ULE SNDU to complete its decoding.
  *  Returns: >= 0: nr. of bytes consumed by next extension header
  *          -1:   Mandatory extension header that is not recognized or TEST SNDU; discard.
@@ -291,7 +292,7 @@ static int handle_ule_extensions( struct dvb_net_priv *p )
 }
 
 
-/** Prepare for a new ULE SNDU: reset the decoder state. */
+/* Prepare for a new ULE SNDU: reset the decoder state. */
 static inline void reset_ule( struct dvb_net_priv *p )
 {
        p->ule_skb = NULL;
@@ -304,7 +305,7 @@ static inline void reset_ule( struct dvb_net_priv *p )
        p->ule_bridged = 0;
 }
 
-/**
+/*
  * Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of
  * TS cells of a single PID.
  */
@@ -1005,7 +1006,7 @@ static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
 {
        struct net_device *dev = filter->priv;
 
-       /**
+       /*
         * we rely on the DVB API definition where exactly one complete
         * section is delivered in buffer1
         */
index 3532745..a290722 100644
  * @api_version: Firmware API version.
  * @gpio: GPIOs.
  * @get_dvb_frontend: Get DVB frontend callback.
+ *
+ * AF9013/5 GPIOs (mostly guessed):
+ *   * demod#1-gpio#0 - set demod#2 i2c-addr for dual devices
+ *   * demod#1-gpio#1 - xtal setting (?)
+ *   * demod#1-gpio#3 - tuner#1
+ *   * demod#2-gpio#0 - tuner#2
+ *   * demod#2-gpio#1 - xtal setting (?)
  */
 struct af9013_platform_data {
        /*
@@ -89,16 +96,15 @@ struct af9013_platform_data {
 #define AF9013_TS_PARALLEL  AF9013_TS_MODE_PARALLEL
 #define AF9013_TS_SERIAL    AF9013_TS_MODE_SERIAL
 
-/*
- * AF9013/5 GPIOs (mostly guessed)
- * demod#1-gpio#0 - set demod#2 i2c-addr for dual devices
- * demod#1-gpio#1 - xtal setting (?)
- * demod#1-gpio#3 - tuner#1
- * demod#2-gpio#0 - tuner#2
- * demod#2-gpio#1 - xtal setting (?)
- */
-
 #if IS_REACHABLE(CONFIG_DVB_AF9013)
+/**
+ * af9013_attach - Attach an af9013 demod
+ *
+ * @config: pointer to &struct af9013_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *af9013_attach(const struct af9013_config *config,
        struct i2c_adapter *i2c);
 #else
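
A usage sketch for the attach helper documented above (editor's addition): demods with this legacy binding are normally attached through the dvb_attach() macro from dvb_frontend.h, which returns NULL on failure exactly as the kernel-doc states. The configuration contents are board-specific placeholders.

static struct dvb_frontend *my_board_attach_demod(struct i2c_adapter *i2c)
{
	static struct af9013_config cfg = {
		/* board-specific fields: demod I2C address, clock, TS mode, ... */
	};

	/* dvb_attach() also grabs a reference on the demod module */
	return dvb_attach(af9013_attach, &cfg, i2c);	/* NULL on failure */
}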
index dc61bf7..418c565 100644
@@ -41,6 +41,15 @@ struct ascot2e_config {
 };
 
 #if IS_REACHABLE(CONFIG_DVB_ASCOT2E)
+/**
+ * ascot2e_attach - Attach an ascot2e tuner
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct ascot2e_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
                                        const struct ascot2e_config *config,
                                        struct i2c_adapter *i2c);
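
The same pattern applies to the tuner attach documented above (editor's sketch, same assumptions): here the frontend must already exist, typically the pointer returned by the demod's attach call.

static int my_board_attach_tuner(struct dvb_frontend *fe,
				 struct i2c_adapter *i2c)
{
	static struct ascot2e_config cfg = {
		/* board-specific: tuner I2C address, xtal frequency, ... */
	};

	if (!dvb_attach(ascot2e_attach, fe, &cfg, i2c))
		return -ENODEV;		/* NULL return means the tuner was not found */
	return 0;
}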
index f3ff8f6..a49400c 100644
@@ -49,7 +49,6 @@
  * @gpio_chip_base: GPIO.
  * @get_dvb_frontend: Get DVB frontend.
  */
-
 struct cxd2820r_platform_data {
        u8 ts_mode;
        bool ts_clk_inv;
@@ -62,6 +61,17 @@ struct cxd2820r_platform_data {
        bool attach_in_use;
 };
 
+/**
+ * struct cxd2820r_config - configuration for cxd2820r demod
+ *
+ * @i2c_address: Demodulator I2C address. Driver determines DVB-C slave I2C
+ *              address automatically from master address.
+ *              Default: none, must set. Values: 0x6c, 0x6d.
+ * @ts_mode:   TS output mode. Default: none, must set. Values: FIXME?
+ * @ts_clock_inv: TS clock inverted. Default: 0. Values: 0, 1.
+ * @if_agc_polarity: IF AGC polarity. Default: 0. Values: 0, 1.
+ * @spec_inv:  Spectrum inversion. Default: 0. Values: 0, 1.
+ */
 struct cxd2820r_config {
        /* Demodulator I2C address.
         * Driver determines DVB-C slave I2C address automatically from master
@@ -98,6 +108,18 @@ struct cxd2820r_config {
 
 
 #if IS_REACHABLE(CONFIG_DVB_CXD2820R)
+/**
+ * cxd2820r_attach - Attach a cxd2820r demod
+ *
+ * @config: pointer to &struct cxd2820r_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ * @gpio_chip_base: if zero, disables GPIO setting. Otherwise, if
+ *                 CONFIG_GPIOLIB is set, dynamically allocate the
+ *                 GPIO base; if it is not set, use its value to
+ *                 set up the GPIO pins.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *cxd2820r_attach(
        const struct cxd2820r_config *config,
        struct i2c_adapter *i2c,
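
One more attach sketch (editor's addition). The prototype above is truncated by the hunk; the call below assumes the trailing int *gpio_chip_base parameter named in the kernel-doc and passes NULL to skip GPIO setup, as bridge drivers without GPIO needs commonly do.

static struct dvb_frontend *my_board_attach_cxd2820r(struct i2c_adapter *i2c)
{
	static struct cxd2820r_config cfg = {
		.i2c_address = 0x6c,	/* one of the two documented addresses */
		/* .ts_mode and the remaining fields filled per board */
	};

	return dvb_attach(cxd2820r_attach, &cfg, i2c, NULL);	/* NULL on failure */
}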
index 5b5421f..2b3af24 100644
@@ -52,7 +52,7 @@ struct i2c_device_addr {
 };
 
 
-/**
+/*
 * \def IS_I2C_10BIT( addr )
 * \brief Determine if I2C address 'addr' is a 10 bits address or not.
 * \param addr The I2C address.
@@ -67,7 +67,7 @@ struct i2c_device_addr {
 Exported FUNCTIONS
 ------------------------------------------------------------------------------*/
 
-/**
+/*
 * \fn drxbsp_i2c_init()
 * \brief Initialize I2C communication module.
 * \return drx_status_t Return status.
@@ -76,7 +76,7 @@ Exported FUNCTIONS
 */
        drx_status_t drxbsp_i2c_init(void);
 
-/**
+/*
 * \fn drxbsp_i2c_term()
 * \brief Terminate I2C communication module.
 * \return drx_status_t Return status.
@@ -85,7 +85,7 @@ Exported FUNCTIONS
 */
        drx_status_t drxbsp_i2c_term(void);
 
-/**
+/*
 * \fn drx_status_t drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr,
 *                                       u16 w_count,
 *                                       u8 *wData,
@@ -121,7 +121,7 @@ Exported FUNCTIONS
                                         struct i2c_device_addr *r_dev_addr,
                                         u16 r_count, u8 *r_data);
 
-/**
+/*
 * \fn drxbsp_i2c_error_text()
 * \brief Returns a human readable error.
 * Counter part of numerical drx_i2c_error_g.
@@ -130,7 +130,7 @@ Exported FUNCTIONS
 */
        char *drxbsp_i2c_error_text(void);
 
-/**
+/*
 * \var drx_i2c_error_g;
 * \brief I2C specific error codes, platform dependent.
 */
index cd69e18..855685b 100644
@@ -46,7 +46,7 @@ struct i2c_device_addr {
        void *user_data;                /* User data pointer */
 };
 
-/**
+/*
 * \def IS_I2C_10BIT( addr )
 * \brief Determine if I2C address 'addr' is a 10 bits address or not.
 * \param addr The I2C address.
@@ -61,7 +61,7 @@ struct i2c_device_addr {
 Exported FUNCTIONS
 ------------------------------------------------------------------------------*/
 
-/**
+/*
 * \fn drxbsp_i2c_init()
 * \brief Initialize I2C communication module.
 * \return int Return status.
@@ -70,7 +70,7 @@ Exported FUNCTIONS
 */
 int drxbsp_i2c_init(void);
 
-/**
+/*
 * \fn drxbsp_i2c_term()
 * \brief Terminate I2C communication module.
 * \return int Return status.
@@ -79,7 +79,7 @@ int drxbsp_i2c_init(void);
 */
 int drxbsp_i2c_term(void);
 
-/**
+/*
 * \fn int drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr,
 *                                       u16 w_count,
 *                                       u8 * wData,
@@ -115,7 +115,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
                                        struct i2c_device_addr *r_dev_addr,
                                        u16 r_count, u8 *r_data);
 
-/**
+/*
 * \fn drxbsp_i2c_error_text()
 * \brief Returns a human readable error.
 * Counter part of numerical drx_i2c_error_g.
@@ -124,7 +124,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
 */
 char *drxbsp_i2c_error_text(void);
 
-/**
+/*
 * \var drx_i2c_error_g;
 * \brief I2C specific error codes, platform dependent.
 */
@@ -241,13 +241,13 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
                                                struct i2c_device_addr *r_dev_addr,
                                                u16 r_count, u8 *r_data);
 
-/**************
+/*************
 *
 * This section configures the DRX Data Access Protocols (DAPs).
 *
 **************/
 
-/**
+/*
 * \def DRXDAP_SINGLE_MASTER
 * \brief Enable I2C single or I2C multimaster mode on host.
 *
@@ -262,7 +262,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 #define DRXDAP_SINGLE_MASTER 1
 #endif
 
-/**
+/*
 * \def DRXDAP_MAX_WCHUNKSIZE
 * \brief Defines maximum chunksize of an i2c write action by host.
 *
@@ -282,7 +282,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 #define  DRXDAP_MAX_WCHUNKSIZE 60
 #endif
 
-/**
+/*
 * \def DRXDAP_MAX_RCHUNKSIZE
 * \brief Defines maximum chunksize of an i2c read action by host.
 *
@@ -297,13 +297,13 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 #define  DRXDAP_MAX_RCHUNKSIZE 60
 #endif
 
-/**************
+/*************
 *
 * This section describes drxdriver defines.
 *
 **************/
 
-/**
+/*
 * \def DRX_UNKNOWN
 * \brief Generic UNKNOWN value for DRX enumerated types.
 *
@@ -313,7 +313,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 #define DRX_UNKNOWN (254)
 #endif
 
-/**
+/*
 * \def DRX_AUTO
 * \brief Generic AUTO value for DRX enumerated types.
 *
@@ -324,104 +324,104 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 #define DRX_AUTO    (255)
 #endif
 
-/**************
+/*************
 *
 * This section describes flag definitions for the device capabilities.
 *
 **************/
 
-/**
+/*
 * \brief LNA capability flag
 *
 * Device has a Low Noise Amplifier
 *
 */
 #define DRX_CAPABILITY_HAS_LNA           (1UL <<  0)
-/**
+/*
 * \brief OOB-RX capability flag
 *
 * Device has OOB-RX
 *
 */
 #define DRX_CAPABILITY_HAS_OOBRX         (1UL <<  1)
-/**
+/*
 * \brief ATV capability flag
 *
 * Device has ATV
 *
 */
 #define DRX_CAPABILITY_HAS_ATV           (1UL <<  2)
-/**
+/*
 * \brief DVB-T capability flag
 *
 * Device has DVB-T
 *
 */
 #define DRX_CAPABILITY_HAS_DVBT          (1UL <<  3)
-/**
+/*
 * \brief  ITU-B capability flag
 *
 * Device has ITU-B
 *
 */
 #define DRX_CAPABILITY_HAS_ITUB          (1UL <<  4)
-/**
+/*
 * \brief  Audio capability flag
 *
 * Device has Audio
 *
 */
 #define DRX_CAPABILITY_HAS_AUD           (1UL <<  5)
-/**
+/*
 * \brief  SAW switch capability flag
 *
 * Device has SAW switch
 *
 */
 #define DRX_CAPABILITY_HAS_SAWSW         (1UL <<  6)
-/**
+/*
 * \brief  GPIO1 capability flag
 *
 * Device has GPIO1
 *
 */
 #define DRX_CAPABILITY_HAS_GPIO1         (1UL <<  7)
-/**
+/*
 * \brief  GPIO2 capability flag
 *
 * Device has GPIO2
 *
 */
 #define DRX_CAPABILITY_HAS_GPIO2         (1UL <<  8)
-/**
+/*
 * \brief  IRQN capability flag
 *
 * Device has IRQN
 *
 */
 #define DRX_CAPABILITY_HAS_IRQN          (1UL <<  9)
-/**
+/*
 * \brief  8VSB capability flag
 *
 * Device has 8VSB
 *
 */
 #define DRX_CAPABILITY_HAS_8VSB          (1UL << 10)
-/**
+/*
 * \brief  SMA-TX capability flag
 *
 * Device has SMATX
 *
 */
 #define DRX_CAPABILITY_HAS_SMATX         (1UL << 11)
-/**
+/*
 * \brief  SMA-RX capability flag
 *
 * Device has SMARX
 *
 */
 #define DRX_CAPABILITY_HAS_SMARX         (1UL << 12)
-/**
+/*
 * \brief  ITU-A/C capability flag
 *
 * Device has ITU-A/C
@@ -439,7 +439,7 @@ MACROS
         DRX_VERSIONSTRING_HELP(PATCH)
 #define DRX_VERSIONSTRING_HELP(NUM) #NUM
 
-/**
+/*
 * \brief Macro to create byte array elements from 16 bit integers.
 * This macro is used to create byte arrays for block writes.
 * Block writes speed up I2C traffic between host and demod.
@@ -449,7 +449,7 @@ MACROS
 #define DRX_16TO8(x) ((u8) (((u16)x) & 0xFF)), \
                        ((u8)((((u16)x)>>8)&0xFF))
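/*
 * Editor's illustration, not part of the patch: what the DRX_16TO8() helper
 * above produces for one 16-bit register value in a block-write buffer.
 */
u8 block_wr[] = { DRX_16TO8(0x1234) };	/* expands to { 0x34, 0x12 }: low byte first */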
 
-/**
+/*
 * \brief Macro to convert 16 bit register value to a s32
 */
 #define DRX_U16TODRXFREQ(x)   ((x & 0x8000) ? \
@@ -461,191 +461,191 @@ MACROS
 ENUM
 -------------------------------------------------------------------------*/
 
-/**
+/*
 * \enum enum drx_standard
 * \brief Modulation standards.
 */
 enum drx_standard {
-       DRX_STANDARD_DVBT = 0, /**< Terrestrial DVB-T.               */
-       DRX_STANDARD_8VSB,     /**< Terrestrial 8VSB.                */
-       DRX_STANDARD_NTSC,     /**< Terrestrial\Cable analog NTSC.   */
+       DRX_STANDARD_DVBT = 0, /*< Terrestrial DVB-T.               */
+       DRX_STANDARD_8VSB,     /*< Terrestrial 8VSB.                */
+       DRX_STANDARD_NTSC,     /*< Terrestrial\Cable analog NTSC.   */
        DRX_STANDARD_PAL_SECAM_BG,
-                               /**< Terrestrial analog PAL/SECAM B/G */
+                               /*< Terrestrial analog PAL/SECAM B/G */
        DRX_STANDARD_PAL_SECAM_DK,
-                               /**< Terrestrial analog PAL/SECAM D/K */
+                               /*< Terrestrial analog PAL/SECAM D/K */
        DRX_STANDARD_PAL_SECAM_I,
-                               /**< Terrestrial analog PAL/SECAM I   */
+                               /*< Terrestrial analog PAL/SECAM I   */
        DRX_STANDARD_PAL_SECAM_L,
-                               /**< Terrestrial analog PAL/SECAM L
+                               /*< Terrestrial analog PAL/SECAM L
                                        with negative modulation        */
        DRX_STANDARD_PAL_SECAM_LP,
-                               /**< Terrestrial analog PAL/SECAM L
+                               /*< Terrestrial analog PAL/SECAM L
                                        with positive modulation        */
-       DRX_STANDARD_ITU_A,    /**< Cable ITU ANNEX A.               */
-       DRX_STANDARD_ITU_B,    /**< Cable ITU ANNEX B.               */
-       DRX_STANDARD_ITU_C,    /**< Cable ITU ANNEX C.               */
-       DRX_STANDARD_ITU_D,    /**< Cable ITU ANNEX D.               */
-       DRX_STANDARD_FM,       /**< Terrestrial\Cable FM radio       */
-       DRX_STANDARD_DTMB,     /**< Terrestrial DTMB standard (China)*/
+       DRX_STANDARD_ITU_A,    /*< Cable ITU ANNEX A.               */
+       DRX_STANDARD_ITU_B,    /*< Cable ITU ANNEX B.               */
+       DRX_STANDARD_ITU_C,    /*< Cable ITU ANNEX C.               */
+       DRX_STANDARD_ITU_D,    /*< Cable ITU ANNEX D.               */
+       DRX_STANDARD_FM,       /*< Terrestrial\Cable FM radio       */
+       DRX_STANDARD_DTMB,     /*< Terrestrial DTMB standard (China)*/
        DRX_STANDARD_UNKNOWN = DRX_UNKNOWN,
-                               /**< Standard unknown.                */
+                               /*< Standard unknown.                */
        DRX_STANDARD_AUTO = DRX_AUTO
-                               /**< Autodetect standard.             */
+                               /*< Autodetect standard.             */
 };
 
-/**
+/*
 * \enum enum drx_standard
 * \brief Modulation sub-standards.
 */
 enum drx_substandard {
-       DRX_SUBSTANDARD_MAIN = 0, /**< Main subvariant of standard   */
+       DRX_SUBSTANDARD_MAIN = 0, /*< Main subvariant of standard   */
        DRX_SUBSTANDARD_ATV_BG_SCANDINAVIA,
        DRX_SUBSTANDARD_ATV_DK_POLAND,
        DRX_SUBSTANDARD_ATV_DK_CHINA,
        DRX_SUBSTANDARD_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Sub-standard unknown.         */
+                                       /*< Sub-standard unknown.         */
        DRX_SUBSTANDARD_AUTO = DRX_AUTO
-                                       /**< Auto (default) sub-standard   */
+                                       /*< Auto (default) sub-standard   */
 };
 
-/**
+/*
 * \enum enum drx_bandwidth
 * \brief Channel bandwidth or channel spacing.
 */
 enum drx_bandwidth {
-       DRX_BANDWIDTH_8MHZ = 0,  /**< Bandwidth 8 MHz.   */
-       DRX_BANDWIDTH_7MHZ,      /**< Bandwidth 7 MHz.   */
-       DRX_BANDWIDTH_6MHZ,      /**< Bandwidth 6 MHz.   */
+       DRX_BANDWIDTH_8MHZ = 0,  /*< Bandwidth 8 MHz.   */
+       DRX_BANDWIDTH_7MHZ,      /*< Bandwidth 7 MHz.   */
+       DRX_BANDWIDTH_6MHZ,      /*< Bandwidth 6 MHz.   */
        DRX_BANDWIDTH_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Bandwidth unknown. */
+                                       /*< Bandwidth unknown. */
        DRX_BANDWIDTH_AUTO = DRX_AUTO
-                                       /**< Auto Set Bandwidth */
+                                       /*< Auto Set Bandwidth */
 };
 
-/**
+/*
 * \enum enum drx_mirror
 * \brief Indicate if channel spectrum is mirrored or not.
 */
 enum drx_mirror {
-       DRX_MIRROR_NO = 0,   /**< Spectrum is not mirrored.           */
-       DRX_MIRROR_YES,      /**< Spectrum is mirrored.               */
+       DRX_MIRROR_NO = 0,   /*< Spectrum is not mirrored.           */
+       DRX_MIRROR_YES,      /*< Spectrum is mirrored.               */
        DRX_MIRROR_UNKNOWN = DRX_UNKNOWN,
-                               /**< Unknown if spectrum is mirrored.    */
+                               /*< Unknown if spectrum is mirrored.    */
        DRX_MIRROR_AUTO = DRX_AUTO
-                               /**< Autodetect if spectrum is mirrored. */
+                               /*< Autodetect if spectrum is mirrored. */
 };
 
-/**
+/*
 * \enum enum drx_modulation
 * \brief Constellation type of the channel.
 */
 enum drx_modulation {
-       DRX_CONSTELLATION_BPSK = 0,  /**< Modulation is BPSK.       */
-       DRX_CONSTELLATION_QPSK,      /**< Constellation is QPSK.    */
-       DRX_CONSTELLATION_PSK8,      /**< Constellation is PSK8.    */
-       DRX_CONSTELLATION_QAM16,     /**< Constellation is QAM16.   */
-       DRX_CONSTELLATION_QAM32,     /**< Constellation is QAM32.   */
-       DRX_CONSTELLATION_QAM64,     /**< Constellation is QAM64.   */
-       DRX_CONSTELLATION_QAM128,    /**< Constellation is QAM128.  */
-       DRX_CONSTELLATION_QAM256,    /**< Constellation is QAM256.  */
-       DRX_CONSTELLATION_QAM512,    /**< Constellation is QAM512.  */
-       DRX_CONSTELLATION_QAM1024,   /**< Constellation is QAM1024. */
-       DRX_CONSTELLATION_QPSK_NR,   /**< Constellation is QPSK_NR  */
+       DRX_CONSTELLATION_BPSK = 0,  /*< Modulation is BPSK.       */
+       DRX_CONSTELLATION_QPSK,      /*< Constellation is QPSK.    */
+       DRX_CONSTELLATION_PSK8,      /*< Constellation is PSK8.    */
+       DRX_CONSTELLATION_QAM16,     /*< Constellation is QAM16.   */
+       DRX_CONSTELLATION_QAM32,     /*< Constellation is QAM32.   */
+       DRX_CONSTELLATION_QAM64,     /*< Constellation is QAM64.   */
+       DRX_CONSTELLATION_QAM128,    /*< Constellation is QAM128.  */
+       DRX_CONSTELLATION_QAM256,    /*< Constellation is QAM256.  */
+       DRX_CONSTELLATION_QAM512,    /*< Constellation is QAM512.  */
+       DRX_CONSTELLATION_QAM1024,   /*< Constellation is QAM1024. */
+       DRX_CONSTELLATION_QPSK_NR,   /*< Constellation is QPSK_NR  */
        DRX_CONSTELLATION_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Constellation unknown.    */
+                                       /*< Constellation unknown.    */
        DRX_CONSTELLATION_AUTO = DRX_AUTO
-                                       /**< Autodetect constellation. */
+                                       /*< Autodetect constellation. */
 };
 
-/**
+/*
 * \enum enum drx_hierarchy
 * \brief Hierarchy of the channel.
 */
 enum drx_hierarchy {
-       DRX_HIERARCHY_NONE = 0, /**< None hierarchical channel.     */
-       DRX_HIERARCHY_ALPHA1,   /**< Hierarchical channel, alpha=1. */
-       DRX_HIERARCHY_ALPHA2,   /**< Hierarchical channel, alpha=2. */
-       DRX_HIERARCHY_ALPHA4,   /**< Hierarchical channel, alpha=4. */
+       DRX_HIERARCHY_NONE = 0, /*< None hierarchical channel.     */
+       DRX_HIERARCHY_ALPHA1,   /*< Hierarchical channel, alpha=1. */
+       DRX_HIERARCHY_ALPHA2,   /*< Hierarchical channel, alpha=2. */
+       DRX_HIERARCHY_ALPHA4,   /*< Hierarchical channel, alpha=4. */
        DRX_HIERARCHY_UNKNOWN = DRX_UNKNOWN,
-                               /**< Hierarchy unknown.             */
+                               /*< Hierarchy unknown.             */
        DRX_HIERARCHY_AUTO = DRX_AUTO
-                               /**< Autodetect hierarchy.          */
+                               /*< Autodetect hierarchy.          */
 };
 
-/**
+/*
 * \enum enum drx_priority
 * \brief Channel priority in case of hierarchical transmission.
 */
 enum drx_priority {
-       DRX_PRIORITY_LOW = 0,  /**< Low priority channel.  */
-       DRX_PRIORITY_HIGH,     /**< High priority channel. */
+       DRX_PRIORITY_LOW = 0,  /*< Low priority channel.  */
+       DRX_PRIORITY_HIGH,     /*< High priority channel. */
        DRX_PRIORITY_UNKNOWN = DRX_UNKNOWN
-                               /**< Priority unknown.      */
+                               /*< Priority unknown.      */
 };
 
-/**
+/*
 * \enum enum drx_coderate
 * \brief Channel priority in case of hierarchical transmission.
 */
 enum drx_coderate {
-               DRX_CODERATE_1DIV2 = 0, /**< Code rate 1/2nd.      */
-               DRX_CODERATE_2DIV3,     /**< Code rate 2/3nd.      */
-               DRX_CODERATE_3DIV4,     /**< Code rate 3/4nd.      */
-               DRX_CODERATE_5DIV6,     /**< Code rate 5/6nd.      */
-               DRX_CODERATE_7DIV8,     /**< Code rate 7/8nd.      */
+               DRX_CODERATE_1DIV2 = 0, /*< Code rate 1/2nd.      */
+               DRX_CODERATE_2DIV3,     /*< Code rate 2/3nd.      */
+               DRX_CODERATE_3DIV4,     /*< Code rate 3/4nd.      */
+               DRX_CODERATE_5DIV6,     /*< Code rate 5/6nd.      */
+               DRX_CODERATE_7DIV8,     /*< Code rate 7/8nd.      */
                DRX_CODERATE_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Code rate unknown.    */
+                                       /*< Code rate unknown.    */
                DRX_CODERATE_AUTO = DRX_AUTO
-                                       /**< Autodetect code rate. */
+                                       /*< Autodetect code rate. */
 };
 
-/**
+/*
 * \enum enum drx_guard
 * \brief Guard interval of a channel.
 */
 enum drx_guard {
-       DRX_GUARD_1DIV32 = 0, /**< Guard interval 1/32nd.     */
-       DRX_GUARD_1DIV16,     /**< Guard interval 1/16th.     */
-       DRX_GUARD_1DIV8,      /**< Guard interval 1/8th.      */
-       DRX_GUARD_1DIV4,      /**< Guard interval 1/4th.      */
+       DRX_GUARD_1DIV32 = 0, /*< Guard interval 1/32nd.     */
+       DRX_GUARD_1DIV16,     /*< Guard interval 1/16th.     */
+       DRX_GUARD_1DIV8,      /*< Guard interval 1/8th.      */
+       DRX_GUARD_1DIV4,      /*< Guard interval 1/4th.      */
        DRX_GUARD_UNKNOWN = DRX_UNKNOWN,
-                               /**< Guard interval unknown.    */
+                               /*< Guard interval unknown.    */
        DRX_GUARD_AUTO = DRX_AUTO
-                               /**< Autodetect guard interval. */
+                               /*< Autodetect guard interval. */
 };
 
-/**
+/*
 * \enum enum drx_fft_mode
 * \brief FFT mode.
 */
 enum drx_fft_mode {
-       DRX_FFTMODE_2K = 0,    /**< 2K FFT mode.         */
-       DRX_FFTMODE_4K,        /**< 4K FFT mode.         */
-       DRX_FFTMODE_8K,        /**< 8K FFT mode.         */
+       DRX_FFTMODE_2K = 0,    /*< 2K FFT mode.         */
+       DRX_FFTMODE_4K,        /*< 4K FFT mode.         */
+       DRX_FFTMODE_8K,        /*< 8K FFT mode.         */
        DRX_FFTMODE_UNKNOWN = DRX_UNKNOWN,
-                               /**< FFT mode unknown.    */
+                               /*< FFT mode unknown.    */
        DRX_FFTMODE_AUTO = DRX_AUTO
-                               /**< Autodetect FFT mode. */
+                               /*< Autodetect FFT mode. */
 };
 
-/**
+/*
 * \enum enum drx_classification
 * \brief Channel classification.
 */
 enum drx_classification {
-       DRX_CLASSIFICATION_GAUSS = 0, /**< Gaussion noise.            */
-       DRX_CLASSIFICATION_HVY_GAUSS, /**< Heavy Gaussion noise.      */
-       DRX_CLASSIFICATION_COCHANNEL, /**< Co-channel.                */
-       DRX_CLASSIFICATION_STATIC,    /**< Static echo.               */
-       DRX_CLASSIFICATION_MOVING,    /**< Moving echo.               */
-       DRX_CLASSIFICATION_ZERODB,    /**< Zero dB echo.              */
+	DRX_CLASSIFICATION_GAUSS = 0, /*< Gaussian noise.            */
+	DRX_CLASSIFICATION_HVY_GAUSS, /*< Heavy Gaussian noise.      */
+       DRX_CLASSIFICATION_COCHANNEL, /*< Co-channel.                */
+       DRX_CLASSIFICATION_STATIC,    /*< Static echo.               */
+       DRX_CLASSIFICATION_MOVING,    /*< Moving echo.               */
+       DRX_CLASSIFICATION_ZERODB,    /*< Zero dB echo.              */
        DRX_CLASSIFICATION_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Unknown classification     */
+                                       /*< Unknown classification     */
        DRX_CLASSIFICATION_AUTO = DRX_AUTO
-                                       /**< Autodetect classification. */
+                                       /*< Autodetect classification. */
 };
 
-/**
+/*
 * /enum enum drx_interleave_mode
 * /brief Interleave modes
 */
@@ -673,80 +673,80 @@ enum drx_interleave_mode {
        DRX_INTERLEAVEMODE_B52_M48,
        DRX_INTERLEAVEMODE_B52_M0,
        DRX_INTERLEAVEMODE_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Unknown interleave mode    */
+                                       /*< Unknown interleave mode    */
        DRX_INTERLEAVEMODE_AUTO = DRX_AUTO
-                                       /**< Autodetect interleave mode */
+                                       /*< Autodetect interleave mode */
 };
 
-/**
+/*
 * \enum enum drx_carrier_mode
 * \brief Channel Carrier Mode.
 */
 enum drx_carrier_mode {
-       DRX_CARRIER_MULTI = 0,          /**< Multi carrier mode       */
-       DRX_CARRIER_SINGLE,             /**< Single carrier mode      */
+       DRX_CARRIER_MULTI = 0,          /*< Multi carrier mode       */
+       DRX_CARRIER_SINGLE,             /*< Single carrier mode      */
        DRX_CARRIER_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Carrier mode unknown.    */
-       DRX_CARRIER_AUTO = DRX_AUTO     /**< Autodetect carrier mode  */
+                                       /*< Carrier mode unknown.    */
+       DRX_CARRIER_AUTO = DRX_AUTO     /*< Autodetect carrier mode  */
 };
 
-/**
+/*
 * \enum enum drx_frame_mode
 * \brief Channel Frame Mode.
 */
 enum drx_frame_mode {
-       DRX_FRAMEMODE_420 = 0,   /**< 420 with variable PN  */
-       DRX_FRAMEMODE_595,       /**< 595                   */
-       DRX_FRAMEMODE_945,       /**< 945 with variable PN  */
+       DRX_FRAMEMODE_420 = 0,   /*< 420 with variable PN  */
+       DRX_FRAMEMODE_595,       /*< 595                   */
+       DRX_FRAMEMODE_945,       /*< 945 with variable PN  */
        DRX_FRAMEMODE_420_FIXED_PN,
-                                       /**< 420 with fixed PN     */
+                                       /*< 420 with fixed PN     */
        DRX_FRAMEMODE_945_FIXED_PN,
-                                       /**< 945 with fixed PN     */
+                                       /*< 945 with fixed PN     */
        DRX_FRAMEMODE_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Frame mode unknown.   */
+                                       /*< Frame mode unknown.   */
        DRX_FRAMEMODE_AUTO = DRX_AUTO
-                                       /**< Autodetect frame mode */
+                                       /*< Autodetect frame mode */
 };
 
-/**
+/*
 * \enum enum drx_tps_frame
 * \brief Frame number in current super-frame.
 */
 enum drx_tps_frame {
-       DRX_TPS_FRAME1 = 0,       /**< TPS frame 1.       */
-       DRX_TPS_FRAME2,           /**< TPS frame 2.       */
-       DRX_TPS_FRAME3,           /**< TPS frame 3.       */
-       DRX_TPS_FRAME4,           /**< TPS frame 4.       */
+       DRX_TPS_FRAME1 = 0,       /*< TPS frame 1.       */
+       DRX_TPS_FRAME2,           /*< TPS frame 2.       */
+       DRX_TPS_FRAME3,           /*< TPS frame 3.       */
+       DRX_TPS_FRAME4,           /*< TPS frame 4.       */
        DRX_TPS_FRAME_UNKNOWN = DRX_UNKNOWN
-                                       /**< TPS frame unknown. */
+                                       /*< TPS frame unknown. */
 };
 
-/**
+/*
 * \enum enum drx_ldpc
 * \brief TPS LDPC .
 */
 enum drx_ldpc {
-       DRX_LDPC_0_4 = 0,         /**< LDPC 0.4           */
-       DRX_LDPC_0_6,             /**< LDPC 0.6           */
-       DRX_LDPC_0_8,             /**< LDPC 0.8           */
+       DRX_LDPC_0_4 = 0,         /*< LDPC 0.4           */
+       DRX_LDPC_0_6,             /*< LDPC 0.6           */
+       DRX_LDPC_0_8,             /*< LDPC 0.8           */
        DRX_LDPC_UNKNOWN = DRX_UNKNOWN,
-                                       /**< LDPC unknown.      */
-       DRX_LDPC_AUTO = DRX_AUTO  /**< Autodetect LDPC    */
+                                       /*< LDPC unknown.      */
+       DRX_LDPC_AUTO = DRX_AUTO  /*< Autodetect LDPC    */
 };
 
-/**
+/*
 * \enum enum drx_pilot_mode
 * \brief Pilot modes in DTMB.
 */
 enum drx_pilot_mode {
-       DRX_PILOT_ON = 0,         /**< Pilot On             */
-       DRX_PILOT_OFF,            /**< Pilot Off            */
+       DRX_PILOT_ON = 0,         /*< Pilot On             */
+       DRX_PILOT_OFF,            /*< Pilot Off            */
        DRX_PILOT_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Pilot unknown.       */
-       DRX_PILOT_AUTO = DRX_AUTO /**< Autodetect Pilot     */
+                                       /*< Pilot unknown.       */
+       DRX_PILOT_AUTO = DRX_AUTO /*< Autodetect Pilot     */
 };
 
-/**
+/*
  * enum drxu_code_action - indicate if firmware has to be uploaded or verified.
  * @UCODE_UPLOAD:      Upload the microcode image to device
  * @UCODE_VERIFY:      Compare microcode image with code on device
@@ -756,7 +756,7 @@ enum drxu_code_action {
        UCODE_VERIFY
 };
 
-/**
+/*
 * \enum enum drx_lock_status * \brief Used to reflect current lock status of demodulator.
 *
 * The generic lock states have device dependent semantics.
@@ -801,7 +801,7 @@ enum drx_lock_status {
        DRX_LOCKED
 };
 
-/**
+/*
 * \enum enum drx_uio* \brief Used to address a User IO (UIO).
 */
 enum drx_uio {
@@ -840,7 +840,7 @@ enum drx_uio {
        DRX_UIO_MAX = DRX_UIO32
 };
 
-/**
+/*
 * \enum enum drxuio_mode * \brief Used to configure the modus operandi of a UIO.
 *
 * DRX_UIO_MODE_FIRMWARE is an old uio mode.
@@ -850,37 +850,37 @@ enum drx_uio {
 */
 enum drxuio_mode {
        DRX_UIO_MODE_DISABLE = 0x01,
-                           /**< not used, pin is configured as input */
+                           /*< not used, pin is configured as input */
        DRX_UIO_MODE_READWRITE = 0x02,
-                           /**< used for read/write by application   */
+                           /*< used for read/write by application   */
        DRX_UIO_MODE_FIRMWARE = 0x04,
-                           /**< controlled by firmware, function 0   */
+                           /*< controlled by firmware, function 0   */
        DRX_UIO_MODE_FIRMWARE0 = DRX_UIO_MODE_FIRMWARE,
-                                           /**< same as above        */
+                                           /*< same as above        */
        DRX_UIO_MODE_FIRMWARE1 = 0x08,
-                           /**< controlled by firmware, function 1   */
+                           /*< controlled by firmware, function 1   */
        DRX_UIO_MODE_FIRMWARE2 = 0x10,
-                           /**< controlled by firmware, function 2   */
+                           /*< controlled by firmware, function 2   */
        DRX_UIO_MODE_FIRMWARE3 = 0x20,
-                           /**< controlled by firmware, function 3   */
+                           /*< controlled by firmware, function 3   */
        DRX_UIO_MODE_FIRMWARE4 = 0x40,
-                           /**< controlled by firmware, function 4   */
+                           /*< controlled by firmware, function 4   */
        DRX_UIO_MODE_FIRMWARE5 = 0x80
-                           /**< controlled by firmware, function 5   */
+                           /*< controlled by firmware, function 5   */
 };
 
-/**
+/*
 * \enum enum drxoob_downstream_standard * \brief Used to select OOB standard.
 *
 * Based on ANSI 55-1 and 55-2
 */
 enum drxoob_downstream_standard {
        DRX_OOB_MODE_A = 0,
-                      /**< ANSI 55-1   */
+                      /*< ANSI 55-1   */
        DRX_OOB_MODE_B_GRADE_A,
-                      /**< ANSI 55-2 A */
+                      /*< ANSI 55-2 A */
        DRX_OOB_MODE_B_GRADE_B
-                      /**< ANSI 55-2 B */
+                      /*< ANSI 55-2 B */
 };
 
 /*-------------------------------------------------------------------------
@@ -924,7 +924,7 @@ STRUCTS
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
  * struct drxu_code_info       Parameters for microcode upload and verify.
  *
  * @mc_file:   microcode file name
@@ -935,7 +935,7 @@ struct drxu_code_info {
        char                    *mc_file;
 };
 
-/**
+/*
 * \struct drx_mc_version_rec_t
 * \brief Microcode version record
 * Version numbers are stored in BCD format, as usual:
@@ -963,7 +963,7 @@ struct drx_mc_version_rec {
 
 /*========================================*/
 
-/**
+/*
 * \struct drx_filter_info_t
 * \brief Parameters for loading filter coefficients
 *
@@ -971,18 +971,18 @@ struct drx_mc_version_rec {
 */
 struct drx_filter_info {
        u8 *data_re;
-             /**< pointer to coefficients for RE */
+             /*< pointer to coefficients for RE */
        u8 *data_im;
-             /**< pointer to coefficients for IM */
+             /*< pointer to coefficients for IM */
        u16 size_re;
-             /**< size of coefficients for RE    */
+             /*< size of coefficients for RE    */
        u16 size_im;
-             /**< size of coefficients for IM    */
+             /*< size of coefficients for IM    */
 };
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drx_channel * \brief The set of parameters describing a single channel.
 *
 * Used by DRX_CTRL_SET_CHANNEL and DRX_CTRL_GET_CHANNEL.
@@ -991,29 +991,29 @@ struct drx_filter_info {
 */
 struct drx_channel {
        s32 frequency;
-                               /**< frequency in kHz                 */
+                               /*< frequency in kHz                 */
        enum drx_bandwidth bandwidth;
-                               /**< bandwidth                        */
-       enum drx_mirror mirror; /**< mirrored or not on RF            */
+                               /*< bandwidth                        */
+       enum drx_mirror mirror; /*< mirrored or not on RF            */
        enum drx_modulation constellation;
-                               /**< constellation                    */
+                               /*< constellation                    */
        enum drx_hierarchy hierarchy;
-                               /**< hierarchy                        */
-       enum drx_priority priority;     /**< priority                         */
-       enum drx_coderate coderate;     /**< coderate                         */
-       enum drx_guard guard;   /**< guard interval                   */
-       enum drx_fft_mode fftmode;      /**< fftmode                          */
+                               /*< hierarchy                        */
+       enum drx_priority priority;     /*< priority                         */
+       enum drx_coderate coderate;     /*< coderate                         */
+       enum drx_guard guard;   /*< guard interval                   */
+       enum drx_fft_mode fftmode;      /*< fftmode                          */
        enum drx_classification classification;
-                               /**< classification                   */
+                               /*< classification                   */
        u32 symbolrate;
-                               /**< symbolrate in symbols/sec        */
+                               /*< symbolrate in symbols/sec        */
        enum drx_interleave_mode interleavemode;
-                               /**< interleaveMode QAM               */
-       enum drx_ldpc ldpc;             /**< ldpc                             */
-       enum drx_carrier_mode carrier;  /**< carrier                          */
+                               /*< interleaveMode QAM               */
+       enum drx_ldpc ldpc;             /*< ldpc                             */
+       enum drx_carrier_mode carrier;  /*< carrier                          */
        enum drx_frame_mode framemode;
-                               /**< frame mode                       */
-       enum drx_pilot_mode pilot;      /**< pilot mode                       */
+                               /*< frame mode                       */
+       enum drx_pilot_mode pilot;      /*< pilot mode                       */
 };
 
 /*========================================*/
@@ -1027,74 +1027,74 @@ enum drx_cfg_sqi_speed {
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drx_complex * A complex number.
 *
 * Used by DRX_CTRL_CONSTEL.
 */
 struct drx_complex {
        s16 im;
-     /**< Imaginary part. */
+     /*< Imaginary part. */
        s16 re;
-     /**< Real part.      */
+     /*< Real part.      */
 };
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drx_frequency_plan * Array element of a frequency plan.
 *
 * Used by DRX_CTRL_SCAN_INIT.
 */
 struct drx_frequency_plan {
        s32 first;
-                    /**< First centre frequency in this band        */
+                    /*< First centre frequency in this band        */
        s32 last;
-                    /**< Last centre frequency in this band         */
+                    /*< Last centre frequency in this band         */
        s32 step;
-                    /**< Stepping frequency in this band            */
+                    /*< Stepping frequency in this band            */
        enum drx_bandwidth bandwidth;
-                    /**< Bandwidth within this frequency band       */
+                    /*< Bandwidth within this frequency band       */
        u16 ch_number;
-                    /**< First channel number in this band, or first
+                    /*< First channel number in this band, or first
                            index in ch_names                         */
        char **ch_names;
-                    /**< Optional list of channel names in this
+                    /*< Optional list of channel names in this
                            band                                     */
 };
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drx_scan_param * Parameters for channel scan.
 *
 * Used by DRX_CTRL_SCAN_INIT.
 */
 struct drx_scan_param {
        struct drx_frequency_plan *frequency_plan;
-                                 /**< Frequency plan (array)*/
-       u16 frequency_plan_size;  /**< Number of bands       */
-       u32 num_tries;            /**< Max channels tried    */
-       s32 skip;         /**< Minimum frequency step to take
+                                 /*< Frequency plan (array)*/
+       u16 frequency_plan_size;  /*< Number of bands       */
+       u32 num_tries;            /*< Max channels tried    */
+       s32 skip;         /*< Minimum frequency step to take
                                        after a channel is found */
-       void *ext_params;         /**< Standard specific params */
+       void *ext_params;         /*< Standard specific params */
 };
 
 /*========================================*/
 
-/**
+/*
 * \brief Scan commands.
 * Used by scanning algorithms.
 */
 enum drx_scan_command {
-               DRX_SCAN_COMMAND_INIT = 0,/**< Initialize scanning */
-               DRX_SCAN_COMMAND_NEXT,    /**< Next scan           */
-               DRX_SCAN_COMMAND_STOP     /**< Stop scanning       */
+               DRX_SCAN_COMMAND_INIT = 0,/*< Initialize scanning */
+               DRX_SCAN_COMMAND_NEXT,    /*< Next scan           */
+               DRX_SCAN_COMMAND_STOP     /*< Stop scanning       */
 };
 
 /*========================================*/
 
-/**
+/*
 * \brief Inner scan function prototype.
 */
 typedef int(*drx_scan_func_t) (void *scan_context,
@@ -1104,77 +1104,77 @@ typedef int(*drx_scan_func_t) (void *scan_context,
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drxtps_info * TPS information, DVB-T specific.
 *
 * Used by DRX_CTRL_TPS_INFO.
 */
        struct drxtps_info {
-               enum drx_fft_mode fftmode;      /**< Fft mode       */
-               enum drx_guard guard;   /**< Guard interval */
+               enum drx_fft_mode fftmode;      /*< Fft mode       */
+               enum drx_guard guard;   /*< Guard interval */
                enum drx_modulation constellation;
-                                       /**< Constellation  */
+                                       /*< Constellation  */
                enum drx_hierarchy hierarchy;
-                                       /**< Hierarchy      */
+                                       /*< Hierarchy      */
                enum drx_coderate high_coderate;
-                                       /**< High code rate */
+                                       /*< High code rate */
                enum drx_coderate low_coderate;
-                                       /**< Low cod rate   */
-               enum drx_tps_frame frame;       /**< Tps frame      */
-               u8 length;              /**< Length         */
-               u16 cell_id;            /**< Cell id        */
+					/*< Low code rate  */
+               enum drx_tps_frame frame;       /*< Tps frame      */
+               u8 length;              /*< Length         */
+               u16 cell_id;            /*< Cell id        */
        };
 
 /*========================================*/
 
-/**
+/*
 * \brief Power mode of device.
 *
 * Used by DRX_CTRL_SET_POWER_MODE.
 */
        enum drx_power_mode {
                DRX_POWER_UP = 0,
-                        /**< Generic         , Power Up Mode   */
+                        /*< Generic         , Power Up Mode   */
                DRX_POWER_MODE_1,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_2,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_3,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_4,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_5,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_6,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_7,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_8,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
 
                DRX_POWER_MODE_9,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_10,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_11,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_12,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_13,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_14,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_15,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_16,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_DOWN = 255
-                        /**< Generic         , Power Down Mode */
+                        /*< Generic         , Power Down Mode */
        };
 
 /*========================================*/
 
-/**
+/*
 * \enum enum drx_module * \brief Software module identification.
 *
 * Used by DRX_CTRL_VERSION.
@@ -1191,93 +1191,93 @@ typedef int(*drx_scan_func_t) (void *scan_context,
                DRX_MODULE_UNKNOWN
        };
 
-/**
+/*
 * \enum struct drx_version * \brief Version information of one software module.
 *
 * Used by DRX_CTRL_VERSION.
 */
        struct drx_version {
                enum drx_module module_type;
-                              /**< Type identifier of the module */
+                              /*< Type identifier of the module */
                char *module_name;
-                              /**< Name or description of module */
-               u16 v_major;  /**< Major version number          */
-               u16 v_minor;  /**< Minor version number          */
-               u16 v_patch;  /**< Patch version number          */
-               char *v_string; /**< Version as text string        */
+                              /*< Name or description of module */
+               u16 v_major;  /*< Major version number          */
+               u16 v_minor;  /*< Minor version number          */
+               u16 v_patch;  /*< Patch version number          */
+               char *v_string; /*< Version as text string        */
        };
 
-/**
+/*
 * \enum struct drx_version_list * \brief List element of NULL terminated, linked list for version information.
 *
 * Used by DRX_CTRL_VERSION.
 */
 struct drx_version_list {
-       struct drx_version *version;/**< Version information */
+       struct drx_version *version;/*< Version information */
        struct drx_version_list *next;
-                             /**< Next list element   */
+                             /*< Next list element   */
 };
 
 /*========================================*/
 
-/**
+/*
 * \brief Parameters needed to confiugure a UIO.
 *
 * Used by DRX_CTRL_UIO_CFG.
 */
        struct drxuio_cfg {
                enum drx_uio uio;
-                      /**< UIO identifier       */
+                      /*< UIO identifier       */
                enum drxuio_mode mode;
-                      /**< UIO operational mode */
+                      /*< UIO operational mode */
        };
 
 /*========================================*/
 
-/**
+/*
 * \brief Parameters needed to read from or write to a UIO.
 *
 * Used by DRX_CTRL_UIO_READ and DRX_CTRL_UIO_WRITE.
 */
        struct drxuio_data {
                enum drx_uio uio;
-                  /**< UIO identifier              */
+                  /*< UIO identifier              */
                bool value;
-                  /**< UIO value (true=1, false=0) */
+                  /*< UIO value (true=1, false=0) */
        };
 
 /*========================================*/
 
-/**
+/*
 * \brief Parameters needed to configure OOB.
 *
 * Used by DRX_CTRL_SET_OOB.
 */
        struct drxoob {
-               s32 frequency;     /**< Frequency in kHz      */
+               s32 frequency;     /*< Frequency in kHz      */
                enum drxoob_downstream_standard standard;
-                                                  /**< OOB standard          */
-               bool spectrum_inverted;    /**< If true, then spectrum
+                                                  /*< OOB standard          */
+               bool spectrum_inverted;    /*< If true, then spectrum
                                                         is inverted          */
        };
 
 /*========================================*/
 
-/**
+/*
 * \brief Metrics from OOB.
 *
 * Used by DRX_CTRL_GET_OOB.
 */
        struct drxoob_status {
-               s32 frequency; /**< Frequency in Khz         */
-               enum drx_lock_status lock;        /**< Lock status              */
-               u32 mer;                  /**< MER                      */
-               s32 symbol_rate_offset;   /**< Symbolrate offset in ppm */
+		s32 frequency; /*< Frequency in kHz         */
+               enum drx_lock_status lock;        /*< Lock status              */
+               u32 mer;                  /*< MER                      */
+               s32 symbol_rate_offset;   /*< Symbolrate offset in ppm */
        };
 
 /*========================================*/
 
-/**
+/*
 * \brief Device dependent configuration data.
 *
 * Used by DRX_CTRL_SET_CFG and DRX_CTRL_GET_CFG.
@@ -1285,14 +1285,14 @@ struct drx_version_list {
 */
        struct drx_cfg {
                u32 cfg_type;
-                         /**< Function identifier */
+                         /*< Function identifier */
                void *cfg_data;
-                         /**< Function data */
+                         /*< Function data */
        };
 
 /*========================================*/
 
-/**
+/*
 * /struct DRXMpegStartWidth_t
 * MStart width [nr MCLK cycles] for serial MPEG output.
 */
@@ -1303,7 +1303,7 @@ struct drx_version_list {
        };
 
 /* CTRL CFG MPEG output */
-/**
+/*
 * \struct struct drx_cfg_mpeg_output * \brief Configuration parameters for MPEG output control.
 *
 * Used by DRX_CFG_MPEG_OUTPUT, in combination with DRX_CTRL_SET_CFG and
@@ -1311,29 +1311,29 @@ struct drx_version_list {
 */
 
        struct drx_cfg_mpeg_output {
-               bool enable_mpeg_output;/**< If true, enable MPEG output      */
-               bool insert_rs_byte;    /**< If true, insert RS byte          */
-               bool enable_parallel;   /**< If true, parallel out otherwise
+               bool enable_mpeg_output;/*< If true, enable MPEG output      */
+               bool insert_rs_byte;    /*< If true, insert RS byte          */
+               bool enable_parallel;   /*< If true, parallel out otherwise
                                                                     serial   */
-               bool invert_data;       /**< If true, invert DATA signals     */
-               bool invert_err;        /**< If true, invert ERR signal       */
-               bool invert_str;        /**< If true, invert STR signals      */
-               bool invert_val;        /**< If true, invert VAL signals      */
-               bool invert_clk;        /**< If true, invert CLK signals      */
-               bool static_clk;        /**< If true, static MPEG clockrate
+               bool invert_data;       /*< If true, invert DATA signals     */
+               bool invert_err;        /*< If true, invert ERR signal       */
+               bool invert_str;        /*< If true, invert STR signals      */
+               bool invert_val;        /*< If true, invert VAL signals      */
+               bool invert_clk;        /*< If true, invert CLK signals      */
+               bool static_clk;        /*< If true, static MPEG clockrate
                                             will be used, otherwise clockrate
                                             will adapt to the bitrate of the
                                             TS                               */
-               u32 bitrate;            /**< Maximum bitrate in b/s in case
+               u32 bitrate;            /*< Maximum bitrate in b/s in case
                                             static clockrate is selected     */
                enum drxmpeg_str_width width_str;
-                                       /**< MPEG start width                 */
+                                       /*< MPEG start width                 */
        };
 
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drxi2c_data * \brief Data for I2C via 2nd or 3rd or etc I2C port.
 *
 * Used by DRX_CTRL_I2C_READWRITE.
@@ -1341,187 +1341,187 @@ struct drx_version_list {
 *
 */
        struct drxi2c_data {
-               u16 port_nr;    /**< I2C port number               */
+               u16 port_nr;    /*< I2C port number               */
                struct i2c_device_addr *w_dev_addr;
-                               /**< Write device address          */
-               u16 w_count;    /**< Size of write data in bytes   */
-               u8 *wData;      /**< Pointer to write data         */
+                               /*< Write device address          */
+               u16 w_count;    /*< Size of write data in bytes   */
+               u8 *wData;      /*< Pointer to write data         */
                struct i2c_device_addr *r_dev_addr;
-                               /**< Read device address           */
-               u16 r_count;    /**< Size of data to read in bytes */
-               u8 *r_data;     /**< Pointer to read buffer        */
+                               /*< Read device address           */
+               u16 r_count;    /*< Size of data to read in bytes */
+               u8 *r_data;     /*< Pointer to read buffer        */
        };
 
 /*========================================*/
 
-/**
+/*
 * \enum enum drx_aud_standard * \brief Audio standard identifier.
 *
 * Used by DRX_CTRL_SET_AUD.
 */
        enum drx_aud_standard {
-               DRX_AUD_STANDARD_BTSC,     /**< set BTSC standard (USA)       */
-               DRX_AUD_STANDARD_A2,       /**< set A2-Korea FM Stereo        */
-               DRX_AUD_STANDARD_EIAJ,     /**< set to Japanese FM Stereo     */
-               DRX_AUD_STANDARD_FM_STEREO,/**< set to FM-Stereo Radio        */
-               DRX_AUD_STANDARD_M_MONO,   /**< for 4.5 MHz mono detected     */
-               DRX_AUD_STANDARD_D_K_MONO, /**< for 6.5 MHz mono detected     */
-               DRX_AUD_STANDARD_BG_FM,    /**< set BG_FM standard            */
-               DRX_AUD_STANDARD_D_K1,     /**< set D_K1 standard             */
-               DRX_AUD_STANDARD_D_K2,     /**< set D_K2 standard             */
-               DRX_AUD_STANDARD_D_K3,     /**< set D_K3 standard             */
+               DRX_AUD_STANDARD_BTSC,     /*< set BTSC standard (USA)       */
+               DRX_AUD_STANDARD_A2,       /*< set A2-Korea FM Stereo        */
+               DRX_AUD_STANDARD_EIAJ,     /*< set to Japanese FM Stereo     */
+               DRX_AUD_STANDARD_FM_STEREO,/*< set to FM-Stereo Radio        */
+               DRX_AUD_STANDARD_M_MONO,   /*< for 4.5 MHz mono detected     */
+               DRX_AUD_STANDARD_D_K_MONO, /*< for 6.5 MHz mono detected     */
+               DRX_AUD_STANDARD_BG_FM,    /*< set BG_FM standard            */
+               DRX_AUD_STANDARD_D_K1,     /*< set D_K1 standard             */
+               DRX_AUD_STANDARD_D_K2,     /*< set D_K2 standard             */
+               DRX_AUD_STANDARD_D_K3,     /*< set D_K3 standard             */
                DRX_AUD_STANDARD_BG_NICAM_FM,
-                                          /**< set BG_NICAM_FM standard      */
+                                          /*< set BG_NICAM_FM standard      */
                DRX_AUD_STANDARD_L_NICAM_AM,
-                                          /**< set L_NICAM_AM standard       */
+                                          /*< set L_NICAM_AM standard       */
                DRX_AUD_STANDARD_I_NICAM_FM,
-                                          /**< set I_NICAM_FM standard       */
+                                          /*< set I_NICAM_FM standard       */
                DRX_AUD_STANDARD_D_K_NICAM_FM,
-                                          /**< set D_K_NICAM_FM standard     */
-               DRX_AUD_STANDARD_NOT_READY,/**< used to detect audio standard */
+                                          /*< set D_K_NICAM_FM standard     */
+               DRX_AUD_STANDARD_NOT_READY,/*< used to detect audio standard */
                DRX_AUD_STANDARD_AUTO = DRX_AUTO,
-                                          /**< Automatic Standard Detection  */
+                                          /*< Automatic Standard Detection  */
                DRX_AUD_STANDARD_UNKNOWN = DRX_UNKNOWN
-                                          /**< used as auto and for readback */
+                                          /*< used as auto and for readback */
        };
 
 /* CTRL_AUD_GET_STATUS    - struct drx_aud_status */
-/**
+/*
 * \enum enum drx_aud_nicam_status * \brief Status of NICAM carrier.
 */
        enum drx_aud_nicam_status {
                DRX_AUD_NICAM_DETECTED = 0,
-                                         /**< NICAM carrier detected         */
+                                         /*< NICAM carrier detected         */
                DRX_AUD_NICAM_NOT_DETECTED,
-                                         /**< NICAM carrier not detected     */
-               DRX_AUD_NICAM_BAD         /**< NICAM carrier bad quality      */
+                                         /*< NICAM carrier not detected     */
+               DRX_AUD_NICAM_BAD         /*< NICAM carrier bad quality      */
        };
 
-/**
+/*
 * \struct struct drx_aud_status * \brief Audio status characteristics.
 */
        struct drx_aud_status {
-               bool stereo;              /**< stereo detection               */
-               bool carrier_a;   /**< carrier A detected             */
-               bool carrier_b;   /**< carrier B detected             */
-               bool sap;                 /**< sap / bilingual detection      */
-               bool rds;                 /**< RDS data array present         */
+               bool stereo;              /*< stereo detection               */
+               bool carrier_a;   /*< carrier A detected             */
+               bool carrier_b;   /*< carrier B detected             */
+               bool sap;                 /*< sap / bilingual detection      */
+               bool rds;                 /*< RDS data array present         */
                enum drx_aud_nicam_status nicam_status;
-                                         /**< status of NICAM carrier        */
-               s8 fm_ident;              /**< FM Identification value        */
+                                         /*< status of NICAM carrier        */
+               s8 fm_ident;              /*< FM Identification value        */
        };
 
 /* CTRL_AUD_READ_RDS       - DRXRDSdata_t */
 
-/**
+/*
 * \struct DRXRDSdata_t
 * \brief Raw RDS data array.
 */
        struct drx_cfg_aud_rds {
-               bool valid;               /**< RDS data validation            */
-               u16 data[18];             /**< data from one RDS data array   */
+               bool valid;               /*< RDS data validation            */
+               u16 data[18];             /*< data from one RDS data array   */
        };
 
 /* DRX_CFG_AUD_VOLUME      - struct drx_cfg_aud_volume - set/get */
-/**
+/*
 * \enum DRXAudAVCDecayTime_t
 * \brief Automatic volume control configuration.
 */
        enum drx_aud_avc_mode {
-               DRX_AUD_AVC_OFF,          /**< Automatic volume control off   */
-               DRX_AUD_AVC_DECAYTIME_8S, /**< level volume in  8 seconds     */
-               DRX_AUD_AVC_DECAYTIME_4S, /**< level volume in  4 seconds     */
-               DRX_AUD_AVC_DECAYTIME_2S, /**< level volume in  2 seconds     */
-               DRX_AUD_AVC_DECAYTIME_20MS/**< level volume in 20 millisec    */
+               DRX_AUD_AVC_OFF,          /*< Automatic volume control off   */
+               DRX_AUD_AVC_DECAYTIME_8S, /*< level volume in  8 seconds     */
+               DRX_AUD_AVC_DECAYTIME_4S, /*< level volume in  4 seconds     */
+               DRX_AUD_AVC_DECAYTIME_2S, /*< level volume in  2 seconds     */
+               DRX_AUD_AVC_DECAYTIME_20MS/*< level volume in 20 millisec    */
        };
 
-/**
+/*
 * /enum DRXAudMaxAVCGain_t
 * /brief Automatic volume control max gain in audio baseband.
 */
        enum drx_aud_avc_max_gain {
-               DRX_AUD_AVC_MAX_GAIN_0DB, /**< maximum AVC gain  0 dB         */
-               DRX_AUD_AVC_MAX_GAIN_6DB, /**< maximum AVC gain  6 dB         */
-               DRX_AUD_AVC_MAX_GAIN_12DB /**< maximum AVC gain 12 dB         */
+               DRX_AUD_AVC_MAX_GAIN_0DB, /*< maximum AVC gain  0 dB         */
+               DRX_AUD_AVC_MAX_GAIN_6DB, /*< maximum AVC gain  6 dB         */
+               DRX_AUD_AVC_MAX_GAIN_12DB /*< maximum AVC gain 12 dB         */
        };
 
-/**
+/*
 * /enum DRXAudMaxAVCAtten_t
 * /brief Automatic volume control max attenuation in audio baseband.
 */
        enum drx_aud_avc_max_atten {
                DRX_AUD_AVC_MAX_ATTEN_12DB,
-                                         /**< maximum AVC attenuation 12 dB  */
+                                         /*< maximum AVC attenuation 12 dB  */
                DRX_AUD_AVC_MAX_ATTEN_18DB,
-                                         /**< maximum AVC attenuation 18 dB  */
-               DRX_AUD_AVC_MAX_ATTEN_24DB/**< maximum AVC attenuation 24 dB  */
+                                         /*< maximum AVC attenuation 18 dB  */
+               DRX_AUD_AVC_MAX_ATTEN_24DB/*< maximum AVC attenuation 24 dB  */
        };
-/**
+/*
 * \struct struct drx_cfg_aud_volume * \brief Audio volume configuration.
 */
        struct drx_cfg_aud_volume {
-               bool mute;                /**< mute overrides volume setting  */
-               s16 volume;               /**< volume, range -114 to 12 dB    */
-               enum drx_aud_avc_mode avc_mode;  /**< AVC auto volume control mode   */
-               u16 avc_ref_level;        /**< AVC reference level            */
+               bool mute;                /*< mute overrides volume setting  */
+               s16 volume;               /*< volume, range -114 to 12 dB    */
+               enum drx_aud_avc_mode avc_mode;  /*< AVC auto volume control mode   */
+               u16 avc_ref_level;        /*< AVC reference level            */
                enum drx_aud_avc_max_gain avc_max_gain;
-                                         /**< AVC max gain selection         */
+                                         /*< AVC max gain selection         */
                enum drx_aud_avc_max_atten avc_max_atten;
-                                         /**< AVC max attenuation selection  */
-               s16 strength_left;        /**< quasi-peak, left speaker       */
-               s16 strength_right;       /**< quasi-peak, right speaker      */
+                                         /*< AVC max attenuation selection  */
+               s16 strength_left;        /*< quasi-peak, left speaker       */
+               s16 strength_right;       /*< quasi-peak, right speaker      */
        };
 
 /* DRX_CFG_I2S_OUTPUT      - struct drx_cfg_i2s_output - set/get */
-/**
+/*
 * \enum enum drxi2s_mode * \brief I2S output mode.
 */
        enum drxi2s_mode {
-               DRX_I2S_MODE_MASTER,      /**< I2S is in master mode          */
-               DRX_I2S_MODE_SLAVE        /**< I2S is in slave mode           */
+               DRX_I2S_MODE_MASTER,      /*< I2S is in master mode          */
+               DRX_I2S_MODE_SLAVE        /*< I2S is in slave mode           */
        };
 
-/**
+/*
 * \enum enum drxi2s_word_length * \brief Width of I2S data.
 */
        enum drxi2s_word_length {
-               DRX_I2S_WORDLENGTH_32 = 0,/**< I2S data is 32 bit wide        */
-               DRX_I2S_WORDLENGTH_16 = 1 /**< I2S data is 16 bit wide        */
+               DRX_I2S_WORDLENGTH_32 = 0,/*< I2S data is 32 bit wide        */
+               DRX_I2S_WORDLENGTH_16 = 1 /*< I2S data is 16 bit wide        */
        };
 
-/**
+/*
 * \enum enum drxi2s_format * \brief Data wordstrobe alignment for I2S.
 */
        enum drxi2s_format {
                DRX_I2S_FORMAT_WS_WITH_DATA,
-                                   /**< I2S data and wordstrobe are aligned  */
+                                   /*< I2S data and wordstrobe are aligned  */
                DRX_I2S_FORMAT_WS_ADVANCED
-                                   /**< I2S data one cycle after wordstrobe  */
+                                   /*< I2S data one cycle after wordstrobe  */
        };
 
-/**
+/*
 * \enum enum drxi2s_polarity * \brief Polarity of I2S data.
 */
        enum drxi2s_polarity {
-               DRX_I2S_POLARITY_RIGHT,/**< wordstrobe - right high, left low */
-               DRX_I2S_POLARITY_LEFT  /**< wordstrobe - right low, left high */
+               DRX_I2S_POLARITY_RIGHT,/*< wordstrobe - right high, left low */
+               DRX_I2S_POLARITY_LEFT  /*< wordstrobe - right low, left high */
        };
 
-/**
+/*
 * \struct struct drx_cfg_i2s_output * \brief I2S output configuration.
 */
        struct drx_cfg_i2s_output {
-               bool output_enable;       /**< I2S output enable              */
-               u32 frequency;    /**< range from 8000-48000 Hz       */
-               enum drxi2s_mode mode;    /**< I2S mode, master or slave      */
+               bool output_enable;       /*< I2S output enable              */
+               u32 frequency;    /*< range from 8000-48000 Hz       */
+               enum drxi2s_mode mode;    /*< I2S mode, master or slave      */
                enum drxi2s_word_length word_length;
-                                         /**< I2S wordlength, 16 or 32 bits  */
-               enum drxi2s_polarity polarity;/**< I2S wordstrobe polarity        */
-               enum drxi2s_format format;        /**< I2S wordstrobe delay to data   */
+                                         /*< I2S wordlength, 16 or 32 bits  */
+               enum drxi2s_polarity polarity;/*< I2S wordstrobe polarity        */
+               enum drxi2s_format format;        /*< I2S wordstrobe delay to data   */
        };
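
Purely as an illustration (not from the patch), a typical 48 kHz master-mode I2S setup using only the enumerators defined above might look like this; the helper name and the particular choices are assumptions.

/* Illustrative only: 48 kHz, 16-bit, master-mode I2S output. */
static void example_i2s_cfg(struct drx_cfg_i2s_output *i2s)
{
	i2s->output_enable = true;
	i2s->frequency     = 48000;                      /* within 8000-48000 Hz */
	i2s->mode          = DRX_I2S_MODE_MASTER;
	i2s->word_length   = DRX_I2S_WORDLENGTH_16;
	i2s->polarity      = DRX_I2S_POLARITY_LEFT;
	i2s->format        = DRX_I2S_FORMAT_WS_WITH_DATA;
}
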
 
 /* ------------------------------expert interface-----------------------------*/
-/**
+/*
 * /enum enum drx_aud_fm_deemphasis * setting for FM-Deemphasis in audio demodulator.
 *
 */
@@ -1531,7 +1531,7 @@ struct drx_version_list {
                DRX_AUD_FM_DEEMPH_OFF
        };
 
-/**
+/*
 * /enum DRXAudDeviation_t
 * setting for deviation mode in audio demodulator.
 *
@@ -1541,7 +1541,7 @@ struct drx_version_list {
                DRX_AUD_DEVIATION_HIGH
        };
 
-/**
+/*
 * /enum enum drx_no_carrier_option * setting for carrier, mute/noise.
 *
 */
@@ -1550,7 +1550,7 @@ struct drx_version_list {
                DRX_NO_CARRIER_NOISE
        };
 
-/**
+/*
 * \enum DRXAudAutoSound_t
 * \brief Automatic Sound
 */
@@ -1560,7 +1560,7 @@ struct drx_version_list {
                DRX_AUD_AUTO_SOUND_SELECT_ON_CHANGE_OFF
        };
 
-/**
+/*
 * \enum DRXAudASSThres_t
 * \brief Automatic Sound Select Thresholds
 */
@@ -1570,7 +1570,7 @@ struct drx_version_list {
                u16 nicam;      /* Nicam Threshold for ASS configuration */
        };
 
-/**
+/*
 * \struct struct drx_aud_carrier * \brief Carrier detection related parameters
 */
        struct drx_aud_carrier {
@@ -1580,7 +1580,7 @@ struct drx_version_list {
                s32 dco;        /* frequency adjustment (A) */
        };
 
-/**
+/*
 * \struct struct drx_cfg_aud_carriers * \brief combining carrier A & B to one struct
 */
        struct drx_cfg_aud_carriers {
@@ -1588,7 +1588,7 @@ struct drx_version_list {
                struct drx_aud_carrier b;
        };
 
-/**
+/*
 * /enum enum drx_aud_i2s_src * Selection of audio source
 */
        enum drx_aud_i2s_src {
@@ -1597,19 +1597,19 @@ struct drx_version_list {
                DRX_AUD_SRC_STEREO_OR_A,
                DRX_AUD_SRC_STEREO_OR_B};
 
-/**
+/*
 * \enum enum drx_aud_i2s_matrix * \brief Used for selecting I2S output.
 */
        enum drx_aud_i2s_matrix {
                DRX_AUD_I2S_MATRIX_A_MONO,
-                                       /**< A sound only, stereo or mono     */
+                                       /*< A sound only, stereo or mono     */
                DRX_AUD_I2S_MATRIX_B_MONO,
-                                       /**< B sound only, stereo or mono     */
+                                       /*< B sound only, stereo or mono     */
                DRX_AUD_I2S_MATRIX_STEREO,
-                                       /**< A+B sound, transparent           */
-               DRX_AUD_I2S_MATRIX_MONO /**< A+B mixed to mono sum, (L+R)/2   */};
+                                       /*< A+B sound, transparent           */
+               DRX_AUD_I2S_MATRIX_MONO /*< A+B mixed to mono sum, (L+R)/2   */};
 
-/**
+/*
 * /enum enum drx_aud_fm_matrix * setting for FM-Matrix in audio demodulator.
 *
 */
@@ -1620,7 +1620,7 @@ struct drx_version_list {
                DRX_AUD_FM_MATRIX_SOUND_A,
                DRX_AUD_FM_MATRIX_SOUND_B};
 
-/**
+/*
 * \struct DRXAudMatrices_t
 * \brief Mixer settings
 */
@@ -1630,22 +1630,22 @@ struct drx_cfg_aud_mixer {
        enum drx_aud_fm_matrix matrix_fm;
 };
 
-/**
+/*
 * \enum DRXI2SVidSync_t
 * \brief Audio/video synchronization, interacts with I2S mode.
 * AUTO_1 and AUTO_2 are for automatic video standard detection with preference
 * for NTSC or Monochrome, because the frequencies are too close (59.94 & 60 Hz)
 */
        enum drx_cfg_aud_av_sync {
-               DRX_AUD_AVSYNC_OFF,/**< audio/video synchronization is off   */
+               DRX_AUD_AVSYNC_OFF,/*< audio/video synchronization is off   */
                DRX_AUD_AVSYNC_NTSC,
-                                  /**< it is an NTSC system                 */
+                                  /*< it is an NTSC system                 */
                DRX_AUD_AVSYNC_MONOCHROME,
-                                  /**< it is a MONOCHROME system            */
+                                  /*< it is a MONOCHROME system            */
                DRX_AUD_AVSYNC_PAL_SECAM
-                                  /**< it is a PAL/SECAM system             */};
+                                  /*< it is a PAL/SECAM system             */};
 
-/**
+/*
 * \struct struct drx_cfg_aud_prescale * \brief Prescalers
 */
 struct drx_cfg_aud_prescale {
@@ -1653,7 +1653,7 @@ struct drx_cfg_aud_prescale {
        s16 nicam_gain;
 };
 
-/**
+/*
 * \struct struct drx_aud_beep * \brief Beep
 */
 struct drx_aud_beep {
@@ -1662,14 +1662,14 @@ struct drx_aud_beep {
        bool mute;
 };
 
-/**
+/*
 * \enum enum drx_aud_btsc_detect * \brief BTSC detection mode
 */
        enum drx_aud_btsc_detect {
                DRX_BTSC_STEREO,
                DRX_BTSC_MONO_AND_SAP};
 
-/**
+/*
 * \struct struct drx_aud_data * \brief Audio data structure
 */
 struct drx_aud_data {
@@ -1692,7 +1692,7 @@ struct drx_aud_data {
        bool rds_data_present;
 };
 
-/**
+/*
 * \enum enum drx_qam_lock_range * \brief QAM lock range mode
 */
        enum drx_qam_lock_range {
@@ -1782,7 +1782,7 @@ struct drx_aud_data {
                                                             u32 wdata, /* data to write               */
                                                             u32 *rdata);       /* data to read                */
 
-/**
+/*
 * \struct struct drx_access_func * \brief Interface to an access protocol.
 */
 struct drx_access_func {
@@ -1811,85 +1811,85 @@ struct drx_reg_dump {
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
 * \struct struct drx_common_attr * \brief Set of common attributes, shared by all DRX devices.
 */
        struct drx_common_attr {
                /* Microcode (firmware) attributes */
-               char *microcode_file;   /**<  microcode filename           */
+               char *microcode_file;   /*<  microcode filename           */
                bool verify_microcode;
-                                  /**< Use microcode verify or not.          */
+                                  /*< Use microcode verify or not.          */
                struct drx_mc_version_rec mcversion;
-                                  /**< Version record of microcode from file */
+                                  /*< Version record of microcode from file */
 
                /* Clocks and tuner attributes */
                s32 intermediate_freq;
-                                    /**< IF,if tuner instance not used. (kHz)*/
+                                    /*< IF,if tuner instance not used. (kHz)*/
                s32 sys_clock_freq;
-                                    /**< Systemclock frequency.  (kHz)       */
+                                    /*< Systemclock frequency.  (kHz)       */
                s32 osc_clock_freq;
-                                    /**< Oscillator clock frequency.  (kHz)  */
+                                    /*< Oscillator clock frequency.  (kHz)  */
                s16 osc_clock_deviation;
-                                    /**< Oscillator clock deviation.  (ppm)  */
+                                    /*< Oscillator clock deviation.  (ppm)  */
                bool mirror_freq_spect;
-                                    /**< Mirror IF frequency spectrum or not.*/
+                                    /*< Mirror IF frequency spectrum or not.*/
 
                /* Initial MPEG output attributes */
                struct drx_cfg_mpeg_output mpeg_cfg;
-                                    /**< MPEG configuration                  */
+                                    /*< MPEG configuration                  */
 
-               bool is_opened;     /**< if true instance is already opened. */
+               bool is_opened;     /*< if true instance is already opened. */
 
                /* Channel scan */
                struct drx_scan_param *scan_param;
-                                     /**< scan parameters                    */
+                                     /*< scan parameters                    */
                u16 scan_freq_plan_index;
-                                     /**< next index in freq plan            */
+                                     /*< next index in freq plan            */
                s32 scan_next_frequency;
-                                     /**< next freq to scan                  */
-               bool scan_ready;     /**< scan ready flag                    */
-               u32 scan_max_channels;/**< number of channels in freqplan     */
+                                     /*< next freq to scan                  */
+               bool scan_ready;     /*< scan ready flag                    */
+               u32 scan_max_channels;/*< number of channels in freqplan     */
                u32 scan_channels_scanned;
-                                       /**< number of channels scanned       */
+                                       /*< number of channels scanned       */
                /* Channel scan - inner loop: demod related */
                drx_scan_func_t scan_function;
-                                     /**< function to check channel          */
+                                     /*< function to check channel          */
                /* Channel scan - inner loop: SYSObj related */
-               void *scan_context;    /**< Context Pointer of SYSObj          */
+               void *scan_context;    /*< Context Pointer of SYSObj          */
                /* Channel scan - parameters for default DTV scan function in core driver  */
                u16 scan_demod_lock_timeout;
-                                        /**< millisecs to wait for lock      */
+                                        /*< millisecs to wait for lock      */
                enum drx_lock_status scan_desired_lock;
-                                     /**< lock requirement for channel found */
+                                     /*< lock requirement for channel found */
                /* scan_active can be used by SetChannel to decide how to program the tuner,
                   fast or slow (but stable). Usually fast during scan. */
-               bool scan_active;    /**< true when scan routines are active */
+               bool scan_active;    /*< true when scan routines are active */
 
                /* Power management */
                enum drx_power_mode current_power_mode;
-                                     /**< current power management mode      */
+                                     /*< current power management mode      */
 
                /* Tuner */
-               u8 tuner_port_nr;     /**< nr of I2C port the tuner is on     */
+               u8 tuner_port_nr;     /*< nr of I2C port the tuner is on     */
                s32 tuner_min_freq_rf;
-                                     /**< minimum RF input frequency, in kHz */
+                                     /*< minimum RF input frequency, in kHz */
                s32 tuner_max_freq_rf;
-                                     /**< maximum RF input frequency, in kHz */
-               bool tuner_rf_agc_pol; /**< if true invert RF AGC polarity     */
-               bool tuner_if_agc_pol; /**< if true invert IF AGC polarity     */
-               bool tuner_slow_mode; /**< if true tuner uses slow mode       */
+                                     /*< maximum RF input frequency, in kHz */
+               bool tuner_rf_agc_pol; /*< if true invert RF AGC polarity     */
+               bool tuner_if_agc_pol; /*< if true invert IF AGC polarity     */
+               bool tuner_slow_mode; /*< if true tuner uses slow mode       */
 
                struct drx_channel current_channel;
-                                     /**< current channel parameters         */
+                                     /*< current channel parameters         */
                enum drx_standard current_standard;
-                                     /**< current standard selection         */
+                                     /*< current standard selection         */
                enum drx_standard prev_standard;
-                                     /**< previous standard selection        */
+                                     /*< previous standard selection        */
                enum drx_standard di_cache_standard;
-                                     /**< standard in DI cache if available  */
-               bool use_bootloader; /**< use bootloader in open             */
-               u32 capabilities;   /**< capabilities flags                 */
-               u32 product_id;      /**< product ID inc. metal fix number   */};
+                                     /*< standard in DI cache if available  */
+               bool use_bootloader; /*< use bootloader in open             */
+               u32 capabilities;   /*< capabilities flags                 */
+               u32 product_id;      /*< product ID inc. metal fix number   */};
 
 /*
 * Generic functions for DRX devices.
@@ -1897,16 +1897,16 @@ struct drx_reg_dump {
 
 struct drx_demod_instance;
 
-/**
+/*
 * \struct struct drx_demod_instance * \brief Top structure of demodulator instance.
 */
 struct drx_demod_instance {
-                               /**< data access protocol functions       */
+                               /*< data access protocol functions       */
        struct i2c_device_addr *my_i2c_dev_addr;
-                               /**< i2c address and device identifier    */
+                               /*< i2c address and device identifier    */
        struct drx_common_attr *my_common_attr;
-                               /**< common DRX attributes                */
-       void *my_ext_attr;    /**< device specific attributes           */
+                               /*< common DRX attributes                */
+       void *my_ext_attr;    /*< device specific attributes           */
        /* generic demodulator data */
 
        struct i2c_adapter      *i2c;
@@ -2195,7 +2195,7 @@ Conversion from enum values to human readable form.
 Access macros
 -------------------------------------------------------------------------*/
 
-/**
+/*
 * \brief Create a compilable reference to the microcode attribute
 * \param d pointer to demod instance
 *
@@ -2229,7 +2229,7 @@ Access macros
 #define DRX_ATTR_I2CDEVID(d)        ((d)->my_i2c_dev_addr->i2c_dev_id)
 #define DRX_ISMCVERTYPE(x) ((x) == AUX_VER_RECORD)
 
-/**************************/
+/*************************/
 
 /* Macros with device-specific handling are converted to CFG functions */
 
@@ -2285,7 +2285,7 @@ Access macros
 #define DRX_GET_QAM_LOCKRANGE(d, x) DRX_ACCESSMACRO_GET((d), (x), \
         DRX_XS_CFG_QAM_LOCKRANGE, enum drx_qam_lock_range, DRX_UNKNOWN)
 
-/**
+/*
 * \brief Macro to check if std is an ATV standard
 * \retval true std is an ATV standard
 * \retval false std is not an ATV standard
@@ -2298,7 +2298,7 @@ Access macros
                              ((std) == DRX_STANDARD_NTSC) || \
                              ((std) == DRX_STANDARD_FM))
 
-/**
+/*
 * \brief Macro to check if std is a QAM standard
 * \retval true std is a QAM standard
 * \retval false std is not a QAM standard
@@ -2308,14 +2308,14 @@ Access macros
                              ((std) == DRX_STANDARD_ITU_C) || \
                              ((std) == DRX_STANDARD_ITU_D))
 
-/**
+/*
 * \brief Macro to check if std is VSB standard
 * \retval true std is VSB standard
 * \retval false std is not VSB standard
 */
 #define DRX_ISVSBSTD(std) ((std) == DRX_STANDARD_8VSB)
 
-/**
+/*
 * \brief Macro to check if std is DVBT standard
 * \retval true std is DVBT standard
 * \retval false std is not DVBT standard
index 499ccff..8cbd8cc 100644 (file)
@@ -73,7 +73,7 @@ INCLUDE FILES
 
 #define DRX39XX_MAIN_FIRMWARE "dvb-fe-drxj-mc-1.0.8.fw"
 
-/**
+/*
 * \brief Maximum u32 value.
 */
 #ifndef MAX_U32
@@ -100,8 +100,8 @@ INCLUDE FILES
 #ifndef OOB_DRX_DRIVE_STRENGTH
 #define OOB_DRX_DRIVE_STRENGTH 0x02
 #endif
-/**** START DJCOMBO patches to DRXJ registermap constants *********************/
-/**** registermap 200706071303 from drxj **************************************/
+/*** START DJCOMBO patches to DRXJ registermap constants *********************/
+/*** registermap 200706071303 from drxj **************************************/
 #define   ATV_TOP_CR_AMP_TH_FM                                              0x0
 #define   ATV_TOP_CR_AMP_TH_L                                               0xA
 #define   ATV_TOP_CR_AMP_TH_LP                                              0xA
@@ -188,7 +188,7 @@ INCLUDE FILES
 #define     IQM_RC_ADJ_SEL_B_OFF                                            0x0
 #define     IQM_RC_ADJ_SEL_B_QAM                                            0x1
 #define     IQM_RC_ADJ_SEL_B_VSB                                            0x2
-/**** END DJCOMBO patches to DRXJ registermap *********************************/
+/*** END DJCOMBO patches to DRXJ registermap *********************************/
 
 #include "drx_driver_version.h"
 
@@ -208,25 +208,25 @@ DEFINES
 #define DRXJ_WAKE_UP_KEY (demod->my_i2c_dev_addr->i2c_addr)
 #endif
 
-/**
+/*
 * \def DRXJ_DEF_I2C_ADDR
 * \brief Default I2C address of a demodulator instance.
 */
 #define DRXJ_DEF_I2C_ADDR (0x52)
 
-/**
+/*
 * \def DRXJ_DEF_DEMOD_DEV_ID
 * \brief Default device identifier of a demodulator instance.
 */
 #define DRXJ_DEF_DEMOD_DEV_ID      (1)
 
-/**
+/*
 * \def DRXJ_SCAN_TIMEOUT
 * \brief Timeout value for waiting on demod lock during channel scan (millisec).
 */
 #define DRXJ_SCAN_TIMEOUT    1000
 
-/**
+/*
 * \def HI_I2C_DELAY
 * \brief HI timing delay for I2C timing (in nano seconds)
 *
@@ -234,7 +234,7 @@ DEFINES
 */
 #define HI_I2C_DELAY    42
 
-/**
+/*
 * \def HI_I2C_BRIDGE_DELAY
 * \brief HI timing delay for I2C timing (in nano seconds)
 *
@@ -242,13 +242,13 @@ DEFINES
 */
 #define HI_I2C_BRIDGE_DELAY   750
 
-/**
+/*
 * \brief Time Window for MER and SER Measurement in Units of Segment duration.
 */
 #define VSB_TOP_MEASUREMENT_PERIOD  64
 #define SYMBOLS_PER_SEGMENT         832
 
-/**
+/*
 * \brief bit rate and segment rate constants used for SER and BER.
 */
 /* values taken from the QAM microcode */
@@ -260,21 +260,21 @@ DEFINES
 #define DRXJ_QAM_SL_SIG_POWER_QAM64       43008
 #define DRXJ_QAM_SL_SIG_POWER_QAM128      20992
 #define DRXJ_QAM_SL_SIG_POWER_QAM256      43520
-/**
+/*
 * \brief Min supported symbolrates.
 */
 #ifndef DRXJ_QAM_SYMBOLRATE_MIN
 #define DRXJ_QAM_SYMBOLRATE_MIN          (520000)
 #endif
 
-/**
+/*
 * \brief Max supported symbolrates.
 */
 #ifndef DRXJ_QAM_SYMBOLRATE_MAX
 #define DRXJ_QAM_SYMBOLRATE_MAX         (7233000)
 #endif
 
-/**
+/*
 * \def DRXJ_QAM_MAX_WAITTIME
 * \brief Maximal wait time for QAM auto constellation in ms
 */
@@ -290,7 +290,7 @@ DEFINES
 #define DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME 200
 #endif
 
-/**
+/*
 * \def SCU status and results
 * \brief SCU
 */
@@ -299,7 +299,7 @@ DEFINES
 #define FEC_RS_MEASUREMENT_PERIOD   12894      /* 1 sec */
 #define FEC_RS_MEASUREMENT_PRESCALE 1  /* n sec */
 
-/**
+/*
 * \def DRX_AUD_MAX_DEVIATION
 * \brief Needed for calculation of prescale feature in AUD
 */
@@ -307,14 +307,14 @@ DEFINES
 #define DRXJ_AUD_MAX_FM_DEVIATION  100 /* kHz */
 #endif
 
-/**
+/*
 * \brief Needed for calculation of NICAM prescale feature in AUD
 */
 #ifndef DRXJ_AUD_MAX_NICAM_PRESCALE
 #define DRXJ_AUD_MAX_NICAM_PRESCALE  (9)       /* dB */
 #endif
 
-/**
+/*
 * \brief Needed for calculation of NICAM prescale feature in AUD
 */
 #ifndef DRXJ_AUD_MAX_WAITTIME
@@ -371,21 +371,21 @@ DEFINES
 /*============================================================================*/
 /*=== GLOBAL VARIABLEs =======================================================*/
 /*============================================================================*/
-/**
+/*
 */
 
-/**
+/*
 * \brief Temporary register definitions.
 *        (register definitions that are not yet available in register master)
 */
 
-/******************************************************************************/
+/*****************************************************************************/
 /* Audio block 0x103 is write only. To avoid shadowing in driver accessing    */
 /* RAM addresses directly. This must be READ ONLY to avoid problems.          */
 /* Writing to the interface addresses is more than only writing the RAM       */
 /* locations                                                                  */
-/******************************************************************************/
-/**
+/*****************************************************************************/
+/*
 * \brief RAM location of MODUS registers
 */
 #define AUD_DEM_RAM_MODUS_HI__A              0x10204A3
@@ -394,13 +394,13 @@ DEFINES
 #define AUD_DEM_RAM_MODUS_LO__A              0x10204A4
 #define AUD_DEM_RAM_MODUS_LO__M              0x0FFF
 
-/**
+/*
 * \brief RAM location of I2S config registers
 */
 #define AUD_DEM_RAM_I2S_CONFIG1__A           0x10204B1
 #define AUD_DEM_RAM_I2S_CONFIG2__A           0x10204B2
 
-/**
+/*
 * \brief RAM location of DCO config registers
 */
 #define AUD_DEM_RAM_DCO_B_HI__A              0x1020461
@@ -408,20 +408,20 @@ DEFINES
 #define AUD_DEM_RAM_DCO_A_HI__A              0x1020463
 #define AUD_DEM_RAM_DCO_A_LO__A              0x1020464
 
-/**
+/*
 * \brief RAM location of Threshold registers
 */
 #define AUD_DEM_RAM_NICAM_THRSHLD__A         0x102045A
 #define AUD_DEM_RAM_A2_THRSHLD__A            0x10204BB
 #define AUD_DEM_RAM_BTSC_THRSHLD__A          0x10204A6
 
-/**
+/*
 * \brief RAM location of Carrier Threshold registers
 */
 #define AUD_DEM_RAM_CM_A_THRSHLD__A          0x10204AF
 #define AUD_DEM_RAM_CM_B_THRSHLD__A          0x10204B0
 
-/**
+/*
 * \brief FM Matrix register fix
 */
 #ifdef AUD_DEM_WR_FM_MATRIX__A
@@ -430,7 +430,7 @@ DEFINES
 #define AUD_DEM_WR_FM_MATRIX__A              0x105006F
 
 /*============================================================================*/
-/**
+/*
 * \brief Defines required for audio
 */
 #define AUD_VOLUME_ZERO_DB                      115
@@ -443,14 +443,14 @@ DEFINES
 #define AUD_I2S_FREQUENCY_MIN                   12000UL
 #define AUD_RDS_ARRAY_SIZE                      18
 
-/**
+/*
 * \brief Needed for calculation of prescale feature in AUD
 */
 #ifndef DRX_AUD_MAX_FM_DEVIATION
 #define DRX_AUD_MAX_FM_DEVIATION  (100)        /* kHz */
 #endif
 
-/**
+/*
 * \brief Needed for calculation of NICAM prescale feature in AUD
 */
 #ifndef DRX_AUD_MAX_NICAM_PRESCALE
@@ -478,7 +478,7 @@ DEFINES
 /*=== REGISTER ACCESS MACROS =================================================*/
 /*============================================================================*/
 
-/**
+/*
 * This macro is used to create byte arrays for block writes.
 * Block writes speed up I2C traffic between host and demod.
 * The macro takes care of the required byte order in a 16 bits word.
@@ -486,7 +486,7 @@ DEFINES
 */
 #define DRXJ_16TO8(x) ((u8) (((u16)x) & 0xFF)), \
                       ((u8)((((u16)x)>>8)&0xFF))
-/**
+/*
 * This macro is used to convert byte array to 16 bit register value for block read.
 * Block reads speed up I2C traffic between host and demod.
 * The macro takes care of the required byte order in a 16 bits word.
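
To make the byte ordering concrete, here is a small illustrative use of the DRXJ_16TO8 macro defined a few lines up (the array below is hypothetical, not from the driver): each 16-bit value is emitted low byte first, which is the order expected on the I2C block write.

/* Illustrative only: DRXJ_16TO8(x) expands to "low byte, high byte",
 * so a block-write buffer for two 16-bit values looks like this. */
static const u8 example_block[] = {
	DRXJ_16TO8(0x1234),   /* expands to 0x34, 0x12 */
	DRXJ_16TO8(0xABCD),   /* expands to 0xCD, 0xAB */
};
/* example_block[] == { 0x34, 0x12, 0xCD, 0xAB } */
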
@@ -501,7 +501,7 @@ DEFINES
 /*=== HI COMMAND RELATED DEFINES =============================================*/
 /*============================================================================*/
 
-/**
+/*
 * \brief General maximum number of retries for ucode command interfaces
 */
 #define DRXJ_MAX_RETRIES (100)
@@ -807,7 +807,7 @@ static struct drxj_data drxj_data_g = {
         },
 };
 
-/**
+/*
 * \var drxj_default_addr_g
 * \brief Default I2C address and device identifier.
 */
@@ -816,7 +816,7 @@ static struct i2c_device_addr drxj_default_addr_g = {
        DRXJ_DEF_DEMOD_DEV_ID   /* device id */
 };
 
-/**
+/*
 * \var drxj_default_comm_attr_g
 * \brief Default common attributes of a drxj demodulator instance.
 */
@@ -887,7 +887,7 @@ static struct drx_common_attr drxj_default_comm_attr_g = {
        0                       /* mfx */
 };
 
-/**
+/*
 * \var drxj_default_demod_g
 * \brief Default drxj demodulator instance.
 */
@@ -897,7 +897,7 @@ static struct drx_demod_instance drxj_default_demod_g = {
        &drxj_data_g            /* demod device specific attributes */
 };
 
-/**
+/*
 * \brief Default audio data structure for DRXK demodulator instance.
 *
 * This structure is DRXK specific.
@@ -997,7 +997,7 @@ struct drxj_hi_cmd {
 /*=== MICROCODE RELATED STRUCTURES ===========================================*/
 /*============================================================================*/
 
-/**
+/*
  * struct drxu_code_block_hdr - Structure of the microcode block headers
  *
  * @addr:      Destination address of the data in this block
@@ -1086,7 +1086,7 @@ static u32 frac28(u32 N, u32 D)
        return Q1;
 }
 
-/**
+/*
 * \fn u32 log1_times100( u32 x)
 * \brief Compute: 100*log10(x)
 * \param x 32 bits
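
As a hedged reference only (not the driver's integer implementation), the "100*log10(x)" contract can be written in plain host-side C to show the expected results; the function name is hypothetical.

#include <math.h>

/* Reference only: what a 100*log10(x) helper should return for x > 0,
 * e.g. x = 2000 gives 100 * 3.30103, rounded to 330. The driver itself
 * computes this with integer arithmetic. */
static unsigned int ref_log1_times100(unsigned int x)
{
	return (unsigned int)(100.0 * log10((double)x) + 0.5);
}
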
@@ -1198,7 +1198,7 @@ static u32 log1_times100(u32 x)
 
 }
 
-/**
+/*
 * \fn u32 frac_times1e6( u16 N, u32 D)
 * \brief Compute: (N/D) * 1000000.
 * \param N numerator 16-bits.
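
Similarly, a hedged reference for "(N/D) * 1000000"; the 64-bit intermediate and the rounding are assumptions about one reasonable implementation, not a description of the driver's code.

/* Reference sketch only: (N/D) * 1000000 with rounding and a 64-bit
 * intermediate to avoid overflow, e.g. N = 1, D = 3 -> 333333. */
static u32 ref_frac_times1e6(u32 N, u32 D)
{
	return (u32)(((u64)N * 1000000 + D / 2) / D);
}
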
@@ -1235,7 +1235,7 @@ static u32 frac_times1e6(u32 N, u32 D)
 /*============================================================================*/
 
 
-/**
+/*
 * \brief Values for NICAM prescaler gain. Computed from dB to integer
 *        and rounded, using the formula: 16*10^(prescaleGain[dB]/20).
 *
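
The table entries follow from that formula; as an illustrative host-side sketch (not kernel code, not from the patch), an entry can be regenerated like this, with the function name being an assumption.

#include <math.h>

/* Illustration only: prescaler entry from its gain in dB,
 * e.g. 6 dB -> 16 * 10^(6/20) ~= 31.9 -> 32; 9 dB -> 45. */
static unsigned short nicam_presc_entry(unsigned int gain_db)
{
	return (unsigned short)(16.0 * pow(10.0, gain_db / 20.0) + 0.5);
}
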
@@ -1280,7 +1280,7 @@ static const u16 nicam_presc_table_val[43] = {
 #define DRXJ_DAP_AUDTRIF_TIMEOUT 80    /* millisec */
 /*============================================================================*/
 
-/**
+/*
 * \fn bool is_handled_by_aud_tr_if( u32 addr )
 * \brief Check if this address is handled by the audio token ring interface.
 * \param addr
@@ -1386,7 +1386,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
 
 /*============================================================================*/
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_read_block (
 *      struct i2c_device_addr *dev_addr,      -- address of I2C device
@@ -1498,7 +1498,7 @@ static int drxdap_fasi_read_block(struct i2c_device_addr *dev_addr,
 }
 
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_read_reg16 (
 *     struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1531,7 +1531,7 @@ static int drxdap_fasi_read_reg16(struct i2c_device_addr *dev_addr,
        return rc;
 }
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_read_reg32 (
 *     struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1566,7 +1566,7 @@ static int drxdap_fasi_read_reg32(struct i2c_device_addr *dev_addr,
        return rc;
 }
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_write_block (
 *      struct i2c_device_addr *dev_addr,    -- address of I2C device
@@ -1705,7 +1705,7 @@ static int drxdap_fasi_write_block(struct i2c_device_addr *dev_addr,
        return first_err;
 }
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_write_reg16 (
 *     struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1734,7 +1734,7 @@ static int drxdap_fasi_write_reg16(struct i2c_device_addr *dev_addr,
        return drxdap_fasi_write_block(dev_addr, addr, sizeof(data), buf, flags);
 }
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_read_modify_write_reg16 (
 *      struct i2c_device_addr *dev_addr,   -- address of I2C device
@@ -1778,7 +1778,7 @@ static int drxdap_fasi_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
        return rc;
 }
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_write_reg32 (
 *     struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1811,7 +1811,7 @@ static int drxdap_fasi_write_reg32(struct i2c_device_addr *dev_addr,
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int drxj_dap_rm_write_reg16short
 * \brief Read modify write 16 bits audio register using short format only.
 * \param dev_addr
@@ -1890,7 +1890,7 @@ static int drxj_dap_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int drxj_dap_read_aud_reg16
 * \brief Read 16 bits audio register
 * \param dev_addr
@@ -1997,7 +1997,7 @@ static int drxj_dap_read_reg16(struct i2c_device_addr *dev_addr,
 }
 /*============================================================================*/
 
-/**
+/*
 * \fn int drxj_dap_write_aud_reg16
 * \brief Write 16 bits audio register
 * \param dev_addr
@@ -2086,7 +2086,7 @@ static int drxj_dap_write_reg16(struct i2c_device_addr *dev_addr,
 #define DRXJ_HI_ATOMIC_READ      SIO_HI_RA_RAM_PAR_3_ACP_RW_READ
 #define DRXJ_HI_ATOMIC_WRITE     SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE
 
-/**
+/*
 * \fn int drxj_dap_atomic_read_write_block()
 * \brief Basic access routine for atomic read or write access
 * \param dev_addr  pointer to i2c dev address
@@ -2168,7 +2168,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int drxj_dap_atomic_read_reg32()
 * \brief Atomic read of 32 bits words
 */
@@ -2215,7 +2215,7 @@ int drxj_dap_atomic_read_reg32(struct i2c_device_addr *dev_addr,
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
 * \fn int hi_cfg_command()
 * \brief Configure HI with settings stored in the demod structure.
 * \param demod Demodulator.
@@ -2258,7 +2258,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int hi_command()
 * \brief Configure HI with settings stored in the demod structure.
 * \param dev_addr I2C address.
@@ -2369,7 +2369,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int init_hi( const struct drx_demod_instance *demod )
 * \brief Initialise and configure HI.
 * \param demod pointer to demod data.
@@ -2450,7 +2450,7 @@ rw_error:
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
 * \fn int get_device_capabilities()
 * \brief Get and store device capabilities.
 * \param demod  Pointer to demodulator instance.
@@ -2656,7 +2656,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int power_up_device()
 * \brief Power up device.
 * \param demod  Pointer to demodulator instance.
@@ -2710,7 +2710,7 @@ static int power_up_device(struct drx_demod_instance *demod)
 /*----------------------------------------------------------------------------*/
 /* MPEG Output Configuration Functions - begin                                */
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int ctrl_set_cfg_mpeg_output()
 * \brief Set MPEG output configuration of the device.
 * \param devmod  Pointer to demodulator instance.
@@ -3356,7 +3356,7 @@ rw_error:
 /* miscellaneous configurations - begin                           */
 /*----------------------------------------------------------------------------*/
 
-/**
+/*
 * \fn int set_mpegtei_handling()
 * \brief Activate MPEG TEI handling settings.
 * \param devmod  Pointer to demodulator instance.
@@ -3429,7 +3429,7 @@ rw_error:
 }
 
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int bit_reverse_mpeg_output()
 * \brief Set MPEG output bit-endian settings.
 * \param devmod  Pointer to demodulator instance.
@@ -3472,7 +3472,7 @@ rw_error:
 }
 
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int set_mpeg_start_width()
 * \brief Set MPEG start width.
 * \param devmod  Pointer to demodulator instance.
@@ -3522,7 +3522,7 @@ rw_error:
 /*----------------------------------------------------------------------------*/
 /* UIO Configuration Functions - begin                                        */
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int ctrl_set_uio_cfg()
 * \brief Configure modus operandi of UIO.
 * \param demod Pointer to demodulator instance.
@@ -3659,7 +3659,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int ctrl_uio_write()
 * \brief Write to a UIO.
 * \param demod Pointer to demodulator instance.
@@ -3868,7 +3868,7 @@ rw_error:
 /*----------------------------------------------------------------------------*/
 /* I2C Bridge Functions - begin                                               */
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int ctrl_i2c_bridge()
 * \brief Open or close the I2C switch to tuner.
 * \param demod Pointer to demodulator instance.
@@ -3903,7 +3903,7 @@ ctrl_i2c_bridge(struct drx_demod_instance *demod, bool *bridge_closed)
 /*----------------------------------------------------------------------------*/
 /* Smart antenna Functions - begin                                            */
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int smart_ant_init()
 * \brief Initialize Smart Antenna.
 * \param pointer to struct drx_demod_instance.
@@ -4116,7 +4116,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int DRXJ_DAP_SCUAtomicReadWriteBlock()
 * \brief Basic access routine for SCU atomic read or write access
 * \param dev_addr  pointer to i2c dev address
@@ -4188,7 +4188,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int DRXJ_DAP_AtomicReadReg16()
 * \brief Atomic read of 16 bits words
 */
@@ -4216,7 +4216,7 @@ int drxj_dap_scu_atomic_read_reg16(struct i2c_device_addr *dev_addr,
 }
 
 /*============================================================================*/
-/**
+/*
 * \fn int drxj_dap_scu_atomic_write_reg16()
 * \brief Atomic write of 16 bits words
 */
@@ -4237,7 +4237,7 @@ int drxj_dap_scu_atomic_write_reg16(struct i2c_device_addr *dev_addr,
 }
 
 /* -------------------------------------------------------------------------- */
-/**
+/*
 * \brief Measure result of ADC synchronisation
 * \param demod demod instance
 * \param count (returned) count
@@ -4297,7 +4297,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \brief Synchronize analog and digital clock domains
 * \param demod demod instance
 * \return int.
@@ -4365,7 +4365,7 @@ rw_error:
 /*==                8VSB & QAM COMMON DATAPATH FUNCTIONS                    ==*/
 /*============================================================================*/
 /*============================================================================*/
-/**
+/*
 * \fn int init_agc ()
 * \brief Initialize AGC for all standards.
 * \param demod instance of demodulator.
@@ -4741,7 +4741,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_frequency ()
 * \brief Set frequency shift.
 * \param demod instance of demodulator.
@@ -4839,7 +4839,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int get_acc_pkt_err()
 * \brief Retrieve signal strength for VSB and QAM.
 * \param demod Pointer to demod instance
@@ -4891,7 +4891,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_agc_rf ()
 * \brief Configure RF AGC
 * \param demod instance of demodulator.
@@ -5105,7 +5105,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_agc_if ()
 * \brief Configure IF AGC
 * \param demod instance of demodulator.
@@ -5334,7 +5334,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_iqm_af ()
 * \brief Configure IQM AF registers
 * \param demod instance of demodulator.
@@ -5380,7 +5380,7 @@ rw_error:
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
 * \fn int power_down_vsb ()
 * \brief Power down VSB related blocks.
 * \param demod instance of demodulator.
@@ -5478,7 +5478,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_vsb_leak_n_gain ()
 * \brief Set ATSC demod.
 * \param demod instance of demodulator.
@@ -5694,7 +5694,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_vsb()
 * \brief Set 8VSB demod.
 * \param demod instance of demodulator.
@@ -6200,7 +6200,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn static short get_vsb_post_rs_pck_err(struct i2c_device_addr *dev_addr, u16 *PckErrs)
 * \brief Get the values of packet error in 8VSB mode
 * \return Error code
@@ -6239,7 +6239,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn static short GetVSBBer(struct i2c_device_addr *dev_addr, u32 *ber)
 * \brief Get the values of ber in VSB mode
 * \return Error code
@@ -6284,7 +6284,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn static short get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr, u32 *ber)
 * \brief Get the values of ber in VSB mode
 * \return Error code
@@ -6306,7 +6306,7 @@ static int get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr,
        return 0;
 }
 
-/**
+/*
 * \fn static int get_vsbmer(struct i2c_device_addr *dev_addr, u16 *mer)
 * \brief Get the values of MER
 * \return Error code
@@ -6340,7 +6340,7 @@ rw_error:
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
 * \fn int power_down_qam ()
 * \brief Power down QAM related blocks.
 * \param demod instance of demodulator.
@@ -6444,7 +6444,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam_measurement ()
 * \brief Setup of the QAM Measurement intervals for signal quality
 * \param demod instance of demod.
@@ -6656,7 +6656,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam16 ()
 * \brief QAM16 specific setup
 * \param demod instance of demod.
@@ -6891,7 +6891,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam32 ()
 * \brief QAM32 specific setup
 * \param demod instance of demod.
@@ -7126,7 +7126,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam64 ()
 * \brief QAM64 specific setup
 * \param demod instance of demod.
@@ -7362,7 +7362,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam128 ()
 * \brief QAM128 specific setup
 * \param demod: instance of demod.
@@ -7597,7 +7597,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam256 ()
 * \brief QAM256 specific setup
 * \param demod: instance of demod.
@@ -7835,7 +7835,7 @@ rw_error:
 #define QAM_SET_OP_CONSTELLATION 0x2
 #define QAM_SET_OP_SPECTRUM 0X4
 
-/**
+/*
 * \fn int set_qam ()
 * \brief Set QAM demod.
 * \param demod:   instance of demod.
@@ -8845,7 +8845,7 @@ rw_error:
 #define  DEMOD_LOCKED   0x1
 #define  SYNC_FLIPPED   0x2
 #define  SPEC_MIRRORED  0x4
-/**
+/*
 * \fn int qam64auto ()
 * \brief Automatically perform sync pattern switching and mirroring.
 * \param demod:   instance of demod.
@@ -8993,7 +8993,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int qam256auto ()
 * \brief Automatically perform sync pattern switching and mirroring.
 * \param demod:   instance of demod.
@@ -9077,7 +9077,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_qam_channel ()
 * \brief Set QAM channel according to the requested constellation.
 * \param demod:   instance of demod.
@@ -9284,7 +9284,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn static short get_qamrs_err_count(struct i2c_device_addr *dev_addr)
 * \brief Get RS error count in QAM mode (used for post RS BER calculation)
 * \return Error code
@@ -9355,7 +9355,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
  * \fn int get_sig_strength()
  * \brief Retrieve signal strength for VSB and QAM.
  * \param demod Pointer to demod instance
@@ -9435,7 +9435,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int ctrl_get_qam_sig_quality()
 * \brief Retrieve QAM signal quality from device.
 * \param devmod Pointer to demodulator instance.
@@ -9721,7 +9721,7 @@ rw_error:
 */
 /* -------------------------------------------------------------------------- */
 
-/**
+/*
 * \fn int power_down_atv ()
 * \brief Power down ATV.
 * \param demod instance of demodulator
@@ -9822,7 +9822,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Power up AUD.
 * \param demod instance of demodulator
 * \return int.
@@ -9850,7 +9850,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_orx_nsu_aox()
 * \brief Configure OrxNsuAox for OOB
 * \param demod instance of demodulator.
@@ -9884,7 +9884,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int ctrl_set_oob()
 * \brief Set OOB channel to be used.
 * \param demod instance of demodulator
@@ -9986,9 +9986,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
                    20;
        }
 
-   /*********/
+   /********/
        /* Stop  */
-   /*********/
+   /********/
        rc = drxj_dap_write_reg16(dev_addr, ORX_COMM_EXEC__A, ORX_COMM_EXEC_STOP, 0);
        if (rc != 0) {
                pr_err("error %d\n", rc);
@@ -10004,9 +10004,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
                pr_err("error %d\n", rc);
                goto rw_error;
        }
-   /*********/
+   /********/
        /* Reset */
-   /*********/
+   /********/
        scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
            | SCU_RAM_COMMAND_CMD_DEMOD_RESET;
        scu_cmd.parameter_len = 0;
@@ -10017,9 +10017,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
                pr_err("error %d\n", rc);
                goto rw_error;
        }
-   /***********/
+   /**********/
        /* SET_ENV */
-   /***********/
+   /**********/
        /* set frequency, spectrum inversion and data rate */
        scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
            | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV;
@@ -10376,9 +10376,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
                pr_err("error %d\n", rc);
                goto rw_error;
        }
-       /*********/
+       /********/
        /* Start */
-       /*********/
+       /********/
        scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
            | SCU_RAM_COMMAND_CMD_DEMOD_START;
        scu_cmd.parameter_len = 0;
@@ -10419,7 +10419,7 @@ rw_error:
 /*=============================================================================
   ===== ctrl_set_channel() ==========================================================
   ===========================================================================*/
-/**
+/*
 * \fn int ctrl_set_channel()
 * \brief Select a new transmission channel.
 * \param demod instance of demod.
@@ -10652,7 +10652,7 @@ rw_error:
   ===== SigQuality() ==========================================================
   ===========================================================================*/
 
-/**
+/*
 * \fn int ctrl_sig_quality()
 * \brief Retrieve signal quality from device.
 * \param devmod Pointer to demodulator instance.
@@ -10768,7 +10768,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int ctrl_lock_status()
 * \brief Retrieve lock status.
 * \param dev_addr Pointer to demodulator device address.
@@ -10856,7 +10856,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int ctrl_set_standard()
 * \brief Set modulation standard to be used.
 * \param standard Modulation standard.
@@ -11012,7 +11012,7 @@ static void drxj_reset_mode(struct drxj_data *ext_attr)
        ext_attr->vsb_pre_saw_cfg.use_pre_saw = true;
 }
 
-/**
+/*
 * \fn int ctrl_power_mode()
 * \brief Set the power mode of the device to the specified power mode
 * \param demod Pointer to demodulator instance.
@@ -11171,7 +11171,7 @@ rw_error:
 /*== CTRL Set/Get Config related functions ===================================*/
 /*============================================================================*/
 
-/**
+/*
 * \fn int ctrl_set_cfg_pre_saw()
 * \brief Set Pre-saw reference.
 * \param demod demod instance
@@ -11234,7 +11234,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int ctrl_set_cfg_afe_gain()
 * \brief Set AFE Gain.
 * \param demod demod instance
@@ -11324,7 +11324,7 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
                       enum drxu_code_action action);
 static int drxj_set_lna_state(struct drx_demod_instance *demod, bool state);
 
-/**
+/*
 * \fn drxj_open()
 * \brief Open the demod instance, configure device, configure drxdriver
 * \return Status_t Return status.
@@ -11543,7 +11543,7 @@ rw_error:
 }
 
 /*============================================================================*/
-/**
+/*
 * \fn drxj_close()
 * \brief Close the demod instance, power down the device
 * \return Status_t Return status.
@@ -11594,7 +11594,7 @@ rw_error:
  * Microcode related functions
  */
 
-/**
+/*
  * drx_u_code_compute_crc      - Compute CRC of block of microcode data.
  * @block_data: Pointer to microcode data.
  * @nr_words:   Size of microcode block (number of 16 bits words).
@@ -11622,7 +11622,7 @@ static u16 drx_u_code_compute_crc(u8 *block_data, u16 nr_words)
        return (u16)(crc_word >> 16);
 }
 
-/**
+/*
  * drx_check_firmware - checks if the loaded firmware is valid
  *
  * @demod:     demod structure
@@ -11708,7 +11708,7 @@ eof:
        return -EINVAL;
 }
 
-/**
+/*
  * drx_ctrl_u_code - Handle microcode upload or verify.
  * @dev_addr: Address of device.
  * @mc_info:  Pointer to information about microcode data.
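The drxj.c hunks above only demote comment openers from /** to /*. The distinction matters to scripts/kernel-doc: /** opens a structured documentation block that must follow the kernel-doc format, whereas these drxj comments use doxygen markup (\fn, \brief, \param) that kernel-doc cannot parse, so they are turned into ordinary comments. A minimal illustrative sketch of the two forms (the function name is hypothetical):

#include <linux/errno.h>

/* A plain comment: scripts/kernel-doc ignores it completely. */

/**
 * demo_kernel_doc() - short summary line parsed by kernel-doc
 * @arg: every parameter needs a matching @name: line
 *
 * Free-form description. Doxygen markup such as \brief or \param is not
 * understood here and produces warnings, which is why the comments above
 * were demoted to plain comments instead.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int demo_kernel_doc(int arg)
{
	return arg ? 0 : -EINVAL;
}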
index 6c5b8f7..d3ee1c2 100644 (file)
@@ -69,15 +69,15 @@ TYPEDEFS
 
        struct drxjscu_cmd {
                u16 command;
-                       /**< Command number */
+                       /*< Command number */
                u16 parameter_len;
-                       /**< Data length in byte */
+                       /*< Data length in byte */
                u16 result_len;
-                       /**< result length in byte */
+                       /*< result length in byte */
                u16 *parameter;
-                       /**< General purpous param */
+                       /*< General purpous param */
                u16 *result;
-                       /**< General purpous param */};
+                       /*< General purpous param */};
 
 /*============================================================================*/
 /*============================================================================*/
@@ -130,7 +130,7 @@ TYPEDEFS
 
                DRXJ_CFG_MAX    /* dummy, never to be used */};
 
-/**
+/*
 * /struct enum drxj_cfg_smart_ant_io * smart antenna i/o.
 */
 enum drxj_cfg_smart_ant_io {
@@ -138,7 +138,7 @@ enum drxj_cfg_smart_ant_io {
        DRXJ_SMT_ANT_INPUT
 };
 
-/**
+/*
 * /struct struct drxj_cfg_smart_ant * Set smart antenna.
 */
        struct drxj_cfg_smart_ant {
@@ -146,7 +146,7 @@ enum drxj_cfg_smart_ant_io {
                u16 ctrl_data;
        };
 
-/**
+/*
 * /struct DRXJAGCSTATUS_t
 * AGC status information from the DRXJ-IQM-AF.
 */
@@ -158,7 +158,7 @@ struct drxj_agc_status {
 
 /* DRXJ_CFG_AGC_RF, DRXJ_CFG_AGC_IF */
 
-/**
+/*
 * /struct enum drxj_agc_ctrl_mode * Available AGCs modes in the DRXJ.
 */
        enum drxj_agc_ctrl_mode {
@@ -166,7 +166,7 @@ struct drxj_agc_status {
                DRX_AGC_CTRL_USER,
                DRX_AGC_CTRL_OFF};
 
-/**
+/*
 * /struct struct drxj_cfg_agc * Generic interface for all AGCs present on the DRXJ.
 */
        struct drxj_cfg_agc {
@@ -182,7 +182,7 @@ struct drxj_agc_status {
 
 /* DRXJ_CFG_PRE_SAW */
 
-/**
+/*
 * /struct struct drxj_cfg_pre_saw * Interface to configure pre SAW sense.
 */
        struct drxj_cfg_pre_saw {
@@ -192,14 +192,14 @@ struct drxj_agc_status {
 
 /* DRXJ_CFG_AFE_GAIN */
 
-/**
+/*
 * /struct struct drxj_cfg_afe_gain * Interface to configure gain of AFE (LNA + PGA).
 */
        struct drxj_cfg_afe_gain {
                enum drx_standard standard;     /* standard to which these settings apply */
                u16 gain;       /* gain in 0.1 dB steps, DRXJ range 140 .. 335 */};
 
-/**
+/*
 * /struct drxjrs_errors
 * Available failure information in DRXJ_FEC_RS.
 *
@@ -208,25 +208,25 @@ struct drxj_agc_status {
 */
        struct drxjrs_errors {
                u16 nr_bit_errors;
-                               /**< no of pre RS bit errors          */
+                               /*< no of pre RS bit errors          */
                u16 nr_symbol_errors;
-                               /**< no of pre RS symbol errors       */
+                               /*< no of pre RS symbol errors       */
                u16 nr_packet_errors;
-                               /**< no of pre RS packet errors       */
+                               /*< no of pre RS packet errors       */
                u16 nr_failures;
-                               /**< no of post RS failures to decode */
+                               /*< no of post RS failures to decode */
                u16 nr_snc_par_fail_count;
-                               /**< no of post RS bit erros          */
+                               /*< no of post RS bit erros          */
        };
 
-/**
+/*
 * /struct struct drxj_cfg_vsb_misc * symbol error rate
 */
        struct drxj_cfg_vsb_misc {
                u32 symb_error;
-                             /**< symbol error rate sps */};
+                             /*< symbol error rate sps */};
 
-/**
+/*
 * /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
 *
 */
@@ -234,7 +234,7 @@ struct drxj_agc_status {
                DRXJ_MPEG_START_WIDTH_1CLKCYC,
                DRXJ_MPEG_START_WIDTH_8CLKCYC};
 
-/**
+/*
 * /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
 *
 */
@@ -247,20 +247,20 @@ struct drxj_agc_status {
                DRXJ_MPEGOUTPUT_CLOCK_RATE_25313K,
                DRXJ_MPEGOUTPUT_CLOCK_RATE_21696K};
 
-/**
+/*
 * /struct DRXJCfgMisc_t
 * Change TEI bit of MPEG output
 * reverse MPEG output bit order
 * set MPEG output clock rate
 */
        struct drxj_cfg_mpeg_output_misc {
-               bool disable_tei_handling;            /**< if true pass (not change) TEI bit */
-               bool bit_reverse_mpeg_outout;         /**< if true, parallel: msb on MD0; serial: lsb out first */
+               bool disable_tei_handling;            /*< if true pass (not change) TEI bit */
+               bool bit_reverse_mpeg_outout;         /*< if true, parallel: msb on MD0; serial: lsb out first */
                enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate;
-                                                     /**< set MPEG output clock rate that overwirtes the derived one from symbol rate */
-               enum drxj_mpeg_start_width mpeg_start_width;  /**< set MPEG output start width */};
+                                                     /*< set MPEG output clock rate that overwirtes the derived one from symbol rate */
+               enum drxj_mpeg_start_width mpeg_start_width;  /*< set MPEG output start width */};
 
-/**
+/*
 * /enum enum drxj_xtal_freq * Supported external crystal reference frequency.
 */
        enum drxj_xtal_freq {
@@ -269,21 +269,21 @@ struct drxj_agc_status {
                DRXJ_XTAL_FREQ_20P25MHZ,
                DRXJ_XTAL_FREQ_4MHZ};
 
-/**
+/*
 * /enum enum drxj_xtal_freq * Supported external crystal reference frequency.
 */
        enum drxji2c_speed {
                DRXJ_I2C_SPEED_400KBPS,
                DRXJ_I2C_SPEED_100KBPS};
 
-/**
+/*
 * /struct struct drxj_cfg_hw_cfg * Get hw configuration, such as crystal reference frequency, I2C speed, etc...
 */
        struct drxj_cfg_hw_cfg {
                enum drxj_xtal_freq xtal_freq;
-                                  /**< crystal reference frequency */
+                                  /*< crystal reference frequency */
                enum drxji2c_speed i2c_speed;
-                                  /**< 100 or 400 kbps */};
+                                  /*< 100 or 400 kbps */};
 
 /*
  *  DRXJ_CFG_ATV_MISC
@@ -352,7 +352,7 @@ struct drxj_cfg_oob_misc {
  *  DRXJ_CFG_ATV_OUTPUT
  */
 
-/**
+/*
 * /enum DRXJAttenuation_t
 * Attenuation setting for SIF AGC.
 *
@@ -363,7 +363,7 @@ struct drxj_cfg_oob_misc {
                DRXJ_SIF_ATTENUATION_6DB,
                DRXJ_SIF_ATTENUATION_9DB};
 
-/**
+/*
 * /struct struct drxj_cfg_atv_output * SIF attenuation setting.
 *
 */
@@ -398,7 +398,7 @@ struct drxj_cfg_atv_output {
 /*============================================================================*/
 
 /*========================================*/
-/**
+/*
 * /struct struct drxj_data * DRXJ specific attributes.
 *
 * Global data container for DRXJ specific data.
@@ -406,93 +406,93 @@ struct drxj_cfg_atv_output {
 */
        struct drxj_data {
                /* device capabilties (determined during drx_open()) */
-               bool has_lna;             /**< true if LNA (aka PGA) present */
-               bool has_oob;             /**< true if OOB supported */
-               bool has_ntsc;            /**< true if NTSC supported */
-               bool has_btsc;            /**< true if BTSC supported */
-               bool has_smatx;   /**< true if mat_tx is available */
-               bool has_smarx;   /**< true if mat_rx is available */
-               bool has_gpio;            /**< true if GPIO is available */
-               bool has_irqn;            /**< true if IRQN is available */
+               bool has_lna;             /*< true if LNA (aka PGA) present */
+               bool has_oob;             /*< true if OOB supported */
+               bool has_ntsc;            /*< true if NTSC supported */
+               bool has_btsc;            /*< true if BTSC supported */
+               bool has_smatx;   /*< true if mat_tx is available */
+               bool has_smarx;   /*< true if mat_rx is available */
+               bool has_gpio;            /*< true if GPIO is available */
+               bool has_irqn;            /*< true if IRQN is available */
                /* A1/A2/A... */
-               u8 mfx;           /**< metal fix */
+               u8 mfx;           /*< metal fix */
 
                /* tuner settings */
-               bool mirror_freq_spect_oob;/**< tuner inversion (true = tuner mirrors the signal */
+               bool mirror_freq_spect_oob;/*< tuner inversion (true = tuner mirrors the signal */
 
                /* standard/channel settings */
-               enum drx_standard standard;       /**< current standard information                     */
+               enum drx_standard standard;       /*< current standard information                     */
                enum drx_modulation constellation;
-                                         /**< current constellation                            */
-               s32 frequency; /**< center signal frequency in KHz                   */
+                                         /*< current constellation                            */
+               s32 frequency; /*< center signal frequency in KHz                   */
                enum drx_bandwidth curr_bandwidth;
-                                         /**< current channel bandwidth                        */
-               enum drx_mirror mirror;   /**< current channel mirror                           */
+                                         /*< current channel bandwidth                        */
+               enum drx_mirror mirror;   /*< current channel mirror                           */
 
                /* signal quality information */
-               u32 fec_bits_desired;     /**< BER accounting period                            */
-               u16 fec_vd_plen;          /**< no of trellis symbols: VD SER measurement period */
-               u16 qam_vd_prescale;      /**< Viterbi Measurement Prescale                     */
-               u16 qam_vd_period;        /**< Viterbi Measurement period                       */
-               u16 fec_rs_plen;          /**< defines RS BER measurement period                */
-               u16 fec_rs_prescale;      /**< ReedSolomon Measurement Prescale                 */
-               u16 fec_rs_period;        /**< ReedSolomon Measurement period                   */
-               bool reset_pkt_err_acc;   /**< Set a flag to reset accumulated packet error     */
-               u16 pkt_err_acc_start;    /**< Set a flag to reset accumulated packet error     */
+               u32 fec_bits_desired;     /*< BER accounting period                            */
+               u16 fec_vd_plen;          /*< no of trellis symbols: VD SER measurement period */
+               u16 qam_vd_prescale;      /*< Viterbi Measurement Prescale                     */
+               u16 qam_vd_period;        /*< Viterbi Measurement period                       */
+               u16 fec_rs_plen;          /*< defines RS BER measurement period                */
+               u16 fec_rs_prescale;      /*< ReedSolomon Measurement Prescale                 */
+               u16 fec_rs_period;        /*< ReedSolomon Measurement period                   */
+               bool reset_pkt_err_acc;   /*< Set a flag to reset accumulated packet error     */
+               u16 pkt_err_acc_start;    /*< Set a flag to reset accumulated packet error     */
 
                /* HI configuration */
-               u16 hi_cfg_timing_div;    /**< HI Configure() parameter 2                       */
-               u16 hi_cfg_bridge_delay;          /**< HI Configure() parameter 3                       */
-               u16 hi_cfg_wake_up_key;   /**< HI Configure() parameter 4                       */
-               u16 hi_cfg_ctrl;          /**< HI Configure() parameter 5                       */
-               u16 hi_cfg_transmit;      /**< HI Configure() parameter 6                       */
+               u16 hi_cfg_timing_div;    /*< HI Configure() parameter 2                       */
+               u16 hi_cfg_bridge_delay;          /*< HI Configure() parameter 3                       */
+               u16 hi_cfg_wake_up_key;   /*< HI Configure() parameter 4                       */
+               u16 hi_cfg_ctrl;          /*< HI Configure() parameter 5                       */
+               u16 hi_cfg_transmit;      /*< HI Configure() parameter 6                       */
 
                /* UIO configuration */
-               enum drxuio_mode uio_sma_rx_mode;/**< current mode of SmaRx pin                        */
-               enum drxuio_mode uio_sma_tx_mode;/**< current mode of SmaTx pin                        */
-               enum drxuio_mode uio_gpio_mode; /**< current mode of ASEL pin                         */
-               enum drxuio_mode uio_irqn_mode; /**< current mode of IRQN pin                         */
+               enum drxuio_mode uio_sma_rx_mode;/*< current mode of SmaRx pin                        */
+               enum drxuio_mode uio_sma_tx_mode;/*< current mode of SmaTx pin                        */
+               enum drxuio_mode uio_gpio_mode; /*< current mode of ASEL pin                         */
+               enum drxuio_mode uio_irqn_mode; /*< current mode of IRQN pin                         */
 
                /* IQM fs frequecy shift and inversion */
-               u32 iqm_fs_rate_ofs;       /**< frequency shifter setting after setchannel      */
-               bool pos_image;    /**< Ture: positive image                            */
+               u32 iqm_fs_rate_ofs;       /*< frequency shifter setting after setchannel      */
+               bool pos_image;    /*< Ture: positive image                            */
                /* IQM RC frequecy shift */
-               u32 iqm_rc_rate_ofs;       /**< frequency shifter setting after setchannel      */
+               u32 iqm_rc_rate_ofs;       /*< frequency shifter setting after setchannel      */
 
                /* ATV configuration */
-               u32 atv_cfg_changed_flags; /**< flag: flags cfg changes */
-               s16 atv_top_equ0[DRXJ_COEF_IDX_MAX];         /**< shadow of ATV_TOP_EQU0__A */
-               s16 atv_top_equ1[DRXJ_COEF_IDX_MAX];         /**< shadow of ATV_TOP_EQU1__A */
-               s16 atv_top_equ2[DRXJ_COEF_IDX_MAX];         /**< shadow of ATV_TOP_EQU2__A */
-               s16 atv_top_equ3[DRXJ_COEF_IDX_MAX];         /**< shadow of ATV_TOP_EQU3__A */
-               bool phase_correction_bypass;/**< flag: true=bypass */
-               s16 atv_top_vid_peak;     /**< shadow of ATV_TOP_VID_PEAK__A */
-               u16 atv_top_noise_th;     /**< shadow of ATV_TOP_NOISE_TH__A */
-               bool enable_cvbs_output;  /**< flag CVBS ouput enable */
-               bool enable_sif_output;   /**< flag SIF ouput enable */
+               u32 atv_cfg_changed_flags; /*< flag: flags cfg changes */
+               s16 atv_top_equ0[DRXJ_COEF_IDX_MAX];         /*< shadow of ATV_TOP_EQU0__A */
+               s16 atv_top_equ1[DRXJ_COEF_IDX_MAX];         /*< shadow of ATV_TOP_EQU1__A */
+               s16 atv_top_equ2[DRXJ_COEF_IDX_MAX];         /*< shadow of ATV_TOP_EQU2__A */
+               s16 atv_top_equ3[DRXJ_COEF_IDX_MAX];         /*< shadow of ATV_TOP_EQU3__A */
+               bool phase_correction_bypass;/*< flag: true=bypass */
+               s16 atv_top_vid_peak;     /*< shadow of ATV_TOP_VID_PEAK__A */
+               u16 atv_top_noise_th;     /*< shadow of ATV_TOP_NOISE_TH__A */
+               bool enable_cvbs_output;  /*< flag CVBS ouput enable */
+               bool enable_sif_output;   /*< flag SIF ouput enable */
                 enum drxjsif_attenuation sif_attenuation;
-                                         /**< current SIF att setting */
+                                         /*< current SIF att setting */
                /* Agc configuration for QAM and VSB */
-               struct drxj_cfg_agc qam_rf_agc_cfg; /**< qam RF AGC config */
-               struct drxj_cfg_agc qam_if_agc_cfg; /**< qam IF AGC config */
-               struct drxj_cfg_agc vsb_rf_agc_cfg; /**< vsb RF AGC config */
-               struct drxj_cfg_agc vsb_if_agc_cfg; /**< vsb IF AGC config */
+               struct drxj_cfg_agc qam_rf_agc_cfg; /*< qam RF AGC config */
+               struct drxj_cfg_agc qam_if_agc_cfg; /*< qam IF AGC config */
+               struct drxj_cfg_agc vsb_rf_agc_cfg; /*< vsb RF AGC config */
+               struct drxj_cfg_agc vsb_if_agc_cfg; /*< vsb IF AGC config */
 
                /* PGA gain configuration for QAM and VSB */
-               u16 qam_pga_cfg;          /**< qam PGA config */
-               u16 vsb_pga_cfg;          /**< vsb PGA config */
+               u16 qam_pga_cfg;          /*< qam PGA config */
+               u16 vsb_pga_cfg;          /*< vsb PGA config */
 
                /* Pre SAW configuration for QAM and VSB */
                struct drxj_cfg_pre_saw qam_pre_saw_cfg;
-                                         /**< qam pre SAW config */
+                                         /*< qam pre SAW config */
                struct drxj_cfg_pre_saw vsb_pre_saw_cfg;
-                                         /**< qam pre SAW config */
+                                         /*< qam pre SAW config */
 
                /* Version information */
-               char v_text[2][12];       /**< allocated text versions */
-               struct drx_version v_version[2]; /**< allocated versions structs */
+               char v_text[2][12];       /*< allocated text versions */
+               struct drx_version v_version[2]; /*< allocated versions structs */
                struct drx_version_list v_list_elements[2];
-                                         /**< allocated version list */
+                                         /*< allocated version list */
 
                /* smart antenna configuration */
                bool smart_ant_inverted;
@@ -502,25 +502,25 @@ struct drxj_cfg_atv_output {
                bool oob_power_on;
 
                /* MPEG static bitrate setting */
-               u32 mpeg_ts_static_bitrate;  /**< bitrate static MPEG output */
-               bool disable_te_ihandling;  /**< MPEG TS TEI handling */
-               bool bit_reverse_mpeg_outout;/**< MPEG output bit order */
+               u32 mpeg_ts_static_bitrate;  /*< bitrate static MPEG output */
+               bool disable_te_ihandling;  /*< MPEG TS TEI handling */
+               bool bit_reverse_mpeg_outout;/*< MPEG output bit order */
                 enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate;
-                                           /**< MPEG output clock rate */
+                                           /*< MPEG output clock rate */
                 enum drxj_mpeg_start_width mpeg_start_width;
-                                           /**< MPEG Start width */
+                                           /*< MPEG Start width */
 
                /* Pre SAW & Agc configuration for ATV */
                struct drxj_cfg_pre_saw atv_pre_saw_cfg;
-                                         /**< atv pre SAW config */
-               struct drxj_cfg_agc atv_rf_agc_cfg; /**< atv RF AGC config */
-               struct drxj_cfg_agc atv_if_agc_cfg; /**< atv IF AGC config */
-               u16 atv_pga_cfg;          /**< atv pga config    */
+                                         /*< atv pre SAW config */
+               struct drxj_cfg_agc atv_rf_agc_cfg; /*< atv RF AGC config */
+               struct drxj_cfg_agc atv_if_agc_cfg; /*< atv IF AGC config */
+               u16 atv_pga_cfg;          /*< atv pga config    */
 
                u32 curr_symbol_rate;
 
                /* pin-safe mode */
-               bool pdr_safe_mode;         /**< PDR safe mode activated      */
+               bool pdr_safe_mode;         /*< PDR safe mode activated      */
                u16 pdr_safe_restore_val_gpio;
                u16 pdr_safe_restore_val_v_sync;
                u16 pdr_safe_restore_val_sma_rx;
@@ -531,12 +531,12 @@ struct drxj_cfg_atv_output {
                enum drxj_cfg_oob_lo_power oob_lo_pow;
 
                struct drx_aud_data aud_data;
-                                   /**< audio storage                  */};
+                                   /*< audio storage                  */};
 
 /*-------------------------------------------------------------------------
 Access MACROS
 -------------------------------------------------------------------------*/
-/**
+/*
 * \brief Compilable references to attributes
 * \param d pointer to demod instance
 *
@@ -554,7 +554,7 @@ Access MACROS
 DEFINES
 -------------------------------------------------------------------------*/
 
-/**
+/*
 * \def DRXJ_NTSC_CARRIER_FREQ_OFFSET
 * \brief Offset from picture carrier to centre frequency in kHz, in RF domain
 *
@@ -569,7 +569,7 @@ DEFINES
 */
 #define DRXJ_NTSC_CARRIER_FREQ_OFFSET           ((s32)(1750))
 
-/**
+/*
 * \def DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET
 * \brief Offset from picture carrier to centre frequency in kHz, in RF domain
 *
@@ -585,7 +585,7 @@ DEFINES
 */
 #define DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET   ((s32)(2375))
 
-/**
+/*
 * \def DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET
 * \brief Offset from picture carrier to centre frequency in kHz, in RF domain
 *
@@ -601,7 +601,7 @@ DEFINES
 */
 #define DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET ((s32)(2775))
 
-/**
+/*
 * \def DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET
 * \brief Offset from picture carrier to centre frequency in kHz, in RF domain
 *
@@ -616,7 +616,7 @@ DEFINES
 */
 #define DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET   ((s32)(-3255))
 
-/**
+/*
 * \def DRXJ_FM_CARRIER_FREQ_OFFSET
 * \brief Offset from sound carrier to centre frequency in kHz, in RF domain
 *
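The /**< member annotations in drxj.h are likewise doxygen syntax; stripping one asterisk keeps the text but hides it from kernel-doc. If these structures were ever to gain real kernel-doc, struct members are documented with @name: lines in a block placed before the definition rather than in trailing comments. A hypothetical example, for illustration only:

#include <linux/types.h>

/**
 * struct demo_agc_cfg - AGC settings for one standard (illustrative only)
 * @standard: standard to which these settings apply
 * @gain:     gain in 0.1 dB steps
 */
struct demo_agc_cfg {
	int standard;
	u16 gain;
};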
index eb9bdc9..b16fedb 100644 (file)
  * @antenna_dvbt:      GPIO bit for changing antenna to DVB-C. A value of 1
  *                     means that 1=DVBC, 0 = DVBT. Zero means the opposite.
  * @mpeg_out_clk_strength: DRXK Mpeg output clock drive strength.
+ * @chunk_size:                maximum size for I2C messages
  * @microcode_name:    Name of the firmware file with the microcode
  * @qam_demod_parameter_count: The number of parameters used for the command
  *                             to set the demodulator parameters. All
  *                             firmwares are using the 2-parameter commmand.
- *                             An exception is the "drxk_a3.mc" firmware,
+ *                             An exception is the ``drxk_a3.mc`` firmware,
  *                             which uses the 4-parameter command.
  *                             A value of 0 (default) or lower indicates that
  *                             the correct number of parameters will be
  *                             automatically detected.
  *
- * On the *_gpio vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
+ * On the ``*_gpio`` vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
  * UIO-3.
  */
 struct drxk_config {
@@ -52,6 +53,14 @@ struct drxk_config {
 };
 
 #if IS_REACHABLE(CONFIG_DVB_DRXK)
+/**
+ * Attach a drxk demod
+ *
+ * @config: pointer to &struct drxk_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *drxk_attach(const struct drxk_config *config,
                                        struct i2c_adapter *i2c);
 #else
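The drxk_attach() documentation added above sits inside the IS_REACHABLE(CONFIG_DVB_DRXK) guard: when the demod driver is reachable the real declaration is used, otherwise an inline stub after the #else returns NULL, so bridge drivers can call the attach function unconditionally. A minimal sketch of such a caller, with all board-specific drxk_config fields deliberately left unset:

#include "dvb_frontend.h"
#include "drxk.h"

static struct dvb_frontend *demo_attach_drxk(struct i2c_adapter *i2c)
{
	/* Board-specific fields (demod I2C address, firmware name, GPIO
	 * wiring, ...) are intentionally omitted from this sketch. */
	static const struct drxk_config cfg = {};

	/* NULL is returned both on probe failure and when CONFIG_DVB_DRXK
	 * is disabled, in which case the inline stub is used. */
	return drxk_attach(&cfg, i2c);
}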
index 48a8aad..f59ac2e 100644 (file)
@@ -207,9 +207,9 @@ static inline u32 log10times100(u32 value)
        return (100L * intlog10(value)) >> 24;
 }
 
-/****************************************************************************/
+/***************************************************************************/
 /* I2C **********************************************************************/
-/****************************************************************************/
+/***************************************************************************/
 
 static int drxk_i2c_lock(struct drxk_state *state)
 {
@@ -3444,7 +3444,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Activate DVBT specific presets
 * \param demod instance of demodulator.
 * \return DRXStatus_t.
@@ -3484,7 +3484,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Initialize channelswitch-independent settings for DVBT.
 * \param demod instance of demodulator.
 * \return DRXStatus_t.
@@ -3696,7 +3696,7 @@ error:
 }
 
 /*============================================================================*/
-/**
+/*
 * \brief start dvbt demodulating for channel.
 * \param demod instance of demodulator.
 * \return DRXStatus_t.
@@ -3732,7 +3732,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Set up dvbt demodulator for channel.
 * \param demod instance of demodulator.
 * \return DRXStatus_t.
@@ -4086,7 +4086,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Retrieve lock status .
 * \param demod    Pointer to demodulator instance.
 * \param lockStat Pointer to lock status structure.
@@ -4148,7 +4148,7 @@ static int power_up_qam(struct drxk_state *state)
 }
 
 
-/** Power Down QAM */
+/* Power Down QAM */
 static int power_down_qam(struct drxk_state *state)
 {
        u16 data = 0;
@@ -4186,7 +4186,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Setup of the QAM Measurement intervals for signal quality
 * \param demod instance of demod.
 * \param modulation current modulation.
@@ -4461,7 +4461,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief QAM32 specific setup
 * \param demod instance of demod.
 * \return DRXStatus_t.
@@ -4657,7 +4657,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief QAM64 specific setup
 * \param demod instance of demod.
 * \return DRXStatus_t.
@@ -4852,7 +4852,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief QAM128 specific setup
 * \param demod: instance of demod.
 * \return DRXStatus_t.
@@ -5049,7 +5049,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief QAM256 specific setup
 * \param demod: instance of demod.
 * \return DRXStatus_t.
@@ -5244,7 +5244,7 @@ error:
 
 
 /*============================================================================*/
-/**
+/*
 * \brief Reset QAM block.
 * \param demod:   instance of demod.
 * \param channel: pointer to channel data.
@@ -5272,7 +5272,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Set QAM symbolrate.
 * \param demod:   instance of demod.
 * \param channel: pointer to channel data.
@@ -5341,7 +5341,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Get QAM lock status.
 * \param demod:   instance of demod.
 * \param channel: pointer to channel data.
index 6aaa9c6..212e073 100644 (file)
 #define DVB_PLL_TDEE4                 18
 #define DVB_PLL_THOMSON_DTT7520X       19
 
+#if IS_REACHABLE(CONFIG_DVB_PLL)
 /**
  * Attach a dvb-pll to the supplied frontend structure.
  *
- * @param fe Frontend to attach to.
- * @param pll_addr i2c address of the PLL (if used).
- * @param i2c i2c adapter to use (set to NULL if not used).
- * @param pll_desc_id dvb_pll_desc to use.
- * @return Frontend pointer on success, NULL on failure
+ * @fe: Frontend to attach to.
+ * @pll_addr: i2c address of the PLL (if used).
+ * @i2c: i2c adapter to use (set to NULL if not used).
+ * @pll_desc_id: dvb_pll_desc to use.
+ *
+ * return: Frontend pointer on success, NULL on failure
  */
-#if IS_REACHABLE(CONFIG_DVB_PLL)
 extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
                                           int pll_addr,
                                           struct i2c_adapter *i2c,
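The same guard-plus-stub arrangement applies to dvb_pll_attach(), whose parameters are now documented in kernel-doc form above. A short sketch of a caller, assuming a placeholder tuner address of 0x60:

#include <linux/errno.h>
#include "dvb_frontend.h"
#include "dvb-pll.h"

static int demo_attach_pll(struct dvb_frontend *fe, struct i2c_adapter *i2c)
{
	/* pll_addr is the tuner's I2C address; pll_desc_id selects one of
	 * the DVB_PLL_* description tables defined in dvb-pll.h. */
	if (!dvb_pll_attach(fe, 0x60, i2c, DVB_PLL_THOMSON_DTT7520X))
		return -ENODEV;		/* probe failed or driver disabled */
	return 0;
}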
index 3336154..c9fc81c 100644 (file)
@@ -38,6 +38,7 @@ enum helene_xtal {
  * @set_tuner_priv:    Callback function private context
  * @set_tuner_callback:        Callback function that notifies the parent driver
  *                     which tuner is active now
+ * @xtal: Crystal frequency as described by &enum helene_xtal
  */
 struct helene_config {
        u8      i2c_address;
@@ -48,9 +49,31 @@ struct helene_config {
 };
 
 #if IS_REACHABLE(CONFIG_DVB_HELENE)
+/**
+ * Attach a helene tuner (terrestrial and cable standards)
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct helene_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
                                        const struct helene_config *config,
                                        struct i2c_adapter *i2c);
+
+/**
+ * Attach a helene tuner (satellite standards)
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct helene_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
+extern struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
+                                       const struct helene_config *config,
+                                       struct i2c_adapter *i2c);
 #else
 static inline struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
                                        const struct helene_config *config,
@@ -59,13 +82,6 @@ static inline struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
        pr_warn("%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
-#endif
-
-#if IS_REACHABLE(CONFIG_DVB_HELENE)
-extern struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
-                                       const struct helene_config *config,
-                                       struct i2c_adapter *i2c);
-#else
 static inline struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
                                        const struct helene_config *config,
                                        struct i2c_adapter *i2c)
index 672a556..9157fd0 100644 (file)
@@ -41,6 +41,15 @@ struct horus3a_config {
 };
 
 #if IS_REACHABLE(CONFIG_DVB_HORUS3A)
+/**
+ * Attach a horus3a tuner
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct horus3a_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
                                        const struct horus3a_config *config,
                                        struct i2c_adapter *i2c);
index 534b24f..965012a 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner
  *
  * Copyright (C) 2010 Malcolm Priestley
@@ -36,7 +36,7 @@ struct ix2505v_state {
        u32 frequency;
 };
 
-/**
+/*
  *  Data read format of the Sharp IX2505V B0017
  *
  *  byte1:   1   |   1   |   0   |   0   |   0   |  MA1  |  MA0  |  1
@@ -99,7 +99,7 @@ static void ix2505v_release(struct dvb_frontend *fe)
 
 }
 
-/**
+/*
  *  Data write format of the Sharp IX2505V B0017
  *
  *  byte1:   1   |   1   |   0   |   0   |   0   | 0(MA1)| 0(MA0)|  0
index 0b0a431..49ed93e 100644 (file)
 #include "dvb_frontend.h"
 
 /**
- * Attach a ix2505v tuner to the supplied frontend structure.
+ * struct ix2505v_config - ix2505 attachment configuration
  *
- * @param fe Frontend to attach to.
- * @param config ix2505v_config structure
- * @return FE pointer on success, NULL on failure.
+ * @tuner_address: tuner address
+ * @tuner_gain: Baseband AMP gain control 0/1=0dB(default) 2=-2dB 3=-4dB
+ * @tuner_chargepump: Charge pump output +/- 0=120 1=260 2=555 3=1200(default)
+ * @min_delay_ms: delay after tune
+ * @tuner_write_only: disables reads
  */
-
 struct ix2505v_config {
        u8 tuner_address;
-
-       /*Baseband AMP gain control 0/1=0dB(default) 2=-2bB 3=-4dB */
        u8 tuner_gain;
-
-       /*Charge pump output +/- 0=120 1=260 2=555 3=1200(default) */
        u8 tuner_chargepump;
-
-       /* delay after tune */
        int min_delay_ms;
-
-       /* disables reads*/
        u8 tuner_write_only;
 
 };
 
 #if IS_REACHABLE(CONFIG_DVB_IX2505V)
+/**
+ * Attach a ix2505v tuner to the supplied frontend structure.
+ *
+ * @fe: Frontend to attach to.
+ * @config: pointer to &struct ix2505v_config
+ * @i2c: pointer to &struct i2c_adapter.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
        const struct ix2505v_config *config, struct i2c_adapter *i2c);
 #else
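With the ix2505v member comments folded into the struct's kernel-doc, a configuration block now reads directly against the @field descriptions above. A hedged example using the documented defaults (the I2C address and delay are placeholders):

#include "dvb_frontend.h"
#include "ix2505v.h"

static const struct ix2505v_config demo_ix2505v_cfg = {
	.tuner_address    = 0x60,	/* placeholder tuner I2C address */
	.tuner_gain       = 0,		/* 0 dB, the documented default */
	.tuner_chargepump = 3,		/* 1200, the documented default */
	.min_delay_ms     = 100,	/* placeholder settle time after tune */
	.tuner_write_only = 1,		/* skip read-back */
};

static struct dvb_frontend *demo_attach_ix2505v(struct dvb_frontend *fe,
						struct i2c_adapter *i2c)
{
	return ix2505v_attach(fe, &demo_ix2505v_cfg, i2c);
}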
index 68923c8..e5a6c17 100644 (file)
@@ -517,7 +517,7 @@ struct dvb_frontend* l64781_attach(const struct l64781_config* config,
        state->i2c = i2c;
        state->first = 1;
 
-       /**
+       /*
         *  the L64781 won't show up before we send the reset_and_configure()
         *  broadcast. If nothing responds there is no L64781 on the bus...
         */
index 04b355a..1a8964a 100644 (file)
  * 0x68,
  */
 
+/**
+ * enum m88ds3103_ts_mode - TS connection mode
+ * @M88DS3103_TS_SERIAL:       TS output pin D0, normal
+ * @M88DS3103_TS_SERIAL_D7:    TS output pin D7
+ * @M88DS3103_TS_PARALLEL:     TS Parallel mode
+ * @M88DS3103_TS_CI:           TS CI Mode
+ */
+enum m88ds3103_ts_mode {
+       M88DS3103_TS_SERIAL,
+       M88DS3103_TS_SERIAL_D7,
+       M88DS3103_TS_PARALLEL,
+       M88DS3103_TS_CI
+};
+
+/**
+ * enum m88ds3103_clock_out
+ * @M88DS3103_CLOCK_OUT_DISABLED:      Clock output is disabled
+ * @M88DS3103_CLOCK_OUT_ENABLED:       Clock output is enabled with crystal
+ *                                     clock.
+ * @M88DS3103_CLOCK_OUT_ENABLED_DIV2:  Clock output is enabled with half
+ *                                     crystal clock.
+ */
+enum m88ds3103_clock_out {
+       M88DS3103_CLOCK_OUT_DISABLED,
+       M88DS3103_CLOCK_OUT_ENABLED,
+       M88DS3103_CLOCK_OUT_ENABLED_DIV2
+};
+
 /**
  * struct m88ds3103_platform_data - Platform data for the m88ds3103 driver
  * @clk: Clock frequency.
  * @get_dvb_frontend: Get DVB frontend.
  * @get_i2c_adapter: Get I2C adapter.
  */
-
 struct m88ds3103_platform_data {
        u32 clk;
        u16 i2c_wr_max;
-#define M88DS3103_TS_SERIAL             0 /* TS output pin D0, normal */
-#define M88DS3103_TS_SERIAL_D7          1 /* TS output pin D7 */
-#define M88DS3103_TS_PARALLEL           2 /* TS Parallel mode */
-#define M88DS3103_TS_CI                 3 /* TS CI Mode */
-       u8 ts_mode:2;
+       enum m88ds3103_ts_mode ts_mode;
        u32 ts_clk;
+       enum m88ds3103_clock_out clk_out;
        u8 ts_clk_pol:1;
        u8 spec_inv:1;
        u8 agc;
        u8 agc_inv:1;
-#define M88DS3103_CLOCK_OUT_DISABLED        0
-#define M88DS3103_CLOCK_OUT_ENABLED         1
-#define M88DS3103_CLOCK_OUT_ENABLED_DIV2    2
-       u8 clk_out:2;
        u8 envelope_mode:1;
        u8 lnb_hv_pol:1;
        u8 lnb_en_pol:1;
@@ -73,105 +93,60 @@ struct m88ds3103_platform_data {
        u8 attach_in_use:1;
 };
 
-/*
- * Do not add new m88ds3103_attach() users! Use I2C bindings instead.
+/**
+ * struct m88ds3103_config - m88ds3102 configuration
+ *
+ * @i2c_addr:  I2C address. Default: none, must set. Example: 0x68, ...
+ * @clock:     Device's clock. Default: none, must set. Example: 27000000
+ * @i2c_wr_max: Max bytes I2C provider is asked to write at once.
+ *             Default: none, must set. Example: 33, 65, ...
+ * @ts_mode:   TS output mode, as defined by &enum m88ds3103_ts_mode.
+ *             Default: M88DS3103_TS_SERIAL.
+ * @ts_clk:    TS clk in KHz. Default: 0.
+ * @ts_clk_pol:	TS clk polarity. Default: 0.
+ *             1-active at falling edge; 0-active at rising edge.
+ * @spec_inv:  Spectrum inversion. Default: 0.
+ * @agc_inv:   AGC polarity. Default: 0.
+ * @clock_out: Clock output, as defined by &enum m88ds3103_clock_out.
+ *             Default: M88DS3103_CLOCK_OUT_DISABLED.
+ * @envelope_mode: DiSEqC envelope mode. Default: 0.
+ * @agc:       AGC configuration. Default: none, must set.
+ * @lnb_hv_pol:        LNB H/V pin polarity. Default: 0. Values:
+ *             1: pin high set to VOLTAGE_13, pin low to set VOLTAGE_18;
+ *             0: pin high set to VOLTAGE_18, pin low to set VOLTAGE_13.
+ * @lnb_en_pol:        LNB enable pin polarity. Default: 0. Values:
+ *             1: pin high to enable, pin low to disable;
+ *             0: pin high to disable, pin low to enable.
  */
 struct m88ds3103_config {
-       /*
-        * I2C address
-        * Default: none, must set
-        * 0x68, ...
-        */
        u8 i2c_addr;
-
-       /*
-        * clock
-        * Default: none, must set
-        * 27000000
-        */
        u32 clock;
-
-       /*
-        * max bytes I2C provider is asked to write at once
-        * Default: none, must set
-        * 33, 65, ...
-        */
        u16 i2c_wr_max;
-
-       /*
-        * TS output mode
-        * Default: M88DS3103_TS_SERIAL
-        */
-#define M88DS3103_TS_SERIAL             0 /* TS output pin D0, normal */
-#define M88DS3103_TS_SERIAL_D7          1 /* TS output pin D7 */
-#define M88DS3103_TS_PARALLEL           2 /* TS Parallel mode */
-#define M88DS3103_TS_CI                 3 /* TS CI Mode */
        u8 ts_mode;
-
-       /*
-        * TS clk in KHz
-        * Default: 0.
-        */
        u32 ts_clk;
-
-       /*
-        * TS clk polarity.
-        * Default: 0. 1-active at falling edge; 0-active at rising edge.
-        */
        u8 ts_clk_pol:1;
-
-       /*
-        * spectrum inversion
-        * Default: 0
-        */
        u8 spec_inv:1;
-
-       /*
-        * AGC polarity
-        * Default: 0
-        */
        u8 agc_inv:1;
-
-       /*
-        * clock output
-        * Default: M88DS3103_CLOCK_OUT_DISABLED
-        */
-#define M88DS3103_CLOCK_OUT_DISABLED        0
-#define M88DS3103_CLOCK_OUT_ENABLED         1
-#define M88DS3103_CLOCK_OUT_ENABLED_DIV2    2
        u8 clock_out;
-
-       /*
-        * DiSEqC envelope mode
-        * Default: 0
-        */
        u8 envelope_mode:1;
-
-       /*
-        * AGC configuration
-        * Default: none, must set
-        */
        u8 agc;
-
-       /*
-        * LNB H/V pin polarity
-        * Default: 0.
-        * 1: pin high set to VOLTAGE_13, pin low to set VOLTAGE_18.
-        * 0: pin high set to VOLTAGE_18, pin low to set VOLTAGE_13.
-        */
        u8 lnb_hv_pol:1;
-
-       /*
-        * LNB enable pin polarity
-        * Default: 0.
-        * 1: pin high to enable, pin low to disable.
-        * 0: pin high to disable, pin low to enable.
-        */
        u8 lnb_en_pol:1;
 };
 
 #if defined(CONFIG_DVB_M88DS3103) || \
                (defined(CONFIG_DVB_M88DS3103_MODULE) && defined(MODULE))
+/**
+ * Attach a m88ds3103 demod
+ *
+ * @config: pointer to &struct m88ds3103_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ * @tuner_i2c: on success, returns the I2C adapter associated with
+ *             m88ds3103 tuner.
+ *
+ * return: FE pointer on success, NULL on failure.
+ * Note: Do not add new m88ds3103_attach() users! Use I2C bindings instead.
+ */
 extern struct dvb_frontend *m88ds3103_attach(
                const struct m88ds3103_config *config,
                struct i2c_adapter *i2c,
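Replacing the #define blocks buried inside the structs with the m88ds3103_ts_mode and m88ds3103_clock_out enums gives the fields real types, so kernel-doc can cross-reference them and the compiler can flag out-of-range assignments. A sketch of platform data built only from names shown above; the clock, TS clock and AGC values are board-specific placeholders:

#include "m88ds3103.h"

static const struct m88ds3103_platform_data demo_m88ds3103_pdata = {
	.clk        = 27000000,			/* crystal, Hz (placeholder) */
	.i2c_wr_max = 33,			/* longest allowed I2C write */
	.ts_mode    = M88DS3103_TS_PARALLEL,
	.ts_clk     = 16000,			/* kHz (placeholder) */
	.clk_out    = M88DS3103_CLOCK_OUT_DISABLED,
	.agc        = 0x99,			/* AGC setup (placeholder) */
};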
index dfb02db..05c9725 100644 (file)
@@ -26,7 +26,6 @@
  * @demod_address:     the demodulator's i2c address
  * @is_serial:         if true, TS is serial. Otherwise, TS is parallel
  */
-
 struct mb86a20s_config {
        u32     fclk;
        u8      demod_address;
@@ -34,9 +33,17 @@ struct mb86a20s_config {
 };
 
 #if IS_REACHABLE(CONFIG_DVB_MB86A20S)
+/**
+ * Attach a mb86a20s demod
+ *
+ * @config: pointer to &struct mb86a20s_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
                                           struct i2c_adapter *i2c);
-extern struct i2c_adapter *mb86a20s_get_tuner_i2c_adapter(struct dvb_frontend *);
+
 #else
 static inline struct dvb_frontend *mb86a20s_attach(
        const struct mb86a20s_config *config, struct i2c_adapter *i2c)
@@ -44,12 +51,6 @@ static inline struct dvb_frontend *mb86a20s_attach(
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
-static inline struct i2c_adapter *
-       mb86a20s_get_tuner_i2c_adapter(struct dvb_frontend *fe)
-{
-       printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
-       return NULL;
-}
 #endif
 
 #endif /* MB86A20S */
index 3236325..8cd5ef6 100644 (file)
 
 #include <linux/dvb/frontend.h>
 
+/* Define old names for backward compatibility */
+#define VARIABLE_TS_CLOCK   MN88472_TS_CLK_VARIABLE
+#define FIXED_TS_CLOCK      MN88472_TS_CLK_FIXED
+#define SERIAL_TS_MODE      MN88472_TS_MODE_SERIAL
+#define PARALLEL_TS_MODE    MN88472_TS_MODE_PARALLEL
+
 /**
  * struct mn88472_config - Platform data for the mn88472 driver
  * @xtal: Clock frequency.
  * @ts_mode: TS mode.
  * @ts_clock: TS clock config.
  * @i2c_wr_max: Max number of bytes driver writes to I2C at once.
- * @get_dvb_frontend: Get DVB frontend.
+ * @fe: pointer to a frontend pointer
+ * @get_dvb_frontend: Get DVB frontend callback.
  */
-
-/* Define old names for backward compatibility */
-#define VARIABLE_TS_CLOCK   MN88472_TS_CLK_VARIABLE
-#define FIXED_TS_CLOCK      MN88472_TS_CLK_FIXED
-#define SERIAL_TS_MODE      MN88472_TS_MODE_SERIAL
-#define PARALLEL_TS_MODE    MN88472_TS_MODE_PARALLEL
-
 struct mn88472_config {
        unsigned int xtal;
 
index 0cde151..458ac94 100644 (file)
@@ -32,7 +32,6 @@
  * @pid_filter: Set PID to PID filter.
  * @pid_filter_ctrl: Control PID filter.
  */
-
 struct rtl2830_platform_data {
        u32 clk;
        bool spec_inv;
index 03c0de0..6a124ff 100644 (file)
@@ -35,7 +35,6 @@
  * @pid_filter: Set PID to PID filter.
  * @pid_filter_ctrl: Control PID filter.
  */
-
 struct rtl2832_platform_data {
        u32 clk;
        /*
index d8fc7e7..8f88c2f 100644 (file)
  * struct rtl2832_sdr_platform_data - Platform data for the rtl2832_sdr driver
  * @clk: Clock frequency (4000000, 16000000, 25000000, 28800000).
  * @tuner: Used tuner model.
- * @i2c_client: rtl2832 demod driver I2C client.
- * @bulk_read: rtl2832 driver private I/O interface.
- * @bulk_write: rtl2832 driver private I/O interface.
- * @update_bits: rtl2832 driver private I/O interface.
+ * @regmap: pointer to &struct regmap.
  * @dvb_frontend: rtl2832 DVB frontend.
  * @v4l2_subdev: Tuner v4l2 controls.
  * @dvb_usb_device: DVB USB interface for USB streaming.
  */
-
 struct rtl2832_sdr_platform_data {
        u32 clk;
        /*
index 7c511c3..d2c402b 100644 (file)
@@ -57,7 +57,7 @@ static int sp887x_writereg (struct sp887x_state* state, u16 reg, u16 data)
        int ret;
 
        if ((ret = i2c_transfer(state->i2c, &msg, 1)) != 1) {
-               /**
+               /*
                 *  in case of soft reset we ignore ACK errors...
                 */
                if (!(reg == 0xf1a && data == 0x000 &&
@@ -130,7 +130,7 @@ static void sp887x_setup_agc (struct sp887x_state* state)
 
 #define BLOCKSIZE 30
 #define FW_SIZE 0x4000
-/**
+/*
  *  load firmware and setup MPEG interface...
  */
 static int sp887x_initial_setup (struct dvb_frontend* fe, const struct firmware *fw)
@@ -279,7 +279,7 @@ static int configure_reg0xc05(struct dtv_frontend_properties *p, u16 *reg0xc05)
        return 0;
 }
 
-/**
+/*
  *  estimates division of two 24bit numbers,
  *  derived from the ves1820/stv0299 driver code
  */
index 78e75df..e94a3d5 100644 (file)
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
+#if IS_REACHABLE(CONFIG_DVB_STB6000)
 /**
  * Attach a stb6000 tuner to the supplied frontend structure.
  *
- * @param fe Frontend to attach to.
- * @param addr i2c address of the tuner.
- * @param i2c i2c adapter to use.
- * @return FE pointer on success, NULL on failure.
+ * @fe: Frontend to attach to.
+ * @addr: i2c address of the tuner.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
  */
-#if IS_REACHABLE(CONFIG_DVB_STB6000)
 extern struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr,
                                           struct i2c_adapter *i2c);
 #else
index b36b21a..b1f3d67 100644 (file)
@@ -368,7 +368,7 @@ static int stv0299_set_voltage(struct dvb_frontend *fe,
        reg0x08 = stv0299_readreg (state, 0x08);
        reg0x0c = stv0299_readreg (state, 0x0c);
 
-       /**
+       /*
         *  H/V switching over OP0, OP1 and OP2 are LNB power enable bits
         */
        reg0x0c &= 0x0f;
index 8f18402..da1a87b 100644 (file)
@@ -38,7 +38,6 @@
  * @tuner_i2c_addr: CX24118A tuner I2C address (0x14, 0x54, ...).
  * @get_dvb_frontend: Get DVB frontend.
  */
-
 struct tda10071_platform_data {
        u32 clk;
        u16 i2c_wr_max;
index 81abe1a..6a7bed1 100644 (file)
 /**
  * Attach a tda826x tuner to the supplied frontend structure.
  *
- * @param fe Frontend to attach to.
- * @param addr i2c address of the tuner.
- * @param i2c i2c adapter to use.
- * @param has_loopthrough Set to 1 if the card has a loopthrough RF connector.
- * @return FE pointer on success, NULL on failure.
+ * @fe: Frontend to attach to.
+ * @addr: i2c address of the tuner.
+ * @i2c: i2c adapter to use.
+ * @has_loopthrough: Set to 1 if the card has a loopthrough RF connector.
+ *
+ * return: FE pointer on success, NULL on failure.
  */
 #if IS_REACHABLE(CONFIG_DVB_TDA826X)
 extern struct dvb_frontend* tda826x_attach(struct dvb_frontend *fe, int addr,
index 18e6d4c..1d41abd 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Driver for Infineon tua6100 pll.
  *
  * (c) 2006 Andrew de Quincey
index 9f15cbd..6c098a8 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Driver for Infineon tua6100 PLL.
  *
  * (c) 2006 Andrew de Quincey
index ceb2e05..6cd8f6f 100644 (file)
@@ -27,7 +27,6 @@
  * @reg_read: Register read callback.
  * @reg_write: Register write callback.
  */
-
 struct zd1301_demod_platform_data {
        void *reg_priv;
        int (*reg_read)(void *, u16, u8 *);
@@ -41,8 +40,7 @@ struct zd1301_demod_platform_data {
  *
  * Return: Pointer to DVB frontend which given platform device owns.
  */
-
-struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *);
+struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *pdev);
 
 /**
  * zd1301_demod_get_i2c_adapter() - Get pointer to I2C adapter
@@ -50,11 +48,16 @@ struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *);
  *
  * Return: Pointer to I2C adapter which given platform device owns.
  */
-
-struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *);
+struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *pdev);
 
 #else
 
+/**
+ * zd1301_demod_get_dvb_frontend() - Attach a zd1301 frontend
+ * @dev: Pointer to platform device
+ *
+ * Return: Pointer to %struct dvb_frontend or NULL if attach fails.
+ */
 static inline struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *dev)
 {
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
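zd1301_demod exposes its frontend and I2C adapter through platform-device getters rather than an attach call; the stubs above keep callers building when the demod driver is disabled. A minimal sketch of a bridge consuming both getters, with error handling trimmed and pdev assumed to come from an earlier platform_device_register_data():

#include <linux/errno.h>
#include <linux/platform_device.h>
#include "zd1301_demod.h"

static int demo_bind_zd1301(struct platform_device *pdev)
{
	struct dvb_frontend *fe = zd1301_demod_get_dvb_frontend(pdev);
	struct i2c_adapter *adap = zd1301_demod_get_i2c_adapter(pdev);

	if (!fe || !adap)
		return -ENODEV;	/* driver disabled or demod not probed */

	/* ...register fe with the DVB adapter, attach a tuner on adap... */
	return 0;
}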
index 0622827..89dd65a 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Driver for Zarlink zl10036 DVB-S silicon tuner
  *
  * Copyright (C) 2006 Tino Reichardt
@@ -157,7 +157,7 @@ static int zl10036_sleep(struct dvb_frontend *fe)
        return ret;
 }
 
-/**
+/*
  * register map of the ZL10036/ZL10038
  *
  * reg[default] content
@@ -219,7 +219,7 @@ static int zl10036_set_bandwidth(struct zl10036_state *state, u32 fbw)
        if (fbw <= 28820) {
                br = _BR_MAXIMUM;
        } else {
-               /**
+               /*
                 *  f(bw)=34,6MHz f(xtal)=10.111MHz
                 *  br = (10111/34600) * 63 * 1/K = 14;
                 */
@@ -315,7 +315,7 @@ static int zl10036_set_params(struct dvb_frontend *fe)
        ||  (frequency > fe->ops.info.frequency_max))
                return -EINVAL;
 
-       /**
+       /*
         * alpha = 1.35 for dvb-s
         * fBW = (alpha*symbolrate)/(2*0.8)
         * 1.35 / (2*0.8) = 27 / 32
index 88751ad..ec90ca9 100644 (file)
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
-/**
- * Attach a zl10036 tuner to the supplied frontend structure.
- *
- * @param fe Frontend to attach to.
- * @param config zl10036_config structure
- * @return FE pointer on success, NULL on failure.
- */
-
 struct zl10036_config {
        u8 tuner_address;
        int rf_loop_enable;
 };
 
 #if IS_REACHABLE(CONFIG_DVB_ZL10036)
+/**
+ * Attach a zl10036 tuner to the supplied frontend structure.
+ *
+ * @fe: Frontend to attach to.
+ * @config: zl10036_config structure.
+ * @i2c: pointer to struct i2c_adapter.
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *zl10036_attach(struct dvb_frontend *fe,
        const struct zl10036_config *config, struct i2c_adapter *i2c);
 #else
index 3c6d642..cb5d7ff 100644 (file)
@@ -676,6 +676,7 @@ config VIDEO_OV13858
        tristate "OmniVision OV13858 sensor support"
        depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
        depends on MEDIA_CAMERA_SUPPORT
+       select V4L2_FWNODE
        ---help---
          This is a Video4Linux2 sensor-level driver for the OmniVision
          OV13858 camera.
index 1439936..9fe409e 100644 (file)
@@ -1,6 +1,7 @@
 config VIDEO_ET8EK8
        tristate "ET8EK8 camera sensor support"
        depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       select V4L2_FWNODE
        ---help---
          This is a driver for the Toshiba ET8EK8 5 MP camera sensor.
          It is used for example in Nokia N900 (RX-51).
index 800b9bf..2f71af2 100644 (file)
@@ -1770,8 +1770,7 @@ static int imx274_probe(struct i2c_client *client,
        return 0;
 
 err_ctrls:
-       v4l2_async_unregister_subdev(sd);
-       v4l2_ctrl_handler_free(sd->ctrl_handler);
+       v4l2_ctrl_handler_free(&imx274->ctrls.handler);
 err_me:
        media_entity_cleanup(&sd->entity);
 err_regmap:
@@ -1788,7 +1787,7 @@ static int imx274_remove(struct i2c_client *client)
        imx274_write_table(imx274, mode_table[IMX274_MODE_STOP_STREAM]);
 
        v4l2_async_unregister_subdev(sd);
-       v4l2_ctrl_handler_free(sd->ctrl_handler);
+       v4l2_ctrl_handler_free(&imx274->ctrls.handler);
        media_entity_cleanup(&sd->entity);
        mutex_destroy(&imx274->lock);
        return 0;
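The imx274 fix above makes the probe error path symmetric again: each err_* label undoes only what was set up before the failing step, in reverse order, and nothing that never happened on that path (the async subdev was not yet registered when err_ctrls is reached). A generic sketch of the idiom with hypothetical helpers:

struct demo_device;				/* stand-in driver state */
int demo_init_regmap(struct demo_device *d);	/* hypothetical helpers */
void demo_exit_regmap(struct demo_device *d);
int demo_init_controls(struct demo_device *d);
void demo_free_controls(struct demo_device *d);
int demo_register_subdev(struct demo_device *d);

static int demo_probe(struct demo_device *d)
{
	int ret;

	ret = demo_init_regmap(d);		/* step 1 */
	if (ret)
		return ret;

	ret = demo_init_controls(d);		/* step 2 */
	if (ret)
		goto err_regmap;

	ret = demo_register_subdev(d);		/* step 3 */
	if (ret)
		goto err_ctrls;

	return 0;

err_ctrls:
	demo_free_controls(d);			/* undo step 2 only */
err_regmap:
	demo_exit_regmap(d);			/* undo step 1 only */
	return ret;
}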
index 251a2aa..b600e03 100644 (file)
@@ -50,6 +50,7 @@ enum led_enable {
 /**
  * struct lm3560_flash
  *
+ * @dev: pointer to &struct device
  * @pdata: platform data
  * @regmap: reg. map for i2c
  * @lock: muxtex for serial access.
index a0cd6dc..0fb457f 100644 (file)
 
 /**
  * m5mols_read_rational - I2C read of a rational number
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @addr_num: numerator register
+ * @addr_den: denominator register
+ * @val: place to store the division result
  *
  * Read numerator and denominator from registers @addr_num and @addr_den
  * respectively and return the division result in @val.
@@ -53,6 +57,7 @@ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
 
 /**
  * m5mols_capture_info - Gather captured image information
+ * @info: M-5MOLS driver data structure
  *
  * For now it gathers only EXIF information and file size.
  */
index c2218c0..82eab7c 100644 (file)
@@ -126,6 +126,7 @@ static struct m5mols_scenemode m5mols_default_scenemode[] = {
 
 /**
  * m5mols_do_scenemode() - Change current scenemode
+ * @info: M-5MOLS driver data structure
  * @mode:      Desired mode of the scenemode
  *
  * WARNING: The execution order is important. Do not change the order.
index 463534d..12e79f9 100644 (file)
@@ -114,7 +114,8 @@ static const struct m5mols_resolution m5mols_reg_res[] = {
 
 /**
  * m5mols_swap_byte - an byte array to integer conversion function
- * @size: size in bytes of I2C packet defined in the M-5MOLS datasheet
+ * @data: byte array
+ * @length: size in bytes of I2C packet defined in the M-5MOLS datasheet
  *
  * Convert I2C data byte array with performing any required byte
  * reordering to assure proper values for each data type, regardless
@@ -132,8 +133,9 @@ static u32 m5mols_swap_byte(u8 *data, u8 length)
 
 /**
  * m5mols_read -  I2C read function
- * @reg: combination of size, category and command for the I2C packet
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  * @size: desired size of I2C packet
+ * @reg: combination of size, category and command for the I2C packet
  * @val: read value
  *
  * Returns 0 on success, or else negative errno.
@@ -232,6 +234,7 @@ int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg, u32 *val)
 
 /**
  * m5mols_write - I2C command write function
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  * @reg: combination of size, category and command for the I2C packet
  * @val: value to write
  *
@@ -284,6 +287,7 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
 
 /**
  * m5mols_busy_wait - Busy waiting with I2C register polling
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  * @reg: the I2C_REG() address of an 8-bit status register to check
  * @value: expected status register value
  * @mask: bit mask for the read status register value
@@ -316,6 +320,8 @@ int m5mols_busy_wait(struct v4l2_subdev *sd, u32 reg, u32 value, u32 mask,
 
 /**
  * m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @reg: combination of size, category and command for the I2C packet
  *
  * Before writing desired interrupt value the INT_FACTOR register should
  * be read to clear pending interrupts.
@@ -349,6 +355,8 @@ int m5mols_wait_interrupt(struct v4l2_subdev *sd, u8 irq_mask, u32 timeout)
 
 /**
  * m5mols_reg_mode - Write the mode and check busy status
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @mode: the required operation mode
  *
  * It always accompanies a little delay changing the M-5MOLS mode, so it is
  * needed checking current busy status to guarantee right mode.
@@ -364,6 +372,7 @@ static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
 
 /**
  * m5mols_set_mode - set the M-5MOLS controller mode
+ * @info: M-5MOLS driver data structure
  * @mode: the required operation mode
  *
  * The commands of M-5MOLS are grouped into specific modes. Each functionality
@@ -421,6 +430,7 @@ int m5mols_set_mode(struct m5mols_info *info, u8 mode)
 
 /**
  * m5mols_get_version - retrieve full revisions information of M-5MOLS
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  *
  * The version information includes revisions of hardware and firmware,
  * AutoFocus alghorithm version and the version string.
@@ -489,6 +499,7 @@ static enum m5mols_restype __find_restype(u32 code)
 
 /**
  * __find_resolution - Lookup preset and type of M-5MOLS's resolution
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  * @mf: pixel format to find/negotiate the resolution preset for
  * @type: M-5MOLS resolution type
  * @resolution:        M-5MOLS resolution preset register value
@@ -662,6 +673,7 @@ static const struct v4l2_subdev_pad_ops m5mols_pad_ops = {
 
 /**
  * m5mols_restore_controls - Apply current control values to the registers
+ * @info: M-5MOLS driver data structure
  *
  * m5mols_do_scenemode() handles all parameters for which there is yet no
  * individual control. It should be replaced at some point by setting each
@@ -686,6 +698,7 @@ int m5mols_restore_controls(struct m5mols_info *info)
 
 /**
  * m5mols_start_monitor - Start the monitor mode
+ * @info: M-5MOLS driver data structure
  *
  * Before applying the controls setup the resolution and frame rate
  * in PARAMETER mode, and then switch over to MONITOR mode.
@@ -789,6 +802,7 @@ int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd,
 
 /**
  * m5mols_fw_start - M-5MOLS internal ARM controller initialization
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  *
  * Execute the M-5MOLS internal ARM controller initialization sequence.
  * This function should be called after the supply voltage has been
@@ -844,6 +858,8 @@ static int m5mols_auto_focus_stop(struct m5mols_info *info)
 
 /**
  * m5mols_s_power - Main sensor power control function
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @on: if true, powers on the device; powers off otherwise.
  *
  * To prevent breaking the lens when the sensor is powered off the Soft-Landing
  * algorithm is called where available. The Soft-Landing algorithm availability
index 34179d2..da39c49 100644 (file)
@@ -428,8 +428,8 @@ static int ov5647_sensor_set_register(struct v4l2_subdev *sd,
 }
 #endif
 
-/**
- * @short Subdev core operations registration
+/*
+ * Subdev core operations registration
  */
 static const struct v4l2_subdev_core_ops ov5647_subdev_core_ops = {
        .s_power                = ov5647_sensor_power,
index 67dcca7..2e14027 100644 (file)
@@ -53,6 +53,9 @@ enum {
  * @gpio_reset: GPIO connected to the sensor's reset pin
  * @lock: mutex protecting the structure's members below
  * @format: media bus format at the sensor's source pad
+ * @clock: pointer to &struct clk.
+ * @clock_frequency: clock frequency
+ * @power_count: stores state if device is powered
  */
 struct s5k6a3 {
        struct device *dev;
index 9fd254a..13c10b5 100644 (file)
@@ -421,6 +421,7 @@ static int s5k6aa_set_ahb_address(struct i2c_client *client)
 
 /**
  * s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
  *
  * Configure the internal ISP PLL for the required output frequency.
  * Locking: called with s5k6aa.lock mutex held.
@@ -669,6 +670,7 @@ static int s5k6aa_set_input_params(struct s5k6aa *s5k6aa)
 
 /**
  * s5k6aa_configure_video_bus - configure the video output interface
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
  * @bus_type: video bus type: parallel or MIPI-CSI
  * @nlanes: number of MIPI lanes to be used (MIPI-CSI only)
  *
@@ -724,6 +726,8 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout,
 
 /**
  * s5k6aa_set_prev_config - write user preview register set
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
+ * @preset: s5kaa preset to be applied
  *
  * Configure output resolution and color fromat, pixel clock
  * frequency range, device frame rate type and frame period range.
@@ -777,6 +781,7 @@ static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa,
 
 /**
  * s5k6aa_initialize_isp - basic ISP MCU initialization
+ * @sd: pointer to V4L2 sub-device descriptor
  *
  * Configure AHB addresses for registers read/write; configure PLLs for
  * required output pixel clock. The ISP power supply needs to be already
index ad2df99..d575b3e 100644 (file)
@@ -86,6 +86,7 @@ static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
 /**
  * struct tvp514x_decoder - TVP5146/47 decoder object
  * @sd: Subdevice Slave handle
+ * @hdl: embedded &struct v4l2_ctrl_handler
  * @tvp514x_regs: copy of hw's regs with preset values.
  * @pdata: Board specific
  * @ver: Chip version
@@ -98,6 +99,9 @@ static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
  * @std_list: Standards list
  * @input: Input routing at chip level
  * @output: Output routing at chip level
+ * @pad: subdev media pad associated with the decoder
+ * @format: media bus frame format
+ * @int_seq: driver's register init sequence
  */
 struct tvp514x_decoder {
        struct v4l2_subdev sd;
@@ -211,7 +215,7 @@ static struct tvp514x_reg tvp514x_reg_list_default[] = {
        {TOK_TERM, 0, 0},
 };
 
-/**
+/*
  * List of image formats supported by TVP5146/47 decoder
  * Currently we are using 8 bit mode only, but can be
  * extended to 10/20 bit mode.
@@ -226,7 +230,7 @@ static const struct v4l2_fmtdesc tvp514x_fmt_list[] = {
        },
 };
 
-/**
+/*
  * Supported standards -
  *
  * Currently supports two standards only, need to add support for rest of the
@@ -931,7 +935,7 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
  * tvp514x_set_pad_format() - V4L2 decoder interface handler for set pad format
  * @sd: pointer to standard V4L2 sub-device structure
  * @cfg: pad configuration
- * @format: pointer to v4l2_subdev_format structure
+ * @fmt: pointer to v4l2_subdev_format structure
  *
  * Set pad format for the output pad
  */
@@ -1199,7 +1203,7 @@ static const struct tvp514x_reg tvp514xm_init_reg_seq[] = {
        {TOK_TERM, 0, 0},
 };
 
-/**
+/*
  * I2C Device Table -
  *
  * name - Name of the actual device/chip.
index 11829c0..509d69e 100644 (file)
@@ -82,11 +82,11 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
  * @start_addr_lo:     DMA ring buffer start address, lower part
  * @start_addr_hi:     DMA ring buffer start address, higher part
  * @size:              DMA ring buffer size register
-                      Bits [0-7]:     DMA packet size, 188 bytes
-                      Bits [16-23]:   packets count in block, 128 packets
-                      Bits [24-31]:   blocks count, 8 blocks
+ *                    * Bits [0-7]:   DMA packet size, 188 bytes
+ *                    * Bits [16-23]: packets count in block, 128 packets
+ *                    * Bits [24-31]: blocks count, 8 blocks
  * @timeout:           DMA timeout in units of 8ns
-                      For example, value of 375000000 equals to 3 sec
+ *                    For example, value of 375000000 equals to 3 sec
  * @curr_addr_lo:      Current ring buffer head address, lower part
  * @curr_addr_hi:      Current ring buffer head address, higher part
  * @stat_pkt_received: Statistic register, not tested
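
Given the bit layout spelled out above, the documented defaults (188-byte
packets, 128 packets per block, 8 blocks) fold into a single register value.
A small illustrative helper, not taken from the driver:

static u32 smi_dma_size_reg(u32 packet_size, u32 packets, u32 blocks)
{
        /* Bits [0-7] packet size, [16-23] packets per block, [24-31] block count */
        return (packet_size & 0xff) |
               ((packets & 0xff) << 16) |
               ((blocks & 0xff) << 24);
}

/* smi_dma_size_reg(188, 128, 8) == 0x088000bc */
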
index d28211b..58d6b51 100644 (file)
@@ -175,7 +175,7 @@ out:
        return 0;
 }
 
-/**
+/*
  * Set channel Quality Profile (0-3).
  */
 void solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch,
index eb5a9ea..dd199bf 100644 (file)
@@ -404,6 +404,7 @@ static const struct v4l2_file_operations vip_fops = {
  * vidioc_querycap - return capabilities of device
  * @file: descriptor of device
  * @cap: contains return values
+ * @priv: unused
  *
  * the capabilities of the device are returned
  *
@@ -429,6 +430,7 @@ static int vidioc_querycap(struct file *file, void *priv,
  * vidioc_s_std - set video standard
  * @file: descriptor of device
  * @std: contains standard to be set
+ * @priv: unused
  *
  * the video standard is set
  *
@@ -466,6 +468,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id std)
 /**
  * vidioc_g_std - get video standard
  * @file: descriptor of device
+ * @priv: unused
  * @std: contains return values
  *
  * the current video standard is returned
@@ -483,6 +486,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
 /**
  * vidioc_querystd - get possible video standards
  * @file: descriptor of device
+ * @priv: unused
  * @std: contains return values
  *
  * all possible video standards are returned
@@ -512,6 +516,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
 /**
  * vidioc_s_input - set input line
  * @file: descriptor of device
+ * @priv: unused
  * @i: new input line number
  *
  * the current active input line is set
@@ -538,6 +543,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
 /**
  * vidioc_g_input - return input line
  * @file: descriptor of device
+ * @priv: unused
  * @i: returned input line number
  *
  * the current active input line is returned
@@ -554,6 +560,8 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
 
 /**
  * vidioc_enum_fmt_vid_cap - return video capture format
+ * @file: descriptor of device
+ * @priv: unused
  * @f: returned format information
  *
  * returns name and format of video capture
@@ -577,6 +585,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
 /**
  * vidioc_try_fmt_vid_cap - set video capture format
  * @file: descriptor of device
+ * @priv: unused
  * @f: new format
  *
  * new video format is set which includes width and
@@ -639,6 +648,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
 /**
  * vidioc_s_fmt_vid_cap - set current video format parameters
  * @file: descriptor of device
+ * @priv: unused
  * @f: returned format information
  *
  * set new capture format
@@ -706,6 +716,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
 /**
  * vidioc_g_fmt_vid_cap - get current video format parameters
  * @file: descriptor of device
+ * @priv: unused
  * @f: contains format information
  *
  * returns current video format parameters
index 7439db2..82ff9c9 100644 (file)
 #include "tw68.h"
 
 /**
- *  @rp                pointer to current risc program position
- *  @sglist    pointer to "scatter-gather list" of buffer pointers
- *  @offset    offset to target memory buffer
- *  @sync_line 0 -> no sync, 1 -> odd sync, 2 -> even sync
- *  @bpl       number of bytes per scan line
- *  @padding   number of bytes of padding to add
- *  @lines     number of lines in field
- *  @jump      insert a jump at the start
+ * tw68_risc_field
+ *  @rp:       pointer to current risc program position
+ *  @sglist:   pointer to "scatter-gather list" of buffer pointers
+ *  @offset:   offset to target memory buffer
+ *  @sync_line:        0 -> no sync, 1 -> odd sync, 2 -> even sync
+ *  @bpl:      number of bytes per scan line
+ *  @padding:  number of bytes of padding to add
+ *  @lines:    number of lines in field
+ *  @jump:     insert a jump at the start
  */
 static __le32 *tw68_risc_field(__le32 *rp, struct scatterlist *sglist,
                            unsigned int offset, u32 sync_line,
@@ -120,18 +121,18 @@ static __le32 *tw68_risc_field(__le32 *rp, struct scatterlist *sglist,
  *     memory for the dma controller "program" and then fills in that
  *     memory with the appropriate "instructions".
  *
- *     @pci_dev        structure with info about the pci
+ *     @pci:           structure with info about the pci
  *                     slot which our device is in.
- *     @risc           structure with info about the memory
+ *     @buf:           structure with info about the memory
  *                     used for our controller program.
- *     @sglist         scatter-gather list entry
- *     @top_offset     offset within the risc program area for the
+ *     @sglist:        scatter-gather list entry
+ *     @top_offset:    offset within the risc program area for the
  *                     first odd frame line
- *     @bottom_offset  offset within the risc program area for the
+ *     @bottom_offset: offset within the risc program area for the
  *                     first even frame line
- *     @bpl            number of data bytes per scan line
- *     @padding        number of extra bytes to add at end of line
- *     @lines          number of scan lines
+ *     @bpl:           number of data bytes per scan line
+ *     @padding:       number of extra bytes to add at end of line
+ *     @lines:         number of scan lines
  */
 int tw68_risc_buffer(struct pci_dev *pci,
                        struct tw68_buf *buf,
index 07e89a4..16352e2 100644 (file)
@@ -47,8 +47,9 @@ EXPORT_SYMBOL_GPL(vpif_lock);
 void __iomem *vpif_base;
 EXPORT_SYMBOL_GPL(vpif_base);
 
-/**
+/*
  * vpif_ch_params: video standard configuration parameters for vpif
+ *
  * The table must include all presets from supported subdevices.
  */
 const struct vpif_channel_config_params vpif_ch_params[] = {
index a89367a..fca4dc8 100644 (file)
@@ -109,7 +109,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
  * @vq: vb2_queue ptr
  * @nbuffers: ptr to number of buffers requested by application
  * @nplanes:: contains number of distinct video planes needed to hold a frame
- * @sizes[]: contains the size (in bytes) of each plane.
+ * @sizes: contains the size (in bytes) of each plane.
  * @alloc_devs: ptr to allocation context
  *
  * This callback function is called when reqbuf() is called to adjust
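
For reference, a generic sketch of such a vb2 queue_setup callback with the
parameters listed above; the frame size and minimum queue depth are
illustrative assumptions, not the vpif driver's actual values:

static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
                       unsigned int *nplanes, unsigned int sizes[],
                       struct device *alloc_devs[])
{
        const unsigned int size = 720 * 576 * 2;   /* assumed worst-case frame size */

        if (*nplanes) {
                /* Format already negotiated: only validate the requested size. */
                return sizes[0] < size ? -EINVAL : 0;
        }

        *nplanes = 1;
        sizes[0] = size;
        if (*nbuffers < 3)                         /* keep a sensible minimum queue depth */
                *nbuffers = 3;
        return 0;
}
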
@@ -167,7 +167,7 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
 
 /**
  * vpif_start_streaming : Starts the DMA engine for streaming
- * @vb: ptr to vb2_buffer
+ * @vq: ptr to vb2_buffer
  * @count: number of buffers
  */
 static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -629,7 +629,7 @@ static void vpif_calculate_offsets(struct channel_obj *ch)
 
 /**
  * vpif_get_default_field() - Get default field type based on interface
- * @vpif_params - ptr to vpif params
+ * @iface: ptr to vpif interface
  */
 static inline enum v4l2_field vpif_get_default_field(
                                struct vpif_interface *iface)
@@ -640,8 +640,8 @@ static inline enum v4l2_field vpif_get_default_field(
 
 /**
  * vpif_config_addr() - function to configure buffer address in vpif
- * @ch - channel ptr
- * @muxmode - channel mux mode
+ * @ch: channel ptr
+ * @muxmode: channel mux mode
  */
 static void vpif_config_addr(struct channel_obj *ch, int muxmode)
 {
@@ -661,9 +661,9 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
 
 /**
  * vpif_input_to_subdev() - Maps input to sub device
- * @vpif_cfg - global config ptr
- * @chan_cfg - channel config ptr
- * @input_index - Given input index from application
+ * @vpif_cfg: global config ptr
+ * @chan_cfg: channel config ptr
+ * @input_index: Given input index from application
  *
  * lookup the sub device information for a given input index.
  * we report all the inputs to application. inputs table also
@@ -699,9 +699,9 @@ static int vpif_input_to_subdev(
 
 /**
  * vpif_set_input() - Select an input
- * @vpif_cfg - global config ptr
- * @ch - channel
- * @_index - Given input index from application
+ * @vpif_cfg: global config ptr
+ * @ch: channel
+ * @index: Given input index from application
  *
  * Select the given input.
  */
@@ -792,7 +792,7 @@ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
  * vpif_g_std() - get STD handler
  * @file: file ptr
  * @priv: file handle
- * @std_id: ptr to std id
+ * @std: ptr to std id
  */
 static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
 {
@@ -933,7 +933,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
  * vpif_enum_fmt_vid_cap() - ENUM_FMT handler
  * @file: file ptr
  * @priv: file handle
- * @index: input index
+ * @fmt: ptr to V4L2 format descriptor
  */
 static int vpif_enum_fmt_vid_cap(struct file *file, void  *priv,
                                        struct v4l2_fmtdesc *fmt)
@@ -1745,6 +1745,7 @@ static int vpif_remove(struct platform_device *device)
 #ifdef CONFIG_PM_SLEEP
 /**
  * vpif_suspend: vpif device suspend
+ * @dev: pointer to &struct device
  */
 static int vpif_suspend(struct device *dev)
 {
index ff2f75a..7be6362 100644 (file)
@@ -102,7 +102,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
  * @vq: vb2_queue ptr
  * @nbuffers: ptr to number of buffers requested by application
  * @nplanes:: contains number of distinct video planes needed to hold a frame
- * @sizes[]: contains the size (in bytes) of each plane.
+ * @sizes: contains the size (in bytes) of each plane.
  * @alloc_devs: ptr to allocation context
  *
  * This callback function is called when reqbuf() is called to adjust
@@ -158,7 +158,7 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
 
 /**
  * vpif_start_streaming : Starts the DMA engine for streaming
- * @vb: ptr to vb2_buffer
+ * @vq: ptr to vb2_buffer
  * @count: number of buffers
  */
 static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -766,9 +766,9 @@ static int vpif_enum_output(struct file *file, void *fh,
 
 /**
  * vpif_output_to_subdev() - Maps output to sub device
- * @vpif_cfg - global config ptr
- * @chan_cfg - channel config ptr
- * @index - Given output index from application
+ * @vpif_cfg: global config ptr
+ * @chan_cfg: channel config ptr
+ * @index: Given output index from application
  *
  * lookup the sub device information for a given output index.
  * we report all the output to application. output table also
@@ -802,9 +802,9 @@ vpif_output_to_subdev(struct vpif_display_config *vpif_cfg,
 
 /**
  * vpif_set_output() - Select an output
- * @vpif_cfg - global config ptr
- * @ch - channel
- * @index - Given output index from application
+ * @vpif_cfg: global config ptr
+ * @ch: channel
+ * @index: Given output index from application
  *
  * Select the given output.
  */
index 948fe01..ed9302c 100644 (file)
@@ -146,6 +146,7 @@ static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend)
 
 /**
  * fimc_capture_config_update - apply the camera interface configuration
+ * @ctx: FIMC capture context
  *
  * To be called from within the interrupt handler with fimc.slock
  * spinlock held. It updates the camera pixel crop, rotation and
@@ -858,6 +859,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
  * fimc_get_sensor_frame_desc - query the sensor for media bus frame parameters
  * @sensor: pointer to the sensor subdev
  * @plane_fmt: provides plane sizes corresponding to the frame layout entries
+ * @num_planes: number of planes
  * @try: true to set the frame parameters, false to query only
  *
  * This function is used by this driver only for compressed/blob data formats.
@@ -1101,6 +1103,7 @@ static int fimc_cap_g_input(struct file *file, void *priv, unsigned int *i)
 /**
  * fimc_pipeline_validate - check for formats inconsistencies
  *                          between source and sink pad of each link
+ * @fimc:      the FIMC device this context applies to
  *
  * Return 0 if all formats match or -EPIPE otherwise.
  */
index c15596b..0ef583c 100644 (file)
@@ -60,6 +60,7 @@ static void __setup_sensor_notification(struct fimc_md *fmd,
 
 /**
  * fimc_pipeline_prepare - update pipeline information with subdevice pointers
+ * @p: fimc pipeline
  * @me: media entity terminating the pipeline
  *
  * Caller holds the graph mutex.
@@ -151,8 +152,8 @@ static int __subdev_set_power(struct v4l2_subdev *sd, int on)
 
 /**
  * fimc_pipeline_s_power - change power state of all pipeline subdevs
- * @fimc: fimc device terminating the pipeline
- * @state: true to power on, false to power off
+ * @p: fimc device terminating the pipeline
+ * @on: true to power on, false to power off
  *
  * Needs to be called with the graph mutex held.
  */
@@ -219,6 +220,7 @@ static int __fimc_pipeline_enable(struct exynos_media_pipeline *ep,
 /**
  * __fimc_pipeline_open - update the pipeline information, enable power
  *                        of all pipeline subdevs and the sensor clock
+ * @ep: fimc device terminating the pipeline
  * @me: media entity to start graph walk with
  * @prepare: true to walk the current pipeline and acquire all subdevs
  *
@@ -252,7 +254,7 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
 
 /**
  * __fimc_pipeline_close - disable the sensor clock and pipeline power
- * @fimc: fimc device terminating the pipeline
+ * @ep: fimc device terminating the pipeline
  *
  * Disable power of all subdevs and turn the external sensor clock off.
  */
@@ -281,7 +283,7 @@ static int __fimc_pipeline_close(struct exynos_media_pipeline *ep)
 
 /**
  * __fimc_pipeline_s_stream - call s_stream() on pipeline subdevs
- * @pipeline: video pipeline structure
+ * @ep: video pipeline structure
  * @on: passed as the s_stream() callback argument
  */
 static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
@@ -902,6 +904,7 @@ static int __fimc_md_create_fimc_is_links(struct fimc_md *fmd)
 
 /**
  * fimc_md_create_links - create default links between registered entities
+ * @fmd: fimc media device
  *
  * Parallel interface sensor entities are connected directly to FIMC capture
  * entities. The sensors using MIPI CSIS bus are connected through immutable
index 560aada..cba46a6 100644 (file)
@@ -189,7 +189,7 @@ struct csis_drvdata {
  * @irq: requested s5p-mipi-csis irq number
  * @interrupt_mask: interrupt mask of the all used interrupts
  * @flags: the state variable for power and streaming control
- * @clock_frequency: device bus clock frequency
+ * @clk_frequency: device bus clock frequency
  * @hs_settle: HS-RX settle time
  * @num_lanes: number of MIPI-CSI data lanes used
  * @max_num_lanes: maximum number of MIPI-CSI data lanes supported
index b7731b1..aa3ce41 100644 (file)
@@ -59,6 +59,7 @@ struct h264_fb {
  * @read_idx  : read index
  * @write_idx : write index
  * @count     : buffer count in list
+ * @reserved  : for 8 bytes alignment
  */
 struct h264_ring_fb_list {
        struct h264_fb fb_list[H264_MAX_FB_NUM];
index b9fad6a..3e84a76 100644 (file)
@@ -155,7 +155,6 @@ struct vdec_vp8_vpu_inst {
  * @reg_base              : HW register base address
  * @frm_cnt               : decode frame count
  * @ctx                           : V4L2 context
- * @dev                           : platform device
  * @vpu                           : VPU instance for decoder
  * @vsi                           : VPU share information
  */
index 4eb3be3..6cf31b3 100644 (file)
@@ -34,7 +34,7 @@ static const char h264_filler_marker[] = {0x0, 0x0, 0x0, 0x1, 0xc};
 #define H264_FILLER_MARKER_SIZE ARRAY_SIZE(h264_filler_marker)
 #define VENC_PIC_BITSTREAM_BYTE_CNT 0x0098
 
-/**
+/*
  * enum venc_h264_vpu_work_buf - h264 encoder buffer index
  */
 enum venc_h264_vpu_work_buf {
@@ -50,7 +50,7 @@ enum venc_h264_vpu_work_buf {
        VENC_H264_VPU_WORK_BUF_MAX,
 };
 
-/**
+/*
  * enum venc_h264_bs_mode - for bs_mode argument in h264_enc_vpu_encode
  */
 enum venc_h264_bs_mode {
index acb639c..957420d 100644 (file)
@@ -34,7 +34,7 @@
 /* This ac_tag is vp8 frame tag. */
 #define MAX_AC_TAG_SIZE 10
 
-/**
+/*
  * enum venc_vp8_vpu_work_buf - vp8 encoder buffer index
  */
 enum venc_vp8_vpu_work_buf {
index 853d598..1ff6a93 100644 (file)
@@ -181,6 +181,7 @@ struct share_obj {
  * @extmem:            VPU extended memory information
  * @reg:               VPU TCM and configuration registers
  * @run:               VPU initialization status
+ * @wdt:               VPU watchdog workqueue
  * @ipi_desc:          VPU IPI descriptor
  * @recv_buf:          VPU DTCM share buffer for receiving. The
  *                     receive buffer is only accessed in interrupt context.
@@ -194,7 +195,7 @@ struct share_obj {
  *                     suppose a client is using VPU to decode VP8.
  *                     If the other client wants to encode VP8,
  *                     it has to wait until VP8 decode completes.
- * @wdt_refcnt         WDT reference count to make sure the watchdog can be
+ * @wdt_refcnt:                WDT reference count to make sure the watchdog can be
  *                     disabled if no other client is using VPU service
  * @ack_wq:            The wait queue for each codec and mdp. When sleeping
  *                     processes wake up, they will check the condition
index 9d3f0cb..295f34a 100644 (file)
@@ -235,6 +235,7 @@ enum pxa_mbus_layout {
  *                     stored in memory in the following way:
  * @packing:           Type of sample-packing, that has to be used
  * @order:             Sample order when storing in memory
+ * @layout:            Planes layout in memory
  * @bits_per_sample:   How many bits the bridge has to sample
  */
 struct pxa_mbus_pixelfmt {
@@ -852,10 +853,10 @@ static void pxa_camera_dma_irq_v(void *data)
 /**
  * pxa_init_dma_channel - init dma descriptors
  * @pcdev: pxa camera device
- * @vb: videobuffer2 buffer
- * @dma: dma video buffer
+ * @buf: pxa camera buffer
  * @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
- * @cibr: camera Receive Buffer Register
+ * @sg: dma scatter list
+ * @sglen: dma scatter list length
  *
  * Prepares the pxa dma descriptors to transfer one camera channel.
  *
@@ -1010,6 +1011,8 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
 /**
  * pxa_camera_check_link_miss - check missed DMA linking
  * @pcdev: camera device
+ * @last_submitted: an opaque DMA cookie for last submitted
+ * @last_issued: an opaque DMA cookie for last issued
  *
  * The DMA chaining is done with DMA running. This means a tiny temporal window
  * remains, where a buffer is queued on the chain, while the chain is already
index 3245bc4..b13dec3 100644 (file)
@@ -1132,7 +1132,7 @@ static int fdp1_device_process(struct fdp1_ctx *ctx)
  * mem2mem callbacks
  */
 
-/**
+/*
  * job_ready() - check whether an instance is ready to be scheduled to run
  */
 static int fdp1_m2m_job_ready(void *priv)
index 070bac3..f6092ae 100644 (file)
@@ -257,7 +257,7 @@ struct jpu_fmt {
 };
 
 /**
- * jpu_q_data - parameters of one queue
+ * struct jpu_q_data - parameters of one queue
  * @fmtinfo: driver-specific format of this queue
  * @format: multiplanar format of this queue
  * @sequence: sequence number
@@ -269,7 +269,7 @@ struct jpu_q_data {
 };
 
 /**
- * jpu_ctx - the device context data
+ * struct jpu_ctx - the device context data
  * @jpu: JPEG IP device for this context
  * @encoder: compression (encode) operation or decompression (decode)
  * @compr_quality: destination image quality in compression (encode) mode
index c4ab639..79bc0ef 100644 (file)
@@ -103,6 +103,7 @@ static const struct camif_fmt camif_formats[] = {
 
 /**
  * s3c_camif_find_format() - lookup camif color format by fourcc or an index
+ * @vp: video path (DMA) description (codec/preview)
  * @pixelformat: fourcc to match, ignored if null
  * @index: index to the camif_formats array, ignored if negative
  */
index 15a562a..dedc1b0 100644 (file)
@@ -267,7 +267,7 @@ static void sh_veu_process(struct sh_veu_dev *veu,
        sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
 }
 
-/**
+/*
  * sh_veu_device_run() - prepares and starts the device
  *
  * This will be called by the framework when it decides to schedule a particular
index 0116097..270ec61 100644 (file)
@@ -306,16 +306,17 @@ update_cache:
 }
 
 /**
- * @icd                - soc-camera device
- * @rect       - camera cropping window
- * @subrect    - part of rect, sent to the user
- * @mf         - in- / output camera output window
- * @width      - on input: max host input width
- *               on output: user width, mapped back to input
- * @height     - on input: max host input height
- *               on output: user height, mapped back to input
- * @host_can_scale - host can scale this pixel format
- * @shift      - shift, used for scaling
+ * soc_camera_client_scale
+ * @icd:               soc-camera device
+ * @rect:              camera cropping window
+ * @subrect:           part of rect, sent to the user
+ * @mf:                        in- / output camera output window
+ * @width:             on input: max host input width;
+ *                     on output: user width, mapped back to input
+ * @height:            on input: max host input height;
+ *                     on output: user height, mapped back to input
+ * @host_can_scale:    host can scale this pixel format
+ * @shift:             shift, used for scaling
  */
 int soc_camera_client_scale(struct soc_camera_device *icd,
                        struct v4l2_rect *rect, struct v4l2_rect *subrect,
index a7e5eed..17f1eb0 100644 (file)
@@ -134,7 +134,7 @@ enum hva_h264_sei_payload_type {
        SEI_FRAME_PACKING_ARRANGEMENT = 45
 };
 
-/**
+/*
  * stereo Video Info struct
  */
 struct hva_h264_stereo_video_sei {
@@ -146,7 +146,9 @@ struct hva_h264_stereo_video_sei {
        u8 right_view_self_contained_flag;
 };
 
-/**
+/*
+ * struct hva_h264_td
+ *
  * @frame_width: width in pixels of the buffer containing the input frame
  * @frame_height: height in pixels of the buffer containing the input frame
  * @frame_num: the parameter to be written in the slice header
@@ -352,7 +354,9 @@ struct hva_h264_td {
        u32 addr_brc_in_out_parameter;
 };
 
-/**
+/*
+ * struct hva_h264_slice_po
+ *
  * @ slice_size: slice size
  * @ slice_start_time: start time
  * @ slice_stop_time: stop time
@@ -365,7 +369,9 @@ struct hva_h264_slice_po {
        u32 slice_num;
 };
 
-/**
+/*
+ * struct hva_h264_po
+ *
  * @ bitstream_size: bitstream size
  * @ dct_bitstream_size: dtc bitstream size
  * @ stuffing_bits: number of stuffing bits inserted by the encoder
@@ -391,7 +397,9 @@ struct hva_h264_task {
        struct hva_h264_po po;
 };
 
-/**
+/*
+ * struct hva_h264_ctx
+ *
  * @seq_info:  sequence information buffer
  * @ref_frame: reference frame buffer
  * @rec_frame: reconstructed frame buffer
index 45bd105..e395aa8 100644 (file)
@@ -926,7 +926,7 @@ static struct vpe_ctx *file2ctx(struct file *file)
  * mem2mem callbacks
  */
 
-/**
+/*
  * job_ready() - check whether an instance is ready to be scheduled to run
  */
 static int job_ready(void *priv)
index 7bf9fa2..065483e 100644 (file)
@@ -343,7 +343,7 @@ static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
  * mem2mem callbacks
  */
 
-/**
+/*
  * job_ready() - check whether an instance is ready to be scheduled to run
  */
 static int job_ready(void *priv)
index 8b5cbb6..4257451 100644 (file)
@@ -70,6 +70,7 @@ struct vsp1_dl_body {
  * @dma: DMA address for the header
  * @body0: first display list body
  * @fragments: list of extra display list bodies
+ * @has_chain: if true, indicates that there's a partition chain
  * @chain: entry in the display list partition chain
  */
 struct vsp1_dl_list {
index 271f725..540ac88 100644 (file)
@@ -158,7 +158,7 @@ enum si476x_ctrl_idx {
 };
 static struct v4l2_ctrl_config si476x_ctrls[] = {
 
-       /**
+       /*
         * SI476X during its station seeking(or tuning) process uses several
         * parameters to detrmine if "the station" is valid:
         *
@@ -197,7 +197,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
                .step   = 2,
        },
 
-       /**
+       /*
         * #V4L2_CID_SI476X_HARMONICS_COUNT -- number of harmonics
         * built-in power-line noise supression filter is to reject
         * during AM-mode operation.
@@ -213,7 +213,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
                .step   = 1,
        },
 
-       /**
+       /*
         * #V4L2_CID_SI476X_DIVERSITY_MODE -- configuration which
         * two tuners working in diversity mode are to work in.
         *
@@ -237,7 +237,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
                .max    = ARRAY_SIZE(phase_diversity_modes) - 1,
        },
 
-       /**
+       /*
         * #V4L2_CID_SI476X_INTERCHIP_LINK -- inter-chip link in
         * diversity mode indicator. Allows user to determine if two
         * chips working in diversity mode have established a link
@@ -296,11 +296,15 @@ struct si476x_radio_ops {
 /**
  * struct si476x_radio - radio device
  *
- * @core: Pointer to underlying core device
+ * @v4l2dev: Pointer to V4L2 device created by V4L2 subsystem
  * @videodev: Pointer to video device created by V4L2 subsystem
+ * @ctrl_handler: V4L2 controls handler
+ * @core: Pointer to underlying core device
  * @ops: Vtable of functions. See struct si476x_radio_ops for details
- * @kref: Reference counter
- * @core_lock: An r/w semaphore to brebvent the deletion of underlying
+ * @debugfs: pointer to &struct dentry for debugfs
+ * @audmode: audio mode, as defined for the rxsubchans field
+ *          at videodev2.h
+ *
  * core structure is the radio device is being used
  */
 struct si476x_radio {
index 903fcd5..3cbdc08 100644 (file)
@@ -1330,7 +1330,7 @@ static int wl1273_fm_vidioc_s_input(struct file *file, void *priv,
 
 /**
  * wl1273_fm_set_tx_power() -  Set the transmission power value.
- * @core:                      A pointer to the device struct.
+ * @radio:                     A pointer to the device struct.
  * @power:                     The new power value.
  */
 static int wl1273_fm_set_tx_power(struct wl1273_device *radio, u16 power)
index f54bc5d..ec4ded8 100644 (file)
@@ -339,7 +339,7 @@ static void img_ir_decoder_preprocess(struct img_ir_decoder *decoder)
 /**
  * img_ir_decoder_convert() - Generate internal timings in decoder.
  * @decoder:   Decoder to be converted to internal timings.
- * @timings:   Timing register values.
+ * @reg_timings: Timing register values.
  * @clock_hz:  IR clock rate in Hz.
  *
  * Fills out the repeat timings and timing register values for a specific clock
index b25b35b..eb943e8 100644 (file)
@@ -492,7 +492,7 @@ static void free_imon_context(struct imon_context *ictx)
        dev_dbg(dev, "%s: iMON context freed\n", __func__);
 }
 
-/**
+/*
  * Called when the Display device (e.g. /dev/lcd0)
  * is opened by the application.
  */
@@ -542,7 +542,7 @@ exit:
        return retval;
 }
 
-/**
+/*
  * Called when the display device (e.g. /dev/lcd0)
  * is closed by the application.
  */
@@ -575,7 +575,7 @@ static int display_close(struct inode *inode, struct file *file)
        return retval;
 }
 
-/**
+/*
  * Sends a packet to the device -- this function must be called with
  * ictx->lock held, or its unlock/lock sequence while waiting for tx
  * to complete can/will lead to a deadlock.
@@ -664,7 +664,7 @@ static int send_packet(struct imon_context *ictx)
        return retval;
 }
 
-/**
+/*
  * Sends an associate packet to the iMON 2.4G.
  *
  * This might not be such a good idea, since it has an id collision with
@@ -694,7 +694,7 @@ static int send_associate_24g(struct imon_context *ictx)
        return retval;
 }
 
-/**
+/*
  * Sends packets to setup and show clock on iMON display
  *
  * Arguments: year - last 2 digits of year, month - 1..12,
@@ -781,7 +781,7 @@ static int send_set_imon_clock(struct imon_context *ictx,
        return retval;
 }
 
-/**
+/*
  * These are the sysfs functions to handle the association on the iMON 2.4G LT.
  */
 static ssize_t show_associate_remote(struct device *d,
@@ -823,7 +823,7 @@ static ssize_t store_associate_remote(struct device *d,
        return count;
 }
 
-/**
+/*
  * sysfs functions to control internal imon clock
  */
 static ssize_t show_imon_clock(struct device *d,
@@ -923,7 +923,7 @@ static const struct attribute_group imon_rf_attr_group = {
        .attrs = imon_rf_sysfs_entries
 };
 
-/**
+/*
  * Writes data to the VFD.  The iMON VFD is 2x16 characters
  * and requires data in 5 consecutive USB interrupt packets,
  * each packet but the last carrying 7 bytes.
@@ -1008,7 +1008,7 @@ exit:
        return (!retval) ? n_bytes : retval;
 }
 
-/**
+/*
  * Writes data to the LCD.  The iMON OEM LCD screen expects 8-byte
  * packets. We accept data as 16 hexadecimal digits, followed by a
  * newline (to make it easy to drive the device from a command-line
@@ -1066,7 +1066,7 @@ exit:
        return (!retval) ? n_bytes : retval;
 }
 
-/**
+/*
  * Callback function for USB core API: transmit data
  */
 static void usb_tx_callback(struct urb *urb)
@@ -1087,7 +1087,7 @@ static void usb_tx_callback(struct urb *urb)
        complete(&ictx->tx.finished);
 }
 
-/**
+/*
  * report touchscreen input
  */
 static void imon_touch_display_timeout(struct timer_list *t)
@@ -1103,7 +1103,7 @@ static void imon_touch_display_timeout(struct timer_list *t)
        input_sync(ictx->touch);
 }
 
-/**
+/*
  * iMON IR receivers support two different signal sets -- those used by
  * the iMON remotes, and those used by the Windows MCE remotes (which is
  * really just RC-6), but only one or the other at a time, as the signals
@@ -1191,7 +1191,7 @@ static inline int tv2int(const struct timeval *a, const struct timeval *b)
        return sec;
 }
 
-/**
+/*
  * The directional pad behaves a bit differently, depending on whether this is
  * one of the older ffdc devices or a newer device. Newer devices appear to
  * have a higher resolution matrix for more precise mouse movement, but it
@@ -1543,7 +1543,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
        }
 }
 
-/**
+/*
  * figure out if these is a press or a release. We don't actually
  * care about repeats, as those will be auto-generated within the IR
  * subsystem for repeating scancodes.
@@ -1592,10 +1592,10 @@ static int imon_parse_press_type(struct imon_context *ictx,
        return press_type;
 }
 
-/**
+/*
  * Process the incoming packet
  */
-/**
+/*
  * Convert bit count to time duration (in us) and submit
  * the value to lirc_dev.
  */
@@ -1608,7 +1608,7 @@ static void submit_data(struct imon_context *context)
        ir_raw_event_store_with_filter(context->rdev, &ev);
 }
 
-/**
+/*
  * Process the incoming packet
  */
 static void imon_incoming_ir_raw(struct imon_context *context,
@@ -1831,7 +1831,7 @@ not_input_data:
        }
 }
 
-/**
+/*
  * Callback function for USB core API: receive data
  */
 static void usb_rx_callback_intf0(struct urb *urb)
@@ -2485,7 +2485,7 @@ static void imon_init_display(struct imon_context *ictx,
 
 }
 
-/**
+/*
  * Callback function for USB core API: Probe
  */
 static int imon_probe(struct usb_interface *interface,
@@ -2583,7 +2583,7 @@ fail:
        return ret;
 }
 
-/**
+/*
  * Callback function for USB core API: disconnect
  */
 static void imon_disconnect(struct usb_interface *interface)
index e2bd68c..22c8aee 100644 (file)
@@ -39,7 +39,7 @@ enum jvc_state {
 /**
  * ir_jvc_decode() - Decode one JVC pulse or space
  * @dev:       the struct rc_dev descriptor of the device
- * @duration:   the struct ir_raw_event descriptor of the pulse/space
+ * @ev:   the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the pulse violates the state machine
  */
index 8f2f374..4fd4521 100644 (file)
@@ -25,8 +25,8 @@
 /**
  * ir_lirc_decode() - Send raw IR data to lirc_dev to be relayed to the
  *                   lircd userspace daemon for decoding.
- * @input_dev: the struct rc_dev descriptor of the device
- * @duration:  the struct ir_raw_event descriptor of the pulse/space
+ * @dev:       the struct rc_dev descriptor of the device
+ * @ev:                the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the lirc interfaces aren't wired up.
  */
index a95d09a..6880c19 100644 (file)
@@ -41,7 +41,7 @@ enum nec_state {
 /**
  * ir_nec_decode() - Decode one NEC pulse or space
  * @dev:       the struct rc_dev descriptor of the device
- * @duration:  the struct ir_raw_event descriptor of the pulse/space
+ * @ev:                the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the pulse violates the state machine
  */
@@ -183,7 +183,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
  * ir_nec_scancode_to_raw() - encode an NEC scancode ready for modulation.
  * @protocol:  specific protocol to use
  * @scancode:  a single NEC scancode.
- * @raw:       raw data to be modulated.
  */
 static u32 ir_nec_scancode_to_raw(enum rc_proto protocol, u32 scancode)
 {
index 758c609..d94e07b 100644 (file)
@@ -48,7 +48,7 @@ enum sanyo_state {
 /**
  * ir_sanyo_decode() - Decode one SANYO pulse or space
  * @dev:       the struct rc_dev descriptor of the device
- * @duration:  the struct ir_raw_event descriptor of the pulse/space
+ * @ev:                the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the pulse violates the state machine
  */
index 129b558..7140dd6 100644 (file)
@@ -39,7 +39,7 @@ enum sharp_state {
 /**
  * ir_sharp_decode() - Decode one Sharp pulse or space
  * @dev:       the struct rc_dev descriptor of the device
- * @duration:  the struct ir_raw_event descriptor of the pulse/space
+ * @ev:                the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the pulse violates the state machine
  */
index 6f464be..712bc6d 100644 (file)
@@ -35,7 +35,7 @@ enum xmp_state {
 /**
  * ir_xmp_decode() - Decode one XMP pulse or space
  * @dev:       the struct rc_dev descriptor of the device
- * @duration:  the struct ir_raw_event descriptor of the pulse/space
+ * @ev:                the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the pulse violates the state machine
  */
index f6e5ba4..d78483a 100644 (file)
@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
 /**
  * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
  * @dev:       the struct rc_dev device descriptor
- * @type:      the type of the event that has occurred
+ * @ev:                the event that has occurred
  *
  * This routine (which may be called from an interrupt context) works
  * in similar manner to ir_raw_event_store_edge.
index 17950e2..c144b77 100644 (file)
@@ -39,41 +39,41 @@ static const struct {
        [RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 250 },
        [RC_PROTO_OTHER] = { .name = "other", .repeat_period = 250 },
        [RC_PROTO_RC5] = { .name = "rc-5",
-               .scancode_bits = 0x1f7f, .repeat_period = 164 },
+               .scancode_bits = 0x1f7f, .repeat_period = 250 },
        [RC_PROTO_RC5X_20] = { .name = "rc-5x-20",
-               .scancode_bits = 0x1f7f3f, .repeat_period = 164 },
+               .scancode_bits = 0x1f7f3f, .repeat_period = 250 },
        [RC_PROTO_RC5_SZ] = { .name = "rc-5-sz",
-               .scancode_bits = 0x2fff, .repeat_period = 164 },
+               .scancode_bits = 0x2fff, .repeat_period = 250 },
        [RC_PROTO_JVC] = { .name = "jvc",
                .scancode_bits = 0xffff, .repeat_period = 250 },
        [RC_PROTO_SONY12] = { .name = "sony-12",
-               .scancode_bits = 0x1f007f, .repeat_period = 100 },
+               .scancode_bits = 0x1f007f, .repeat_period = 250 },
        [RC_PROTO_SONY15] = { .name = "sony-15",
-               .scancode_bits = 0xff007f, .repeat_period = 100 },
+               .scancode_bits = 0xff007f, .repeat_period = 250 },
        [RC_PROTO_SONY20] = { .name = "sony-20",
-               .scancode_bits = 0x1fff7f, .repeat_period = 100 },
+               .scancode_bits = 0x1fff7f, .repeat_period = 250 },
        [RC_PROTO_NEC] = { .name = "nec",
-               .scancode_bits = 0xffff, .repeat_period = 160 },
+               .scancode_bits = 0xffff, .repeat_period = 250 },
        [RC_PROTO_NECX] = { .name = "nec-x",
-               .scancode_bits = 0xffffff, .repeat_period = 160 },
+               .scancode_bits = 0xffffff, .repeat_period = 250 },
        [RC_PROTO_NEC32] = { .name = "nec-32",
-               .scancode_bits = 0xffffffff, .repeat_period = 160 },
+               .scancode_bits = 0xffffffff, .repeat_period = 250 },
        [RC_PROTO_SANYO] = { .name = "sanyo",
                .scancode_bits = 0x1fffff, .repeat_period = 250 },
        [RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd",
-               .scancode_bits = 0xffff, .repeat_period = 150 },
+               .scancode_bits = 0xffff, .repeat_period = 250 },
        [RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse",
-               .scancode_bits = 0x1fffff, .repeat_period = 150 },
+               .scancode_bits = 0x1fffff, .repeat_period = 250 },
        [RC_PROTO_RC6_0] = { .name = "rc-6-0",
-               .scancode_bits = 0xffff, .repeat_period = 164 },
+               .scancode_bits = 0xffff, .repeat_period = 250 },
        [RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20",
-               .scancode_bits = 0xfffff, .repeat_period = 164 },
+               .scancode_bits = 0xfffff, .repeat_period = 250 },
        [RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24",
-               .scancode_bits = 0xffffff, .repeat_period = 164 },
+               .scancode_bits = 0xffffff, .repeat_period = 250 },
        [RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32",
-               .scancode_bits = 0xffffffff, .repeat_period = 164 },
+               .scancode_bits = 0xffffffff, .repeat_period = 250 },
        [RC_PROTO_RC6_MCE] = { .name = "rc-6-mce",
-               .scancode_bits = 0xffff7fff, .repeat_period = 164 },
+               .scancode_bits = 0xffff7fff, .repeat_period = 250 },
        [RC_PROTO_SHARP] = { .name = "sharp",
                .scancode_bits = 0x1fff, .repeat_period = 250 },
        [RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 250 },
@@ -170,10 +170,11 @@ static struct rc_map_list empty_map = {
  * @name:      name to assign to the table
  * @rc_proto:  ir type to assign to the new table
  * @size:      initial size of the table
- * @return:    zero on success or a negative error code
  *
  * This routine will initialize the rc_map and will allocate
  * memory to hold at least the specified number of elements.
+ *
+ * return:     zero on success or a negative error code
  */
 static int ir_create_table(struct rc_map *rc_map,
                           const char *name, u64 rc_proto, size_t size)
@@ -216,10 +217,11 @@ static void ir_free_table(struct rc_map *rc_map)
  * ir_resize_table() - resizes a scancode table if necessary
  * @rc_map:    the rc_map to resize
  * @gfp_flags: gfp flags to use when allocating memory
- * @return:    zero on success or a negative error code
  *
  * This routine will shrink the rc_map if it has lots of
  * unused entries and grow it if it is full.
+ *
+ * return:     zero on success or a negative error code
  */
 static int ir_resize_table(struct rc_map *rc_map, gfp_t gfp_flags)
 {
@@ -265,11 +267,13 @@ static int ir_resize_table(struct rc_map *rc_map, gfp_t gfp_flags)
  * @dev:       the struct rc_dev device descriptor
  * @rc_map:    scancode table to be adjusted
  * @index:     index of the mapping that needs to be updated
- * @keycode:   the desired keycode
- * @return:    previous keycode assigned to the mapping
+ * @new_keycode: the desired keycode
  *
  * This routine is used to update scancode->keycode mapping at given
  * position.
+ *
+ * return:     previous keycode assigned to the mapping
+ *
  */
 static unsigned int ir_update_mapping(struct rc_dev *dev,
                                      struct rc_map *rc_map,
@@ -320,12 +324,13 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
  * @scancode:  the desired scancode
  * @resize:    controls whether we allowed to resize the table to
  *             accommodate not yet present scancodes
- * @return:    index of the mapping containing scancode in question
- *             or -1U in case of failure.
  *
  * This routine is used to locate given scancode in rc_map.
  * If scancode is not yet present the routine will allocate a new slot
  * for it.
+ *
+ * return:     index of the mapping containing scancode in question
+ *             or -1U in case of failure.
  */
 static unsigned int ir_establish_scancode(struct rc_dev *dev,
                                          struct rc_map *rc_map,
@@ -375,11 +380,12 @@ static unsigned int ir_establish_scancode(struct rc_dev *dev,
 /**
  * ir_setkeycode() - set a keycode in the scancode->keycode table
  * @idev:      the struct input_dev device descriptor
- * @scancode:  the desired scancode
- * @keycode:   result
- * @return:    -EINVAL if the keycode could not be inserted, otherwise zero.
+ * @ke:                Input keymap entry
+ * @old_keycode: result
  *
  * This routine is used to handle evdev EVIOCSKEY ioctl.
+ *
+ * return:     -EINVAL if the keycode could not be inserted, otherwise zero.
  */
 static int ir_setkeycode(struct input_dev *idev,
                         const struct input_keymap_entry *ke,
@@ -422,11 +428,11 @@ out:
 /**
  * ir_setkeytable() - sets several entries in the scancode->keycode table
  * @dev:       the struct rc_dev device descriptor
- * @to:                the struct rc_map to copy entries to
  * @from:      the struct rc_map to copy entries from
- * @return:    -ENOMEM if all keycodes could not be inserted, otherwise zero.
  *
  * This routine is used to handle table initialization.
+ *
+ * return:     -ENOMEM if all keycodes could not be inserted, otherwise zero.
  */
 static int ir_setkeytable(struct rc_dev *dev,
                          const struct rc_map *from)
@@ -474,10 +480,11 @@ static int rc_map_cmp(const void *key, const void *elt)
  * ir_lookup_by_scancode() - locate mapping by scancode
  * @rc_map:    the struct rc_map to search
  * @scancode:  scancode to look for in the table
- * @return:    index in the table, -1U if not found
  *
  * This routine performs binary search in RC keykeymap table for
  * given scancode.
+ *
+ * return:     index in the table, -1U if not found
  */
 static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
                                          unsigned int scancode)
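
A sketch of that binary search, consistent with the rc_map_cmp() helper visible
in this hunk and the scan/len fields of struct rc_map; presented as an
illustration, not necessarily the exact upstream body:

static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
                                          unsigned int scancode)
{
        struct rc_map_table *res;

        /* rc_map->scan is kept sorted by scancode, so bsearch() applies. */
        res = bsearch(&scancode, rc_map->scan, rc_map->len,
                      sizeof(struct rc_map_table), rc_map_cmp);

        return res ? (unsigned int)(res - rc_map->scan) : -1U;
}
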
@@ -495,11 +502,11 @@ static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
 /**
  * ir_getkeycode() - get a keycode from the scancode->keycode table
  * @idev:      the struct input_dev device descriptor
- * @scancode:  the desired scancode
- * @keycode:   used to return the keycode, if found, or KEY_RESERVED
- * @return:    always returns zero.
+ * @ke:                Input keymap entry
  *
  * This routine is used to handle evdev EVIOCGKEY ioctl.
+ *
+ * return:     always returns zero.
  */
 static int ir_getkeycode(struct input_dev *idev,
                         struct input_keymap_entry *ke)
@@ -556,11 +563,12 @@ out:
  * rc_g_keycode_from_table() - gets the keycode that corresponds to a scancode
  * @dev:       the struct rc_dev descriptor of the device
  * @scancode:  the scancode to look for
- * @return:    the corresponding keycode, or KEY_RESERVED
  *
  * This routine is used by drivers which need to convert a scancode to a
  * keycode. Normally it should not be used since drivers should have no
  * interest in keycodes.
+ *
+ * return:     the corresponding keycode, or KEY_RESERVED
  */
 u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode)
 {
@@ -625,7 +633,8 @@ EXPORT_SYMBOL_GPL(rc_keyup);
 
 /**
  * ir_timer_keyup() - generates a keyup event after a timeout
- * @cookie:    a pointer to the struct rc_dev for the device
+ *
+ * @t:         a pointer to the struct timer_list
  *
  * This routine will generate a keyup event some time after a keydown event
  * is generated when no further activity has been detected.
@@ -780,7 +789,8 @@ EXPORT_SYMBOL_GPL(rc_keydown_notimeout);
  *                       provides sensible defaults
  * @dev:       the struct rc_dev descriptor of the device
  * @filter:    the scancode and mask
- * @return:    0 or -EINVAL if the filter is not valid
+ *
+ * return:     0 or -EINVAL if the filter is not valid
  */
 static int rc_validate_filter(struct rc_dev *dev,
                              struct rc_scancode_filter *filter)
index 7612066..9ee2c91 100644 (file)
@@ -57,7 +57,7 @@ static void add_read_queue(int flag, unsigned long val);
 static irqreturn_t sir_interrupt(int irq, void *dev_id);
 static void send_space(unsigned long len);
 static void send_pulse(unsigned long len);
-static void init_hardware(void);
+static int init_hardware(void);
 static void drop_hardware(void);
 /* Initialisation */
 
@@ -263,11 +263,36 @@ static void send_pulse(unsigned long len)
        }
 }
 
-static void init_hardware(void)
+static int init_hardware(void)
 {
+       u8 scratch, scratch2, scratch3;
        unsigned long flags;
 
        spin_lock_irqsave(&hardware_lock, flags);
+
+       /*
+        * This is a simple port existence test, borrowed from the autoconfig
+        * function in drivers/tty/serial/8250/8250_port.c
+        */
+       scratch = sinp(UART_IER);
+       soutp(UART_IER, 0);
+#ifdef __i386__
+       outb(0xff, 0x080);
+#endif
+       scratch2 = sinp(UART_IER) & 0x0f;
+       soutp(UART_IER, 0x0f);
+#ifdef __i386__
+       outb(0x00, 0x080);
+#endif
+       scratch3 = sinp(UART_IER) & 0x0f;
+       soutp(UART_IER, scratch);
+       if (scratch2 != 0 || scratch3 != 0x0f) {
+               /* we fail, there's nothing here */
+               spin_unlock_irqrestore(&hardware_lock, flags);
+               pr_err("port existence test failed, cannot continue\n");
+               return -ENODEV;
+       }
+
        /* reset UART */
        outb(0, io + UART_MCR);
        outb(0, io + UART_IER);
@@ -285,6 +310,8 @@ static void init_hardware(void)
        /* turn on UART */
        outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
        spin_unlock_irqrestore(&hardware_lock, flags);
+
+       return 0;
 }
 
 static void drop_hardware(void)
@@ -334,14 +361,19 @@ static int sir_ir_probe(struct platform_device *dev)
                pr_err("IRQ %d already in use.\n", irq);
                return retval;
        }
+
+       retval = init_hardware();
+       if (retval) {
+               del_timer_sync(&timerlist);
+               return retval;
+       }
+
        pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
 
        retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev);
        if (retval < 0)
                return retval;
 
-       init_hardware();
-
        return 0;
 }
 
index a8e39c6..d2efd7b 100644 (file)
@@ -49,7 +49,7 @@ struct st_rc_device {
 #define IRB_RX_NOISE_SUPPR      0x5c   /* noise suppression  */
 #define IRB_RX_POLARITY_INV     0x68   /* polarity inverter  */
 
-/**
+/*
  * IRQ set: Enable full FIFO                 1  -> bit  3;
  *          Enable overrun IRQ               1  -> bit  2;
  *          Enable last symbol IRQ           1  -> bit  1:
@@ -72,7 +72,7 @@ static void st_rc_send_lirc_timeout(struct rc_dev *rdev)
        ir_raw_event_store(rdev, &ev);
 }
 
-/**
+/*
  * RX graphical example to better understand the difference between ST IR block
  * output and standard definition used by LIRC (and most of the world!)
  *
@@ -317,7 +317,7 @@ static int st_rc_probe(struct platform_device *pdev)
        device_init_wakeup(dev, true);
        dev_pm_set_wake_irq(dev, rc_dev->irq);
 
-       /**
+       /*
         * for LIRC_MODE_MODE2 or LIRC_MODE_PULSE or LIRC_MODE_RAW
         * lircd expects a long space first before a signal train to sync.
         */
index 4eebfcf..c9a70fd 100644 (file)
@@ -191,7 +191,7 @@ static void sz_push_half_space(struct streamzap_ir *sz,
        sz_push_full_space(sz, value & SZ_SPACE_MASK);
 }
 
-/**
+/*
  * streamzap_callback - usb IRQ handler callback
  *
  * This procedure is invoked on reception of data from
@@ -321,7 +321,7 @@ out:
        return NULL;
 }
 
-/**
+/*
  *     streamzap_probe
  *
  *     Called by usb-core to associated with a candidate device
@@ -450,7 +450,7 @@ free_sz:
        return retval;
 }
 
-/**
+/*
  * streamzap_disconnect
  *
  * Called by the usb core when the device is removed from the system.
index 8b39d8d..5c87c5c 100644 (file)
@@ -1397,9 +1397,9 @@ static u32 MT2063_Round_fLO(u32 f_LO, u32 f_LO_Step, u32 f_ref)
  *                        risk of overflow.  It accurately calculates
  *                        f_ref * num / denom to within 1 HZ with fixed math.
  *
- * @num :      Fractional portion of the multiplier
+ * @f_ref:     SRO frequency.
+ * @num:       Fractional portion of the multiplier
  * @denom:     denominator portion of the ratio
- * @f_Ref:     SRO frequency.
  *
  * This calculation handles f_ref as two separate 14-bit fields.
  * Therefore, a maximum value of 2^28-1 may safely be used for f_ref.
@@ -1464,8 +1464,6 @@ static u32 MT2063_CalcLO1Mult(u32 *Div,
  * @f_LO:      desired LO frequency.
  * @f_LO_Step: Minimum step size for the LO (in Hz).
  * @f_Ref:     SRO frequency.
- * @f_Avoid:   Range of PLL frequencies to avoid near
- *             integer multiples of f_Ref (in Hz).
  *
  * Returns: Recalculated LO frequency.
  */
index f9772ad..5a2f813 100644 (file)
@@ -26,7 +26,7 @@
 #include "cinergyT2.h"
 
 
-/**
+/*
  *  convert linux-dvb frontend parameter set into TPS.
  *  See ETSI ETS-300744, section 4.6.2, table 9 for details.
  *
index 92098c1..366b055 100644 (file)
@@ -1677,10 +1677,10 @@ static int dib8096_set_param_override(struct dvb_frontend *fe)
                return -EINVAL;
        }
 
-       /** Update PLL if needed ratio **/
+       /* Update PLL if needed ratio */
        state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, 0);
 
-       /** Get optimize PLL ratio to remove spurious **/
+       /* Get optimize PLL ratio to remove spurious */
        pll_ratio = dib8090_compute_pll_parameters(fe);
        if (pll_ratio == 17)
                timf = 21387946;
@@ -1691,7 +1691,7 @@ static int dib8096_set_param_override(struct dvb_frontend *fe)
        else
                timf = 18179756;
 
-       /** Update ratio **/
+       /* Update ratio */
        state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, pll_ratio);
 
        state->dib8000_ops.ctrl_timf(fe, DEMOD_TIMF_SET, timf);
@@ -3357,7 +3357,7 @@ static int novatd_sleep_override(struct dvb_frontend* fe)
        return state->sleep(fe);
 }
 
-/**
+/*
  * novatd_frontend_attach - Nova-TD specific attach
  *
  * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for
index 8207e69..bcacb0f 100644 (file)
@@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo);
 
 int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
 {
-       u8 wbuf[1] = { offs };
-       return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1);
+       u8 *buf;
+       int rc;
+
+       buf = kmalloc(2, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       buf[0] = offs;
+
+       rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1);
+       *val = buf[1];
+       kfree(buf);
+
+       return rc;
 }
 EXPORT_SYMBOL(dibusb_read_eeprom_byte);
 
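dibusb_read_eeprom_byte() above is switched from a stack buffer to a kmalloc'd one because buffers handed to I2C/USB message helpers may be mapped for DMA, and DMA to stack memory is not allowed. The same rule applies to any USB transfer; here is a rough sketch with a plain control message (read_reg_dma_safe() and the vendor request 0x01 are made-up examples, not from this patch):

#include <linux/slab.h>
#include <linux/usb.h>

static int read_reg_dma_safe(struct usb_device *udev, u8 reg, u8 *val)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);	/* heap memory is safe to DMA-map */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x01,	/* hypothetical vendor request */
			      USB_TYPE_VENDOR | USB_DIR_IN,
			      0, reg, buf, 1, 1000);
	if (ret >= 0)
		*val = buf[0];		/* copy the result out before freeing */

	kfree(buf);
	return ret < 0 ? ret : 0;
}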
index 4126131..b6046e0 100644 (file)
@@ -297,7 +297,7 @@ static int jdvbt90502_set_frontend(struct dvb_frontend *fe)
 }
 
 
-/**
+/*
  * (reg, val) command list to initialize this module.
  *  captured on a Windows box.
  */
index 62abe6c..1687594 100644 (file)
@@ -21,7 +21,7 @@ MODULE_PARM_DESC(debug,
 
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
-/**
+/*
  * Indirect I2C access to the PLL via FE.
  * whole I2C protocol data to the PLL is sent via the FE's I2C register.
  * This is done by a control msg to the FE with the I2C data accompanied, and
index f1537da..1b30434 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * OV519 driver
  *
  * Copyright (C) 2008-2011 Jean-François Moine <moinejf@free.fr>
index 3792fed..1283b3b 100644 (file)
@@ -649,11 +649,10 @@ static void DecompressBand23(struct pwc_dec23_private *pdec,
 }
 
 /**
- *
  * Uncompress a pwc23 buffer.
- *
- * src: raw data
- * dst: image output
+ * @pdev: pointer to pwc device's internal struct
+ * @src: raw data
+ * @dst: image output
  */
 void pwc_dec23_decompress(struct pwc_device *pdev,
                          const void *src,
index 8c1f926..d07349c 100644 (file)
@@ -74,7 +74,7 @@ struct smsusb_device_t {
 static int smsusb_submit_urb(struct smsusb_device_t *dev,
                             struct smsusb_urb_t *surb);
 
-/**
+/*
  * Completing URB's callback handler - bottom half (process context)
  * submits the URB prepared on smsusb_onresponse()
  */
@@ -86,7 +86,7 @@ static void do_submit_urb(struct work_struct *work)
        smsusb_submit_urb(dev, surb);
 }
 
-/**
+/*
  * Completing URB's callback handler - top half (interrupt context)
  * adds completing sms urb to the global surbs list and activates the worker
  * thread to process the surb
index b842f36..a142b9d 100644 (file)
@@ -76,7 +76,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 #define TTUSB_REV_2_2  0x22
 #define TTUSB_BUDGET_NAME "ttusb_stc_fw"
 
-/**
+/*
  *  since we're casting (struct ttusb*) <-> (struct dvb_demux*) around
  *  the dvb_demux field must be the first in struct!!
  */
@@ -713,7 +713,7 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
                                        }
                                }
 
-                       /**
+                       /*
                         * if length is valid and we reached the end:
                         * goto next muxpack
                         */
@@ -729,7 +729,7 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
                                        /* maximum bytes, until we know the length */
                                        ttusb->muxpack_len = 2;
 
-                               /**
+                               /*
                                 * no muxpacks left?
                                 * return to search-sync state
                                 */
index b55b79b..127f8a0 100644 (file)
@@ -144,6 +144,7 @@ static void usbtv_disconnect(struct usb_interface *intf)
 
 static const struct usb_device_id usbtv_id_table[] = {
        { USB_DEVICE(0x1b71, 0x3002) },
+       { USB_DEVICE(0x1f71, 0x3301) },
        {}
 };
 MODULE_DEVICE_TABLE(usb, usbtv_id_table);
index 8db45df..82852f2 100644 (file)
@@ -239,7 +239,7 @@ static const struct analog_demod_ops tuner_analog_ops = {
  * @type:              type of the tuner (e. g. tuner number)
  * @new_mode_mask:     Indicates if tuner supports TV and/or Radio
  * @new_config:                an optional parameter used by a few tuners to adjust
-                      internal parameters, like LNA mode
+ *                     internal parameters, like LNA mode
  * @tuner_callback:    an optional function to be called when switching
  *                     to analog mode
  *
@@ -750,6 +750,7 @@ static int tuner_remove(struct i2c_client *client)
 /**
  * check_mode - Verify if tuner supports the requested mode
  * @t: a pointer to the module's internal struct_tuner
+ * @mode: mode of the tuner, as defined by &enum v4l2_tuner_type.
  *
  * This function checks if the tuner is capable of tuning analog TV,
  * digital TV or radio, depending on what the caller wants. If the
@@ -757,6 +758,7 @@ static int tuner_remove(struct i2c_client *client)
  * returns 0.
  * This function is needed for boards that have a separate tuner for
  * radio (like devices with tea5767).
+ *
  * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
  *       select a TV frequency. So, t_mode = T_ANALOG_TV could actually
  *      be used to represent a Digital TV too.
index a7c3464..e5acfab 100644 (file)
@@ -558,8 +558,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
                if (!asd)
                        continue;
 
-               ret = v4l2_async_match_notify(notifier, notifier->v4l2_dev, sd,
-                                             asd);
+               ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
                if (ret)
                        goto err_unbind;
 
index 5c8c49d..930f9c5 100644 (file)
@@ -245,11 +245,11 @@ EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic);
 
 /**
  * v4l2_match_dv_timings - check if two timings match
- * @t1 - compare this v4l2_dv_timings struct...
- * @t2 - with this struct.
- * @pclock_delta - the allowed pixelclock deviation.
- * @match_reduced_fps - if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
- * match.
+ * @t1: compare this v4l2_dv_timings struct...
+ * @t2: with this struct.
+ * @pclock_delta: the allowed pixelclock deviation.
+ * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
+ *     match.
  *
  * Compare t1 with t2 with a given margin of error for the pixelclock.
  */
index 681b192..fb72c7a 100644 (file)
@@ -458,11 +458,6 @@ static int __v4l2_async_notifier_parse_fwnode_endpoints(
                if (!is_available)
                        continue;
 
-               if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
-                       ret = -EINVAL;
-                       break;
-               }
-
                if (has_port) {
                        struct fwnode_endpoint ep;
 
@@ -474,6 +469,11 @@ static int __v4l2_async_notifier_parse_fwnode_endpoints(
                                continue;
                }
 
+               if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
+                       ret = -EINVAL;
+                       break;
+               }
+
                ret = v4l2_async_notifier_fwnode_parse_endpoint(
                        dev, notifier, fwnode, asd_struct_size, parse_endpoint);
                if (ret < 0)
index f62e68a..bc580fb 100644 (file)
@@ -183,6 +183,7 @@ EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
 
 /**
  * v4l2_m2m_try_run() - select next job to perform and run it if possible
+ * @m2m_dev: per-device context
  *
  * Get next transaction (if present) from the waiting jobs list and run it.
  */
@@ -281,6 +282,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
 
 /**
  * v4l2_m2m_cancel_job() - cancel pending jobs for the context
+ * @m2m_ctx: m2m context with jobs to be canceled
  *
  * In case of streamoff or release called on any context,
  * 1] If the context is currently running, then abort job will be called
index 1dbf6f7..e87fb13 100644 (file)
@@ -222,7 +222,7 @@ int videobuf_queue_is_busy(struct videobuf_queue *q)
 }
 EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
 
-/**
+/*
  * __videobuf_free() - free all the buffers and their control structures
  *
  * This function can only be called if streaming/reading is off, i.e. no buffers
index 0b5c43f..f412429 100644 (file)
@@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
        dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
                data, size, dma->nr_pages);
 
-       err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+       err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
                             flags, dma->pages, NULL);
 
        if (err != dma->nr_pages) {
                dma->nr_pages = (err >= 0) ? err : 0;
-               dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
+               dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+                       dma->nr_pages);
                return err < 0 ? err : -EINVAL;
        }
        return 0;
index cb115ba..a8589d9 100644 (file)
@@ -188,7 +188,7 @@ module_param(debug, int, 0644);
 static void __vb2_queue_cancel(struct vb2_queue *q);
 static void __enqueue_in_driver(struct vb2_buffer *vb);
 
-/**
+/*
  * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
  */
 static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
@@ -229,7 +229,7 @@ free:
        return ret;
 }
 
-/**
+/*
  * __vb2_buf_mem_free() - free memory of the given buffer
  */
 static void __vb2_buf_mem_free(struct vb2_buffer *vb)
@@ -243,7 +243,7 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)
        }
 }
 
-/**
+/*
  * __vb2_buf_userptr_put() - release userspace memory associated with
  * a USERPTR buffer
  */
@@ -258,7 +258,7 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
        }
 }
 
-/**
+/*
  * __vb2_plane_dmabuf_put() - release memory associated with
  * a DMABUF shared plane
  */
@@ -277,7 +277,7 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
        p->dbuf_mapped = 0;
 }
 
-/**
+/*
  * __vb2_buf_dmabuf_put() - release memory associated with
  * a DMABUF shared buffer
  */
@@ -289,7 +289,7 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
                __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
 }
 
-/**
+/*
  * __setup_offsets() - setup unique offsets ("cookies") for every plane in
  * the buffer.
  */
@@ -317,7 +317,7 @@ static void __setup_offsets(struct vb2_buffer *vb)
        }
 }
 
-/**
+/*
  * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
  * video buffer memory for all buffers/planes on the queue and initializes the
  * queue
@@ -386,7 +386,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
        return buffer;
 }
 
-/**
+/*
  * __vb2_free_mem() - release all video buffer memory for a given queue
  */
 static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
@@ -410,7 +410,7 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
        }
 }
 
-/**
+/*
  * __vb2_queue_free() - free buffers at the end of the queue - video memory and
  * related information, if no buffers are left return the queue to an
  * uninitialized state. Might be called even if the queue has already been freed.
@@ -544,7 +544,7 @@ bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
 }
 EXPORT_SYMBOL(vb2_buffer_in_use);
 
-/**
+/*
  * __buffers_in_use() - return true if any buffers on the queue are in use and
  * the queue cannot be freed (by the means of REQBUFS(0)) call
  */
@@ -564,7 +564,7 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
 }
 EXPORT_SYMBOL_GPL(vb2_core_querybuf);
 
-/**
+/*
  * __verify_userptr_ops() - verify that all memory operations required for
  * USERPTR queue type have been provided
  */
@@ -577,7 +577,7 @@ static int __verify_userptr_ops(struct vb2_queue *q)
        return 0;
 }
 
-/**
+/*
  * __verify_mmap_ops() - verify that all memory operations required for
  * MMAP queue type have been provided
  */
@@ -590,7 +590,7 @@ static int __verify_mmap_ops(struct vb2_queue *q)
        return 0;
 }
 
-/**
+/*
  * __verify_dmabuf_ops() - verify that all memory operations required for
  * DMABUF queue type have been provided
  */
@@ -953,7 +953,7 @@ void vb2_discard_done(struct vb2_queue *q)
 }
 EXPORT_SYMBOL_GPL(vb2_discard_done);
 
-/**
+/*
  * __prepare_mmap() - prepare an MMAP buffer
  */
 static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
@@ -966,7 +966,7 @@ static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
        return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
 }
 
-/**
+/*
  * __prepare_userptr() - prepare a USERPTR buffer
  */
 static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
@@ -1082,7 +1082,7 @@ err:
        return ret;
 }
 
-/**
+/*
  * __prepare_dmabuf() - prepare a DMABUF buffer
  */
 static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
@@ -1215,7 +1215,7 @@ err:
        return ret;
 }
 
-/**
+/*
  * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
  */
 static void __enqueue_in_driver(struct vb2_buffer *vb)
@@ -1298,7 +1298,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
 }
 EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
 
-/**
+/*
  * vb2_start_streaming() - Attempt to start streaming.
  * @q:         videobuf2 queue
  *
@@ -1427,7 +1427,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
 }
 EXPORT_SYMBOL_GPL(vb2_core_qbuf);
 
-/**
+/*
  * __vb2_wait_for_done_vb() - wait for a buffer to become available
  * for dequeuing
  *
@@ -1502,7 +1502,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
        return 0;
 }
 
-/**
+/*
  * __vb2_get_done_vb() - get a buffer ready for dequeuing
  *
  * Will sleep if required for nonblocking == false.
@@ -1553,7 +1553,7 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
 }
 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
 
-/**
+/*
  * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
  */
 static void __vb2_dqbuf(struct vb2_buffer *vb)
@@ -1625,7 +1625,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
 }
 EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
 
-/**
+/*
  * __vb2_queue_cancel() - cancel and stop (pause) streaming
  *
  * Removes all queued buffers from driver's queue and all buffers queued by
@@ -1773,7 +1773,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
 }
 EXPORT_SYMBOL_GPL(vb2_core_streamoff);
 
-/**
+/*
  * __find_plane_by_offset() - find plane associated with the given offset off
  */
 static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
@@ -2104,7 +2104,7 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
 }
 EXPORT_SYMBOL_GPL(vb2_core_poll);
 
-/**
+/*
  * struct vb2_fileio_buf - buffer context used by file io emulator
  *
  * vb2 provides a compatibility layer and emulator of file io (read and
@@ -2118,7 +2118,7 @@ struct vb2_fileio_buf {
        unsigned int queued:1;
 };
 
-/**
+/*
  * struct vb2_fileio_data - queue context used by file io emulator
  *
  * @cur_index: the index of the buffer currently being read from or
@@ -2155,7 +2155,7 @@ struct vb2_fileio_data {
        unsigned write_immediately:1;
 };
 
-/**
+/*
  * __vb2_init_fileio() - initialize file io emulator
  * @q:         videobuf2 queue
  * @read:      mode selector (1 means read, 0 means write)
@@ -2274,7 +2274,7 @@ err_kfree:
        return ret;
 }
 
-/**
+/*
  * __vb2_cleanup_fileio() - free resources used by file io emulator
  * @q:         videobuf2 queue
  */
@@ -2293,7 +2293,7 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
        return 0;
 }
 
-/**
+/*
  * __vb2_perform_fileio() - perform a single file io (read or write) operation
  * @q:         videobuf2 queue
  * @data:      pointer to target userspace buffer
index 4bb8424..89e5198 100644 (file)
@@ -120,7 +120,7 @@ static void vb2_common_vm_close(struct vm_area_struct *vma)
        h->put(h->arg);
 }
 
-/**
+/*
  * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
  * video buffers
  */
index 0c06699..4075314 100644 (file)
@@ -49,7 +49,7 @@ module_param(debug, int, 0644);
 #define V4L2_BUFFER_OUT_FLAGS  (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
                                 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
 
-/**
+/*
  * __verify_planes_array() - verify that the planes array passed in struct
  * v4l2_buffer from userspace can be safely used
  */
@@ -78,7 +78,7 @@ static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
        return __verify_planes_array(vb, pb);
 }
 
-/**
+/*
  * __verify_length() - Verify that the bytesused value for each plane fits in
  * the plane length and that the data offset doesn't exceed the bytesused value.
  */
@@ -181,7 +181,7 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
        return __verify_planes_array(q->bufs[b->index], b);
 }
 
-/**
+/*
  * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
  * returned to userspace
  */
@@ -286,7 +286,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
                q->last_buffer_dequeued = true;
 }
 
-/**
+/*
  * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
  * v4l2_buffer by the userspace. It also verifies that struct
  * v4l2_buffer has a valid number of planes.
@@ -446,7 +446,7 @@ static const struct vb2_buf_ops v4l2_buf_ops = {
        .copy_timestamp         = __copy_timestamp,
 };
 
-/**
+/*
  * vb2_querybuf() - query video buffer information
  * @q:         videobuf queue
  * @b:         buffer struct passed from userspace to vidioc_querybuf handler
index 7310e32..aa2b078 100644 (file)
@@ -45,7 +45,7 @@ config MEMSTICK_R592
 
 config MEMSTICK_REALTEK_PCI
        tristate "Realtek PCI-E Memstick Card Interface Driver"
-       depends on MFD_RTSX_PCI
+       depends on MISC_RTSX_PCI
        help
          Say Y here to include driver code to support Memstick card interface
          of Realtek PCI-E card reader
@@ -55,7 +55,7 @@ config MEMSTICK_REALTEK_PCI
 
 config MEMSTICK_REALTEK_USB
        tristate "Realtek USB Memstick Card Interface Driver"
-       depends on MFD_RTSX_USB
+       depends on MISC_RTSX_USB
        help
          Say Y here to include driver code to support Memstick card interface
          of Realtek RTS5129/39 series USB card reader
index 818fa94..a44b457 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/memstick.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
 #include <asm/unaligned.h>
 
 struct realtek_pci_ms {
index 2e3cf01..4f64563 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/workqueue.h>
 #include <linux/memstick.h>
 #include <linux/kthread.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
 #include <linux/pm_runtime.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
index 1d20a80..b860eb5 100644 (file)
@@ -222,6 +222,16 @@ config MFD_CROS_EC_SPI
          response time cannot be guaranteed, we support ignoring
          'pre-amble' bytes before the response actually starts.
 
+config MFD_CROS_EC_CHARDEV
+        tristate "Chrome OS Embedded Controller userspace device interface"
+        depends on MFD_CROS_EC
+        select CROS_EC_CTL
+        ---help---
+          This driver adds support to talk with the ChromeOS EC from userspace.
+
+          If you have a supported Chromebook, choose Y or M here.
+          The module will be called cros_ec_dev.
+
 config MFD_ASIC3
        bool "Compaq ASIC3"
        depends on GPIOLIB && ARM
@@ -877,7 +887,7 @@ config UCB1400_CORE
 
 config MFD_PM8XXX
        tristate "Qualcomm PM8xxx PMIC chips driver"
-       depends on (ARM || HEXAGON)
+       depends on (ARM || HEXAGON || COMPILE_TEST)
        select IRQ_DOMAIN
        select MFD_CORE
        select REGMAP
@@ -929,17 +939,6 @@ config MFD_RDC321X
          southbridge which provides access to GPIOs and Watchdog using the
          southbridge PCI device configuration space.
 
-config MFD_RTSX_PCI
-       tristate "Realtek PCI-E card reader"
-       depends on PCI
-       select MFD_CORE
-       help
-         This supports for Realtek PCI-Express card reader including rts5209,
-         rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, etc.
-         Realtek card reader supports access to many types of memory cards,
-         such as Memory Stick, Memory Stick Pro, Secure Digital and
-         MultiMediaCard.
-
 config MFD_RT5033
        tristate "Richtek RT5033 Power Management IC"
        depends on I2C
@@ -953,16 +952,6 @@ config MFD_RT5033
          sub-devices like charger, fuel gauge, flash LED, current source,
          LDO and Buck.
 
-config MFD_RTSX_USB
-       tristate "Realtek USB card reader"
-       depends on USB
-       select MFD_CORE
-       help
-         Select this option to get support for Realtek USB 2.0 card readers
-         including RTS5129, RTS5139, RTS5179 and RTS5170.
-         Realtek card reader supports access to many types of memory cards,
-         such as Memory Stick Pro, Secure Digital and MultiMediaCard.
-
 config MFD_RC5T583
        bool "Ricoh RC5T583 Power Management system device"
        depends on I2C=y
@@ -1859,5 +1848,13 @@ config MFD_VEXPRESS_SYSREG
          System Registers are the platform configuration block
          on the ARM Ltd. Versatile Express board.
 
+config RAVE_SP_CORE
+       tristate "RAVE SP MCU core driver"
+       depends on SERIAL_DEV_BUS
+       select CRC_CCITT
+       help
+         Select this to get support for the Supervisory Processor
+         device found on several devices in the RAVE line of hardware.
+
 endmenu
 endif
index d9474ad..d9d2cf0 100644 (file)
@@ -17,12 +17,9 @@ cros_ec_core-$(CONFIG_ACPI)  += cros_ec_acpi_gpe.o
 obj-$(CONFIG_MFD_CROS_EC)      += cros_ec_core.o
 obj-$(CONFIG_MFD_CROS_EC_I2C)  += cros_ec_i2c.o
 obj-$(CONFIG_MFD_CROS_EC_SPI)  += cros_ec_spi.o
+obj-$(CONFIG_MFD_CROS_EC_CHARDEV) += cros_ec_dev.o
 obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
 
-rtsx_pci-objs                  := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
-obj-$(CONFIG_MFD_RTSX_PCI)     += rtsx_pci.o
-obj-$(CONFIG_MFD_RTSX_USB)     += rtsx_usb.o
-
 obj-$(CONFIG_HTC_PASIC3)       += htc-pasic3.o
 obj-$(CONFIG_HTC_I2CPLD)       += htc-i2cpld.o
 
@@ -230,3 +227,5 @@ obj-$(CONFIG_MFD_STM32_LPTIMER)     += stm32-lptimer.o
 obj-$(CONFIG_MFD_STM32_TIMERS)         += stm32-timers.o
 obj-$(CONFIG_MFD_MXS_LRADC)     += mxs-lradc.o
 obj-$(CONFIG_MFD_SC27XX_PMIC)  += sprd-sc27xx-spi.o
+obj-$(CONFIG_RAVE_SP_CORE)     += rave-sp.o
+
index c1c8152..1afa27d 100644 (file)
@@ -1258,6 +1258,19 @@ static struct ab8500_prcmu_ranges ab8540_debug_ranges[AB8500_NUM_BANKS] = {
        },
 };
 
+#define DEFINE_SHOW_ATTRIBUTE(__name)                                        \
+static int __name ## _open(struct inode *inode, struct file *file)           \
+{                                                                            \
+       return single_open(file, __name ## _show, inode->i_private);          \
+}                                                                            \
+                                                                             \
+static const struct file_operations __name ## _fops = {                              \
+       .owner          = THIS_MODULE,                                        \
+       .open           = __name ## _open,                                    \
+       .read           = seq_read,                                           \
+       .llseek         = seq_lseek,                                          \
+       .release        = single_release,                                     \
+}                                                                            \
 
 static irqreturn_t ab8500_debug_handler(int irq, void *data)
 {
@@ -1318,7 +1331,7 @@ static int ab8500_registers_print(struct device *dev, u32 bank,
        return 0;
 }
 
-static int ab8500_print_bank_registers(struct seq_file *s, void *p)
+static int ab8500_bank_registers_show(struct seq_file *s, void *p)
 {
        struct device *dev = s->private;
        u32 bank = debug_bank;
@@ -1330,18 +1343,7 @@ static int ab8500_print_bank_registers(struct seq_file *s, void *p)
        return ab8500_registers_print(dev, bank, s);
 }
 
-static int ab8500_registers_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8500_print_bank_registers, inode->i_private);
-}
-
-static const struct file_operations ab8500_registers_fops = {
-       .open = ab8500_registers_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_bank_registers);
 
 static int ab8500_print_all_banks(struct seq_file *s, void *p)
 {
@@ -1528,7 +1530,7 @@ void ab8500_debug_register_interrupt(int line)
                num_interrupts[line]++;
 }
 
-static int ab8500_interrupts_print(struct seq_file *s, void *p)
+static int ab8500_interrupts_show(struct seq_file *s, void *p)
 {
        int line;
 
@@ -1557,10 +1559,7 @@ static int ab8500_interrupts_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_interrupts_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8500_interrupts_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_interrupts);
 
 /*
  * - HWREG DB8500 formatted routines
@@ -1603,7 +1602,7 @@ static int ab8500_hwreg_open(struct inode *inode, struct file *file)
 #define AB8500_LAST_SIM_REG 0x8B
 #define AB8505_LAST_SIM_REG 0x8C
 
-static int ab8500_print_modem_registers(struct seq_file *s, void *p)
+static int ab8500_modem_show(struct seq_file *s, void *p)
 {
        struct device *dev = s->private;
        struct ab8500 *ab8500;
@@ -1620,18 +1619,15 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
 
        err = abx500_get_register_interruptible(dev,
                AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, &orig_value);
-       if (err < 0) {
-               dev_err(dev, "ab->read fail %d\n", err);
-               return err;
-       }
+       if (err < 0)
+               goto report_read_failure;
+
        /* Config 1 will allow APE side to read SIM registers */
        err = abx500_set_register_interruptible(dev,
                AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG,
                AB8500_SUPPLY_CONTROL_CONFIG_1);
-       if (err < 0) {
-               dev_err(dev, "ab->write fail %d\n", err);
-               return err;
-       }
+       if (err < 0)
+               goto report_write_failure;
 
        seq_printf(s, " bank 0x%02X:\n", bank);
 
@@ -1641,36 +1637,30 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
        for (reg = AB8500_FIRST_SIM_REG; reg <= last_sim_reg; reg++) {
                err = abx500_get_register_interruptible(dev,
                        bank, reg, &value);
-               if (err < 0) {
-                       dev_err(dev, "ab->read fail %d\n", err);
-                       return err;
-               }
+               if (err < 0)
+                       goto report_read_failure;
+
                seq_printf(s, "  [0x%02X/0x%02X]: 0x%02X\n", bank, reg, value);
        }
        err = abx500_set_register_interruptible(dev,
                AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, orig_value);
-       if (err < 0) {
-               dev_err(dev, "ab->write fail %d\n", err);
-               return err;
-       }
+       if (err < 0)
+               goto report_write_failure;
+
        return 0;
-}
 
-static int ab8500_modem_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8500_print_modem_registers,
-                          inode->i_private);
+report_read_failure:
+       dev_err(dev, "ab->read fail %d\n", err);
+       return err;
+
+report_write_failure:
+       dev_err(dev, "ab->write fail %d\n", err);
+       return err;
 }
 
-static const struct file_operations ab8500_modem_fops = {
-       .open = ab8500_modem_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_modem);
 
-static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bat_ctrl_show(struct seq_file *s, void *p)
 {
        int bat_ctrl_raw;
        int bat_ctrl_convert;
@@ -1687,21 +1677,9 @@ static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8500_gpadc_bat_ctrl_print,
-                          inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
-       .open = ab8500_gpadc_bat_ctrl_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bat_ctrl);
 
-static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_btemp_ball_show(struct seq_file *s, void *p)
 {
        int btemp_ball_raw;
        int btemp_ball_convert;
@@ -1718,22 +1696,9 @@ static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
-                                       struct file *file)
-{
-       return single_open(file, ab8500_gpadc_btemp_ball_print,
-                          inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_btemp_ball);
 
-static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
-       .open = ab8500_gpadc_btemp_ball_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_v_show(struct seq_file *s, void *p)
 {
        int main_charger_v_raw;
        int main_charger_v_convert;
@@ -1750,22 +1715,9 @@ static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
-                                           struct file *file)
-{
-       return single_open(file, ab8500_gpadc_main_charger_v_print,
-               inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
-       .open = ab8500_gpadc_main_charger_v_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_v);
 
-static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect1_show(struct seq_file *s, void *p)
 {
        int acc_detect1_raw;
        int acc_detect1_convert;
@@ -1782,22 +1734,9 @@ static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
-                                        struct file *file)
-{
-       return single_open(file, ab8500_gpadc_acc_detect1_print,
-               inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
-       .open = ab8500_gpadc_acc_detect1_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect1);
 
-static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect2_show(struct seq_file *s, void *p)
 {
        int acc_detect2_raw;
        int acc_detect2_convert;
@@ -1814,22 +1753,9 @@ static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
-               struct file *file)
-{
-       return single_open(file, ab8500_gpadc_acc_detect2_print,
-               inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
-       .open = ab8500_gpadc_acc_detect2_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect2);
 
-static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux1_show(struct seq_file *s, void *p)
 {
        int aux1_raw;
        int aux1_convert;
@@ -1846,20 +1772,9 @@ static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8500_gpadc_aux1_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux1);
 
-static const struct file_operations ab8500_gpadc_aux1_fops = {
-       .open = ab8500_gpadc_aux1_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux2_show(struct seq_file *s, void *p)
 {
        int aux2_raw;
        int aux2_convert;
@@ -1876,20 +1791,9 @@ static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8500_gpadc_aux2_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_aux2_fops = {
-       .open = ab8500_gpadc_aux2_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux2);
 
-static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_bat_v_show(struct seq_file *s, void *p)
 {
        int main_bat_v_raw;
        int main_bat_v_convert;
@@ -1906,22 +1810,9 @@ static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
-                                       struct file *file)
-{
-       return single_open(file, ab8500_gpadc_main_bat_v_print,
-                          inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_bat_v);
 
-static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
-       .open = ab8500_gpadc_main_bat_v_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_vbus_v_show(struct seq_file *s, void *p)
 {
        int vbus_v_raw;
        int vbus_v_convert;
@@ -1938,20 +1829,9 @@ static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_vbus_v);
 
-static const struct file_operations ab8500_gpadc_vbus_v_fops = {
-       .open = ab8500_gpadc_vbus_v_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_c_show(struct seq_file *s, void *p)
 {
        int main_charger_c_raw;
        int main_charger_c_convert;
@@ -1968,22 +1848,9 @@ static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
-               struct file *file)
-{
-       return single_open(file, ab8500_gpadc_main_charger_c_print,
-               inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
-       .open = ab8500_gpadc_main_charger_c_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_c);
 
-static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_charger_c_show(struct seq_file *s, void *p)
 {
        int usb_charger_c_raw;
        int usb_charger_c_convert;
@@ -2000,22 +1867,9 @@ static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
-               struct file *file)
-{
-       return single_open(file, ab8500_gpadc_usb_charger_c_print,
-               inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
-       .open = ab8500_gpadc_usb_charger_c_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_charger_c);
 
-static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bk_bat_v_show(struct seq_file *s, void *p)
 {
        int bk_bat_v_raw;
        int bk_bat_v_convert;
@@ -2032,21 +1886,9 @@ static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8500_gpadc_bk_bat_v_print,
-                          inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
-       .open = ab8500_gpadc_bk_bat_v_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bk_bat_v);
 
-static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_die_temp_show(struct seq_file *s, void *p)
 {
        int die_temp_raw;
        int die_temp_convert;
@@ -2063,21 +1905,9 @@ static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8500_gpadc_die_temp_print,
-                          inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_die_temp);
 
-static const struct file_operations ab8500_gpadc_die_temp_fops = {
-       .open = ab8500_gpadc_die_temp_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_id_show(struct seq_file *s, void *p)
 {
        int usb_id_raw;
        int usb_id_convert;
@@ -2094,20 +1924,9 @@ static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8500_gpadc_usb_id_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8500_gpadc_usb_id_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_id_fops = {
-       .open = ab8500_gpadc_usb_id_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_id);
 
-static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_xtal_temp_show(struct seq_file *s, void *p)
 {
        int xtal_temp_raw;
        int xtal_temp_convert;
@@ -2124,21 +1943,9 @@ static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8540_gpadc_xtal_temp_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8540_gpadc_xtal_temp_print,
-               inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_xtal_temp);
 
-static const struct file_operations ab8540_gpadc_xtal_temp_fops = {
-       .open = ab8540_gpadc_xtal_temp_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_true_meas_show(struct seq_file *s, void *p)
 {
        int vbat_true_meas_raw;
        int vbat_true_meas_convert;
@@ -2156,22 +1963,9 @@ static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8540_gpadc_vbat_true_meas_open(struct inode *inode,
-               struct file *file)
-{
-       return single_open(file, ab8540_gpadc_vbat_true_meas_print,
-               inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas);
 
-static const struct file_operations ab8540_gpadc_vbat_true_meas_fops = {
-       .open = ab8540_gpadc_vbat_true_meas_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_ctrl_and_ibat_show(struct seq_file *s, void *p)
 {
        int bat_ctrl_raw;
        int bat_ctrl_convert;
@@ -2197,22 +1991,9 @@ static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8540_gpadc_bat_ctrl_and_ibat_open(struct inode *inode,
-               struct file *file)
-{
-       return single_open(file, ab8540_gpadc_bat_ctrl_and_ibat_print,
-               inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_ctrl_and_ibat_fops = {
-       .open = ab8540_gpadc_bat_ctrl_and_ibat_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_ctrl_and_ibat);
 
-static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_meas_and_ibat_show(struct seq_file *s, void *p)
 {
        int vbat_meas_raw;
        int vbat_meas_convert;
@@ -2237,23 +2018,9 @@ static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8540_gpadc_vbat_meas_and_ibat_open(struct inode *inode,
-               struct file *file)
-{
-       return single_open(file, ab8540_gpadc_vbat_meas_and_ibat_print,
-               inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_meas_and_ibat);
 
-static const struct file_operations ab8540_gpadc_vbat_meas_and_ibat_fops = {
-       .open = ab8540_gpadc_vbat_meas_and_ibat_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
-                                                     void *p)
+static int ab8540_gpadc_vbat_true_meas_and_ibat_show(struct seq_file *s, void *p)
 {
        int vbat_true_meas_raw;
        int vbat_true_meas_convert;
@@ -2279,23 +2046,9 @@ static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
        return 0;
 }
 
-static int ab8540_gpadc_vbat_true_meas_and_ibat_open(struct inode *inode,
-               struct file *file)
-{
-       return single_open(file, ab8540_gpadc_vbat_true_meas_and_ibat_print,
-               inode->i_private);
-}
-
-static const struct file_operations
-ab8540_gpadc_vbat_true_meas_and_ibat_fops = {
-       .open = ab8540_gpadc_vbat_true_meas_and_ibat_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas_and_ibat);
 
-static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_temp_and_ibat_show(struct seq_file *s, void *p)
 {
        int bat_temp_raw;
        int bat_temp_convert;
@@ -2320,22 +2073,9 @@ static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8540_gpadc_bat_temp_and_ibat_open(struct inode *inode,
-               struct file *file)
-{
-       return single_open(file, ab8540_gpadc_bat_temp_and_ibat_print,
-               inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_temp_and_ibat_fops = {
-       .open = ab8540_gpadc_bat_temp_and_ibat_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_temp_and_ibat);
 
-static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_otp_calib_show(struct seq_file *s, void *p)
 {
        struct ab8500_gpadc *gpadc;
        u16 vmain_l, vmain_h, btemp_l, btemp_h;
@@ -2359,18 +2099,7 @@ static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ab8540_gpadc_otp_cal_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ab8540_gpadc_otp_cal_print, inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_otp_calib_fops = {
-       .open = ab8540_gpadc_otp_cal_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_otp_calib);
 
 static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
 {
@@ -2903,14 +2632,6 @@ static const struct file_operations ab8500_val_fops = {
        .owner = THIS_MODULE,
 };
 
-static const struct file_operations ab8500_interrupts_fops = {
-       .open = ab8500_interrupts_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
 static const struct file_operations ab8500_subscribe_fops = {
        .open = ab8500_subscribe_unsubscribe_open,
        .write = ab8500_subscribe_write,
@@ -2997,7 +2718,7 @@ static int ab8500_debug_probe(struct platform_device *plf)
                goto err;
 
        file = debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
-                                  &plf->dev, &ab8500_registers_fops);
+                                  &plf->dev, &ab8500_bank_registers_fops);
        if (!file)
                goto err;
 
index 09cf369..a307832 100644 (file)
@@ -184,6 +184,7 @@ static struct irq_chip arizona_irq_chip = {
 };
 
 static struct lock_class_key arizona_irq_lock_class;
+static struct lock_class_key arizona_irq_request_class;
 
 static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
                              irq_hw_number_t hw)
@@ -191,7 +192,8 @@ static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
        struct arizona *data = h->host_data;
 
        irq_set_chip_data(virq, data);
-       irq_set_lockdep_class(virq, &arizona_irq_lock_class);
+       irq_set_lockdep_class(virq, &arizona_irq_lock_class,
+               &arizona_irq_request_class);
        irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
        irq_set_nested_thread(virq, 1);
        irq_set_noprobe(virq);
index 064bde9..f684a93 100644 (file)
 #define FLEX_MR_OPMODE(opmode) (((opmode) << FLEX_MR_OPMODE_OFFSET) &  \
                                 FLEX_MR_OPMODE_MASK)
 
+struct atmel_flexcom {
+       void __iomem *base;
+       u32 opmode;
+       struct clk *clk;
+};
 
 static int atmel_flexcom_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
-       struct clk *clk;
        struct resource *res;
-       void __iomem *base;
-       u32 opmode;
+       struct atmel_flexcom *ddata;
        int err;
 
-       err = of_property_read_u32(np, "atmel,flexcom-mode", &opmode);
+       ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+       if (!ddata)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, ddata);
+
+       err = of_property_read_u32(np, "atmel,flexcom-mode", &ddata->opmode);
        if (err)
                return err;
 
-       if (opmode < ATMEL_FLEXCOM_MODE_USART ||
-           opmode > ATMEL_FLEXCOM_MODE_TWI)
+       if (ddata->opmode < ATMEL_FLEXCOM_MODE_USART ||
+           ddata->opmode > ATMEL_FLEXCOM_MODE_TWI)
                return -EINVAL;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
+       ddata->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(ddata->base))
+               return PTR_ERR(ddata->base);
 
-       clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(clk))
-               return PTR_ERR(clk);
+       ddata->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(ddata->clk))
+               return PTR_ERR(ddata->clk);
 
-       err = clk_prepare_enable(clk);
+       err = clk_prepare_enable(ddata->clk);
        if (err)
                return err;
 
@@ -76,9 +85,9 @@ static int atmel_flexcom_probe(struct platform_device *pdev)
         * inaccessible and are read as zero. Also the external I/O lines of the
         * Flexcom are muxed to reach the selected device.
         */
-       writel(FLEX_MR_OPMODE(opmode), base + FLEX_MR);
+       writel(FLEX_MR_OPMODE(ddata->opmode), ddata->base + FLEX_MR);
 
-       clk_disable_unprepare(clk);
+       clk_disable_unprepare(ddata->clk);
 
        return devm_of_platform_populate(&pdev->dev);
 }
@@ -89,10 +98,34 @@ static const struct of_device_id atmel_flexcom_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, atmel_flexcom_of_match);
 
+#ifdef CONFIG_PM_SLEEP
+static int atmel_flexcom_resume(struct device *dev)
+{
+       struct atmel_flexcom *ddata = dev_get_drvdata(dev);
+       int err;
+       u32 val;
+
+       err = clk_prepare_enable(ddata->clk);
+       if (err)
+               return err;
+
+       val = FLEX_MR_OPMODE(ddata->opmode);
+       writel(val, ddata->base + FLEX_MR);
+
+       clk_disable_unprepare(ddata->clk);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_flexcom_pm_ops, NULL,
+                        atmel_flexcom_resume);
+
 static struct platform_driver atmel_flexcom_driver = {
        .probe  = atmel_flexcom_probe,
        .driver = {
                .name           = "atmel_flexcom",
+               .pm             = &atmel_flexcom_pm_ops,
                .of_match_table = atmel_flexcom_of_match,
        },
 };
index 2468b43..e94c72c 100644 (file)
@@ -129,6 +129,7 @@ static const struct regmap_range axp288_volatile_ranges[] = {
        regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
        regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
        regmap_reg_range(AXP288_BC_DET_STAT, AXP288_BC_DET_STAT),
+       regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
        regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
        regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
        regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
@@ -878,6 +879,9 @@ static struct mfd_cell axp813_cells[] = {
                .resources              = axp803_pek_resources,
        }, {
                .name                   = "axp20x-regulator",
+       }, {
+               .name                   = "axp20x-gpio",
+               .of_compatible          = "x-powers,axp813-gpio",
        }
 };
 
index b0ca5a4..d610241 100644 (file)
@@ -40,13 +40,13 @@ static struct cros_ec_platform pd_p = {
 };
 
 static const struct mfd_cell ec_cell = {
-       .name = "cros-ec-ctl",
+       .name = "cros-ec-dev",
        .platform_data = &ec_p,
        .pdata_size = sizeof(ec_p),
 };
 
 static const struct mfd_cell ec_pd_cell = {
-       .name = "cros-ec-ctl",
+       .name = "cros-ec-dev",
        .platform_data = &pd_p,
        .pdata_size = sizeof(pd_p),
 };
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
new file mode 100644 (file)
index 0000000..e4fafdd
--- /dev/null
@@ -0,0 +1,552 @@
+/*
+ * cros_ec_dev - expose the Chrome OS Embedded Controller to user-space
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fs.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "cros_ec_dev.h"
+
+#define DRV_NAME "cros-ec-dev"
+
+/* Device variables */
+#define CROS_MAX_DEV 128
+static int ec_major;
+
+static const struct attribute_group *cros_ec_groups[] = {
+       &cros_ec_attr_group,
+       &cros_ec_lightbar_attr_group,
+       &cros_ec_vbc_attr_group,
+       NULL,
+};
+
+static struct class cros_class = {
+       .owner          = THIS_MODULE,
+       .name           = "chromeos",
+       .dev_groups     = cros_ec_groups,
+};
+
+/* Basic communication */
+static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen)
+{
+       struct ec_response_get_version *resp;
+       static const char * const current_image_name[] = {
+               "unknown", "read-only", "read-write", "invalid",
+       };
+       struct cros_ec_command *msg;
+       int ret;
+
+       msg = kmalloc(sizeof(*msg) + sizeof(*resp), GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       msg->version = 0;
+       msg->command = EC_CMD_GET_VERSION + ec->cmd_offset;
+       msg->insize = sizeof(*resp);
+       msg->outsize = 0;
+
+       ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+       if (ret < 0)
+               goto exit;
+
+       if (msg->result != EC_RES_SUCCESS) {
+               snprintf(str, maxlen,
+                        "%s\nUnknown EC version: EC returned %d\n",
+                        CROS_EC_DEV_VERSION, msg->result);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       resp = (struct ec_response_get_version *)msg->data;
+       if (resp->current_image >= ARRAY_SIZE(current_image_name))
+               resp->current_image = 3; /* invalid */
+
+       snprintf(str, maxlen, "%s\n%s\n%s\n%s\n", CROS_EC_DEV_VERSION,
+                resp->version_string_ro, resp->version_string_rw,
+                current_image_name[resp->current_image]);
+
+       ret = 0;
+exit:
+       kfree(msg);
+       return ret;
+}
+
+static int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
+{
+       struct cros_ec_command *msg;
+       int ret;
+
+       if (ec->features[0] == -1U && ec->features[1] == -1U) {
+               /* features bitmap not read yet */
+
+               msg = kmalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
+               if (!msg)
+                       return -ENOMEM;
+
+               msg->version = 0;
+               msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
+               msg->insize = sizeof(ec->features);
+               msg->outsize = 0;
+
+               ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+               if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+                       dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
+                                ret, msg->result);
+                       memset(ec->features, 0, sizeof(ec->features));
+               }
+
+               memcpy(ec->features, msg->data, sizeof(ec->features));
+
+               dev_dbg(ec->dev, "EC features %08x %08x\n",
+                       ec->features[0], ec->features[1]);
+
+               kfree(msg);
+       }
+
+       return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
+}
+
+/* Device file ops */
+static int ec_device_open(struct inode *inode, struct file *filp)
+{
+       struct cros_ec_dev *ec = container_of(inode->i_cdev,
+                                             struct cros_ec_dev, cdev);
+       filp->private_data = ec;
+       nonseekable_open(inode, filp);
+       return 0;
+}
+
+static int ec_device_release(struct inode *inode, struct file *filp)
+{
+       return 0;
+}
+
+static ssize_t ec_device_read(struct file *filp, char __user *buffer,
+                             size_t length, loff_t *offset)
+{
+       struct cros_ec_dev *ec = filp->private_data;
+       char msg[sizeof(struct ec_response_get_version) +
+                sizeof(CROS_EC_DEV_VERSION)];
+       size_t count;
+       int ret;
+
+       if (*offset != 0)
+               return 0;
+
+       ret = ec_get_version(ec, msg, sizeof(msg));
+       if (ret)
+               return ret;
+
+       count = min(length, strlen(msg));
+
+       if (copy_to_user(buffer, msg, count))
+               return -EFAULT;
+
+       *offset = count;
+       return count;
+}
+
+/* Ioctls */
+static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
+{
+       long ret;
+       struct cros_ec_command u_cmd;
+       struct cros_ec_command *s_cmd;
+
+       if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
+               return -EFAULT;
+
+       if ((u_cmd.outsize > EC_MAX_MSG_BYTES) ||
+           (u_cmd.insize > EC_MAX_MSG_BYTES))
+               return -EINVAL;
+
+       s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
+                       GFP_KERNEL);
+       if (!s_cmd)
+               return -ENOMEM;
+
+       if (copy_from_user(s_cmd, arg, sizeof(*s_cmd) + u_cmd.outsize)) {
+               ret = -EFAULT;
+               goto exit;
+       }
+
+       if (u_cmd.outsize != s_cmd->outsize ||
+           u_cmd.insize != s_cmd->insize) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       s_cmd->command += ec->cmd_offset;
+       ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
+       /* Only copy data to userland if data was received. */
+       if (ret < 0)
+               goto exit;
+
+       if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
+               ret = -EFAULT;
+exit:
+       kfree(s_cmd);
+       return ret;
+}
+
+static long ec_device_ioctl_readmem(struct cros_ec_dev *ec, void __user *arg)
+{
+       struct cros_ec_device *ec_dev = ec->ec_dev;
+       struct cros_ec_readmem s_mem = { };
+       long num;
+
+       /* Not every platform supports direct reads */
+       if (!ec_dev->cmd_readmem)
+               return -ENOTTY;
+
+       if (copy_from_user(&s_mem, arg, sizeof(s_mem)))
+               return -EFAULT;
+
+       num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes,
+                                 s_mem.buffer);
+       if (num <= 0)
+               return num;
+
+       if (copy_to_user((void __user *)arg, &s_mem, sizeof(s_mem)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static long ec_device_ioctl(struct file *filp, unsigned int cmd,
+                           unsigned long arg)
+{
+       struct cros_ec_dev *ec = filp->private_data;
+
+       if (_IOC_TYPE(cmd) != CROS_EC_DEV_IOC)
+               return -ENOTTY;
+
+       switch (cmd) {
+       case CROS_EC_DEV_IOCXCMD:
+               return ec_device_ioctl_xcmd(ec, (void __user *)arg);
+       case CROS_EC_DEV_IOCRDMEM:
+               return ec_device_ioctl_readmem(ec, (void __user *)arg);
+       }
+
+       return -ENOTTY;
+}
+
+/* Module initialization */
+static const struct file_operations fops = {
+       .open = ec_device_open,
+       .release = ec_device_release,
+       .read = ec_device_read,
+       .unlocked_ioctl = ec_device_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = ec_device_ioctl,
+#endif
+};
+
+static void __remove(struct device *dev)
+{
+       struct cros_ec_dev *ec = container_of(dev, struct cros_ec_dev,
+                                             class_dev);
+       kfree(ec);
+}
+
+static void cros_ec_sensors_register(struct cros_ec_dev *ec)
+{
+       /*
+        * Issue a command to get the number of sensors reported.
+        * Build an array of sensor drivers and register them all.
+        */
+       int ret, i, id, sensor_num;
+       struct mfd_cell *sensor_cells;
+       struct cros_ec_sensor_platform *sensor_platforms;
+       int sensor_type[MOTIONSENSE_TYPE_MAX];
+       struct ec_params_motion_sense *params;
+       struct ec_response_motion_sense *resp;
+       struct cros_ec_command *msg;
+
+       msg = kzalloc(sizeof(struct cros_ec_command) +
+                     max(sizeof(*params), sizeof(*resp)), GFP_KERNEL);
+       if (msg == NULL)
+               return;
+
+       msg->version = 2;
+       msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+       msg->outsize = sizeof(*params);
+       msg->insize = sizeof(*resp);
+
+       params = (struct ec_params_motion_sense *)msg->data;
+       params->cmd = MOTIONSENSE_CMD_DUMP;
+
+       ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+       if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+               dev_warn(ec->dev, "cannot get EC sensor information: %d/%d\n",
+                        ret, msg->result);
+               goto error;
+       }
+
+       resp = (struct ec_response_motion_sense *)msg->data;
+       sensor_num = resp->dump.sensor_count;
+       /* Allocate 2 extra sensors in case lid angle or FIFO are needed */
+       sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 2),
+                              GFP_KERNEL);
+       if (sensor_cells == NULL)
+               goto error;
+
+       sensor_platforms = kzalloc(sizeof(struct cros_ec_sensor_platform) *
+                 (sensor_num + 1), GFP_KERNEL);
+       if (sensor_platforms == NULL)
+               goto error_platforms;
+
+       memset(sensor_type, 0, sizeof(sensor_type));
+       id = 0;
+       for (i = 0; i < sensor_num; i++) {
+               params->cmd = MOTIONSENSE_CMD_INFO;
+               params->info.sensor_num = i;
+               ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+               if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+                       dev_warn(ec->dev, "no info for EC sensor %d : %d/%d\n",
+                                i, ret, msg->result);
+                       continue;
+               }
+               switch (resp->info.type) {
+               case MOTIONSENSE_TYPE_ACCEL:
+                       sensor_cells[id].name = "cros-ec-accel";
+                       break;
+               case MOTIONSENSE_TYPE_BARO:
+                       sensor_cells[id].name = "cros-ec-baro";
+                       break;
+               case MOTIONSENSE_TYPE_GYRO:
+                       sensor_cells[id].name = "cros-ec-gyro";
+                       break;
+               case MOTIONSENSE_TYPE_MAG:
+                       sensor_cells[id].name = "cros-ec-mag";
+                       break;
+               case MOTIONSENSE_TYPE_PROX:
+                       sensor_cells[id].name = "cros-ec-prox";
+                       break;
+               case MOTIONSENSE_TYPE_LIGHT:
+                       sensor_cells[id].name = "cros-ec-light";
+                       break;
+               case MOTIONSENSE_TYPE_ACTIVITY:
+                       sensor_cells[id].name = "cros-ec-activity";
+                       break;
+               default:
+                       dev_warn(ec->dev, "unknown type %d\n", resp->info.type);
+                       continue;
+               }
+               sensor_platforms[id].sensor_num = i;
+               sensor_cells[id].id = sensor_type[resp->info.type];
+               sensor_cells[id].platform_data = &sensor_platforms[id];
+               sensor_cells[id].pdata_size =
+                       sizeof(struct cros_ec_sensor_platform);
+
+               sensor_type[resp->info.type]++;
+               id++;
+       }
+       if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2) {
+               sensor_platforms[id].sensor_num = sensor_num;
+
+               sensor_cells[id].name = "cros-ec-angle";
+               sensor_cells[id].id = 0;
+               sensor_cells[id].platform_data = &sensor_platforms[id];
+               sensor_cells[id].pdata_size =
+                       sizeof(struct cros_ec_sensor_platform);
+               id++;
+       }
+       if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+               sensor_cells[id].name = "cros-ec-ring";
+               id++;
+       }
+
+       ret = mfd_add_devices(ec->dev, 0, sensor_cells, id,
+                             NULL, 0, NULL);
+       if (ret)
+               dev_err(ec->dev, "failed to add EC sensors\n");
+
+       kfree(sensor_platforms);
+error_platforms:
+       kfree(sensor_cells);
+error:
+       kfree(msg);
+}
+
+static int ec_device_probe(struct platform_device *pdev)
+{
+       int retval = -ENOMEM;
+       struct device *dev = &pdev->dev;
+       struct cros_ec_platform *ec_platform = dev_get_platdata(dev);
+       struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL);
+
+       if (!ec)
+               return retval;
+
+       dev_set_drvdata(dev, ec);
+       ec->ec_dev = dev_get_drvdata(dev->parent);
+       ec->dev = dev;
+       ec->cmd_offset = ec_platform->cmd_offset;
+       ec->features[0] = -1U; /* Not cached yet */
+       ec->features[1] = -1U; /* Not cached yet */
+       device_initialize(&ec->class_dev);
+       cdev_init(&ec->cdev, &fops);
+
+       /*
+        * Add the class device
+        * Link to the character device for creating the /dev entry
+        * in devtmpfs.
+        */
+       ec->class_dev.devt = MKDEV(ec_major, pdev->id);
+       ec->class_dev.class = &cros_class;
+       ec->class_dev.parent = dev;
+       ec->class_dev.release = __remove;
+
+       retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name);
+       if (retval) {
+               dev_err(dev, "dev_set_name failed => %d\n", retval);
+               goto failed;
+       }
+
+       retval = cdev_device_add(&ec->cdev, &ec->class_dev);
+       if (retval) {
+               dev_err(dev, "cdev_device_add failed => %d\n", retval);
+               goto failed;
+       }
+
+       if (cros_ec_debugfs_init(ec))
+               dev_warn(dev, "failed to create debugfs directory\n");
+
+       /* check whether this EC is a sensor hub. */
+       if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
+               cros_ec_sensors_register(ec);
+
+       /* Take control of the lightbar from the EC. */
+       lb_manual_suspend_ctrl(ec, 1);
+
+       return 0;
+
+failed:
+       put_device(&ec->class_dev);
+       return retval;
+}
+
+static int ec_device_remove(struct platform_device *pdev)
+{
+       struct cros_ec_dev *ec = dev_get_drvdata(&pdev->dev);
+
+       /* Let the EC take over the lightbar again. */
+       lb_manual_suspend_ctrl(ec, 0);
+
+       cros_ec_debugfs_remove(ec);
+
+       cdev_del(&ec->cdev);
+       device_unregister(&ec->class_dev);
+       return 0;
+}
+
+static const struct platform_device_id cros_ec_id[] = {
+       { DRV_NAME, 0 },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_id);
+
+static __maybe_unused int ec_device_suspend(struct device *dev)
+{
+       struct cros_ec_dev *ec = dev_get_drvdata(dev);
+
+       lb_suspend(ec);
+
+       return 0;
+}
+
+static __maybe_unused int ec_device_resume(struct device *dev)
+{
+       struct cros_ec_dev *ec = dev_get_drvdata(dev);
+
+       lb_resume(ec);
+
+       return 0;
+}
+
+static const struct dev_pm_ops cros_ec_dev_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+       .suspend = ec_device_suspend,
+       .resume = ec_device_resume,
+#endif
+};
+
+static struct platform_driver cros_ec_dev_driver = {
+       .driver = {
+               .name = DRV_NAME,
+               .pm = &cros_ec_dev_pm_ops,
+       },
+       .probe = ec_device_probe,
+       .remove = ec_device_remove,
+};
+
+static int __init cros_ec_dev_init(void)
+{
+       int ret;
+       dev_t dev = 0;
+
+       ret  = class_register(&cros_class);
+       if (ret) {
+               pr_err(CROS_EC_DEV_NAME ": failed to register device class\n");
+               return ret;
+       }
+
+       /* Get a range of minor numbers (starting with 0) to work with */
+       ret = alloc_chrdev_region(&dev, 0, CROS_MAX_DEV, CROS_EC_DEV_NAME);
+       if (ret < 0) {
+               pr_err(CROS_EC_DEV_NAME ": alloc_chrdev_region() failed\n");
+               goto failed_chrdevreg;
+       }
+       ec_major = MAJOR(dev);
+
+       /* Register the driver */
+       ret = platform_driver_register(&cros_ec_dev_driver);
+       if (ret < 0) {
+               pr_warn(CROS_EC_DEV_NAME ": can't register driver: %d\n", ret);
+               goto failed_devreg;
+       }
+       return 0;
+
+failed_devreg:
+       unregister_chrdev_region(MKDEV(ec_major, 0), CROS_MAX_DEV);
+failed_chrdevreg:
+       class_unregister(&cros_class);
+       return ret;
+}
+
+static void __exit cros_ec_dev_exit(void)
+{
+       platform_driver_unregister(&cros_ec_dev_driver);
+       unregister_chrdev(ec_major, CROS_EC_DEV_NAME);
+       class_unregister(&cros_class);
+}
+
+module_init(cros_ec_dev_init);
+module_exit(cros_ec_dev_exit);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>");
+MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
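
For context, the read() path implemented above (ec_device_read() together with ec_get_version()) returns a version banner on the first read and zero on subsequent reads. A minimal user-space sketch of that path follows; the /dev/cros_ec node name is an assumption here, since the actual node is named after the ec_name field passed in struct cros_ec_platform.

/*
 * Hypothetical user-space reader for the character device created above.
 * The "/dev/cros_ec" path is an assumption: the node takes its name from
 * the ec_name platform data.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/dev/cros_ec", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ec_device_read() emits the banner once, then returns 0 (EOF) */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	buf[n] = '\0';
	printf("%s", buf);	/* e.g. "1.0.0\n<ro>\n<rw>\n<image>\n" */

	close(fd);
	return 0;
}
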
diff --git a/drivers/mfd/cros_ec_dev.h b/drivers/mfd/cros_ec_dev.h
new file mode 100644 (file)
index 0000000..45e9453
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * cros_ec_dev - expose the Chrome OS Embedded Controller to userspace
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CROS_EC_DEV_H_
+#define _CROS_EC_DEV_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/mfd/cros_ec.h>
+
+#define CROS_EC_DEV_VERSION "1.0.0"
+
+/*
+ * @offset: within EC_LPC_ADDR_MEMMAP region
+ * @bytes: number of bytes to read. zero means "read a string" (including '\0')
+ *         (at most only EC_MEMMAP_SIZE bytes can be read)
+ * @buffer: where to store the result
+ * ioctl returns the number of bytes read, negative on error
+ */
+struct cros_ec_readmem {
+       uint32_t offset;
+       uint32_t bytes;
+       uint8_t buffer[EC_MEMMAP_SIZE];
+};
+
+#define CROS_EC_DEV_IOC       0xEC
+#define CROS_EC_DEV_IOCXCMD   _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command)
+#define CROS_EC_DEV_IOCRDMEM  _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem)
+
+/* Lightbar utilities */
+extern bool ec_has_lightbar(struct cros_ec_dev *ec);
+extern int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable);
+extern int lb_suspend(struct cros_ec_dev *ec);
+extern int lb_resume(struct cros_ec_dev *ec);
+
+#endif /* _CROS_EC_DEV_H_ */
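
The two ioctls above are the raw command path to the EC. Since this header is kernel-internal, user-space tools carry their own copies of the ioctl numbers and of struct cros_ec_command; the sketch below does the same and assumes the layout from include/linux/mfd/cros_ec.h (version, command, outsize, insize, result, then a flexible data[] array). The 0x0001 command code and the /dev/cros_ec path are illustrative assumptions, not something this header defines.

/*
 * Hypothetical user-space use of CROS_EC_DEV_IOCXCMD. The struct layout
 * and command code below are assumptions made for illustration.
 */
#include <fcntl.h>
#include <linux/ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct cros_ec_command {
	uint32_t version;	/* command version, usually 0 */
	uint32_t command;	/* command code */
	uint32_t outsize;	/* bytes sent to the EC */
	uint32_t insize;	/* max bytes expected back */
	uint32_t result;	/* EC_RES_* status, filled in by the EC */
	uint8_t data[];		/* outgoing, then incoming payload */
};

#define CROS_EC_DEV_IOC		0xEC
#define CROS_EC_DEV_IOCXCMD	_IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command)

int main(void)
{
	const uint32_t payload = 0xa0b0c0d0;	/* arbitrary echo payload */
	struct cros_ec_command *cmd;
	uint32_t reply;
	int fd, ret;

	cmd = calloc(1, sizeof(*cmd) + sizeof(payload));
	if (!cmd)
		return 1;

	cmd->version = 0;
	cmd->command = 0x0001;		/* assumed echo-style command */
	cmd->outsize = sizeof(payload);
	cmd->insize  = sizeof(reply);
	memcpy(cmd->data, &payload, sizeof(payload));

	fd = open("/dev/cros_ec", O_RDWR);	/* node name is an assumption */
	if (fd < 0) {
		perror("open");
		free(cmd);
		return 1;
	}

	/* Returns the number of bytes received from the EC, or negative */
	ret = ioctl(fd, CROS_EC_DEV_IOCXCMD, cmd);
	if (ret < 0) {
		perror("ioctl");
	} else {
		memcpy(&reply, cmd->data, sizeof(reply));
		printf("result=%u reply=0x%08x\n", cmd->result, reply);
	}

	close(fd);
	free(cmd);
	return 0;
}
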
index c971407..1b52b85 100644 (file)
@@ -72,8 +72,7 @@
  * struct cros_ec_spi - information about a SPI-connected EC
  *
  * @spi: SPI device we are connected to
- * @last_transfer_ns: time that we last finished a transfer, or 0 if there
- *     if no record
+ * @last_transfer_ns: time that we last finished a transfer.
  * @start_of_msg_delay: used to set the delay_usecs on the spi_transfer that
  *      is sent when we want to turn on CS at the start of a transaction.
  * @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that
@@ -377,19 +376,17 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
        u8 *ptr;
        u8 *rx_buf;
        u8 sum;
+       u8 rx_byte;
        int ret = 0, final_ret;
+       unsigned long delay;
 
        len = cros_ec_prepare_tx(ec_dev, ec_msg);
        dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
 
        /* If it's too soon to do another transaction, wait */
-       if (ec_spi->last_transfer_ns) {
-               unsigned long delay;    /* The delay completed so far */
-
-               delay = ktime_get_ns() - ec_spi->last_transfer_ns;
-               if (delay < EC_SPI_RECOVERY_TIME_NS)
-                       ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
-       }
+       delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+       if (delay < EC_SPI_RECOVERY_TIME_NS)
+               ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
 
        rx_buf = kzalloc(len, GFP_KERNEL);
        if (!rx_buf)
@@ -421,25 +418,22 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
        if (!ret) {
                /* Verify that EC can process command */
                for (i = 0; i < len; i++) {
-                       switch (rx_buf[i]) {
-                       case EC_SPI_PAST_END:
-                       case EC_SPI_RX_BAD_DATA:
-                       case EC_SPI_NOT_READY:
-                               ret = -EAGAIN;
-                               ec_msg->result = EC_RES_IN_PROGRESS;
-                       default:
+                       rx_byte = rx_buf[i];
+                       if (rx_byte == EC_SPI_PAST_END  ||
+                           rx_byte == EC_SPI_RX_BAD_DATA ||
+                           rx_byte == EC_SPI_NOT_READY) {
+                               ret = -EREMOTEIO;
                                break;
                        }
-                       if (ret)
-                               break;
                }
-               if (!ret)
-                       ret = cros_ec_spi_receive_packet(ec_dev,
-                                       ec_msg->insize + sizeof(*response));
-       } else {
-               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
        }
 
+       if (!ret)
+               ret = cros_ec_spi_receive_packet(ec_dev,
+                               ec_msg->insize + sizeof(*response));
+       else
+               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
        final_ret = terminate_request(ec_dev);
 
        spi_bus_unlock(ec_spi->spi->master);
@@ -508,20 +502,18 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
        int i, len;
        u8 *ptr;
        u8 *rx_buf;
+       u8 rx_byte;
        int sum;
        int ret = 0, final_ret;
+       unsigned long delay;
 
        len = cros_ec_prepare_tx(ec_dev, ec_msg);
        dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
 
        /* If it's too soon to do another transaction, wait */
-       if (ec_spi->last_transfer_ns) {
-               unsigned long delay;    /* The delay completed so far */
-
-               delay = ktime_get_ns() - ec_spi->last_transfer_ns;
-               if (delay < EC_SPI_RECOVERY_TIME_NS)
-                       ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
-       }
+       delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+       if (delay < EC_SPI_RECOVERY_TIME_NS)
+               ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
 
        rx_buf = kzalloc(len, GFP_KERNEL);
        if (!rx_buf)
@@ -544,25 +536,22 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
        if (!ret) {
                /* Verify that EC can process command */
                for (i = 0; i < len; i++) {
-                       switch (rx_buf[i]) {
-                       case EC_SPI_PAST_END:
-                       case EC_SPI_RX_BAD_DATA:
-                       case EC_SPI_NOT_READY:
-                               ret = -EAGAIN;
-                               ec_msg->result = EC_RES_IN_PROGRESS;
-                       default:
+                       rx_byte = rx_buf[i];
+                       if (rx_byte == EC_SPI_PAST_END  ||
+                           rx_byte == EC_SPI_RX_BAD_DATA ||
+                           rx_byte == EC_SPI_NOT_READY) {
+                               ret = -EREMOTEIO;
                                break;
                        }
-                       if (ret)
-                               break;
                }
-               if (!ret)
-                       ret = cros_ec_spi_receive_response(ec_dev,
-                                       ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
-       } else {
-               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
        }
 
+       if (!ret)
+               ret = cros_ec_spi_receive_response(ec_dev,
+                               ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
+       else
+               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
        final_ret = terminate_request(ec_dev);
 
        spi_bus_unlock(ec_spi->spi->master);
@@ -667,6 +656,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
                           sizeof(struct ec_response_get_protocol_info);
        ec_dev->dout_size = sizeof(struct ec_host_request);
 
+       ec_spi->last_transfer_ns = ktime_get_ns();
 
        err = cros_ec_register(ec_dev);
        if (err) {
index 0e0ab9b..9e545eb 100644 (file)
@@ -450,6 +450,8 @@ int intel_lpss_probe(struct device *dev,
        if (ret)
                goto err_remove_ltr;
 
+       dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+
        return 0;
 
 err_remove_ltr:
@@ -478,7 +480,9 @@ EXPORT_SYMBOL_GPL(intel_lpss_remove);
 
 static int resume_lpss_device(struct device *dev, void *data)
 {
-       pm_runtime_resume(dev);
+       if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+               pm_runtime_resume(dev);
+
        return 0;
 }
 
index 36adf9e..274306d 100644 (file)
@@ -16,7 +16,6 @@
  * Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
  */
 
-#include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/mfd/core.h>
 #include <linux/i2c.h>
index 55d824b..390b27c 100644 (file)
@@ -458,7 +458,7 @@ static int kempld_probe(struct platform_device *pdev)
                return -EINVAL;
 
        pld->io_base = devm_ioport_map(dev, ioport->start,
-                                       ioport->end - ioport->start);
+                                       resource_size(ioport));
        if (!pld->io_base)
                return -ENOMEM;
 
index cf1120a..53dc1a4 100644 (file)
@@ -1143,11 +1143,6 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
                        res->end = res->start + SPIBASE_APL_SZ - 1;
 
                        pci_bus_read_config_dword(bus, spi, BCR, &bcr);
-                       if (!(bcr & BCR_WPD)) {
-                               bcr |= BCR_WPD;
-                               pci_bus_write_config_dword(bus, spi, BCR, bcr);
-                               pci_bus_read_config_dword(bus, spi, BCR, &bcr);
-                       }
                        info->writeable = !!(bcr & BCR_WPD);
                }
 
index dc5caea..da9612d 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/max77693-common.h>
 #include <linux/mfd/max77843-private.h>
index 3922a93..663a239 100644 (file)
@@ -430,6 +430,7 @@ static void palmas_power_off(void)
 {
        unsigned int addr;
        int ret, slave;
+       u8 powerhold_mask;
        struct device_node *np = palmas_dev->dev->of_node;
 
        if (of_property_read_bool(np, "ti,palmas-override-powerhold")) {
@@ -437,8 +438,15 @@ static void palmas_power_off(void)
                                          PALMAS_PRIMARY_SECONDARY_PAD2);
                slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
 
+               if (of_device_is_compatible(np, "ti,tps65917"))
+                       powerhold_mask =
+                               TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK;
+               else
+                       powerhold_mask =
+                               PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK;
+
                ret = regmap_update_bits(palmas_dev->regmap[slave], addr,
-                               PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0);
+                                        powerhold_mask, 0);
                if (ret)
                        dev_err(palmas_dev->dev,
                                "Unable to write PRIMARY_SECONDARY_PAD2 %d\n",
index 6155d12..f952dff 100644 (file)
@@ -149,7 +149,7 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
 
        *pdev = platform_device_alloc(name, -1);
        if (!*pdev) {
-               dev_err(pcf->dev, "Falied to allocate %s\n", name);
+               dev_err(pcf->dev, "Failed to allocate %s\n", name);
                return;
        }
 
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
new file mode 100644 (file)
index 0000000..5c858e7
--- /dev/null
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Multifunction core driver for Zodiac Inflight Innovations RAVE
+ * Supervisory Processor(SP) MCU that is connected via dedicated UART
+ * port
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#include <linux/atomic.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sched.h>
+#include <linux/serdev.h>
+#include <asm/unaligned.h>
+
+/*
+ * The UART protocol uses the following entities:
+ *  - message to MCU => ACK response
+ *  - event from MCU => event ACK
+ *
+ * Frame structure:
+ * <STX> <DATA> <CHECKSUM> <ETX>
+ * Where:
+ * - STX - start of transmission character
+ * - ETX - end of transmission
+ * - DATA - payload
+ * - CHECKSUM - checksum calculated on <DATA>
+ *
+ * If <DATA> or <CHECKSUM> contains one of the control characters, it is
+ * escaped using the <DLE> control code. The added <DLE> does not
+ * participate in the checksum calculation.
+ */
+#define RAVE_SP_STX                    0x02
+#define RAVE_SP_ETX                    0x03
+#define RAVE_SP_DLE                    0x10
+
+#define RAVE_SP_MAX_DATA_SIZE          64
+#define RAVE_SP_CHECKSUM_SIZE          2  /* Worst case scenario on RDU2 */
+/*
+ * We don't store STX, ETX or the DLE escape bytes, so the Rx buffer
+ * only needs to hold DATA + CSUM
+ */
+#define RAVE_SP_RX_BUFFER_SIZE                         \
+       (RAVE_SP_MAX_DATA_SIZE + RAVE_SP_CHECKSUM_SIZE)
+
+#define RAVE_SP_STX_ETX_SIZE           2
+/*
+ * For Tx we have to have space for everything: STX, ETX and the
+ * potentially stuffed DATA + CSUM
+ */
+#define RAVE_SP_TX_BUFFER_SIZE                         \
+       (RAVE_SP_STX_ETX_SIZE + 2 * RAVE_SP_RX_BUFFER_SIZE)
+
+#define RAVE_SP_BOOT_SOURCE_GET                0
+#define RAVE_SP_BOOT_SOURCE_SET                1
+
+#define RAVE_SP_RDU2_BOARD_TYPE_RMB    0
+#define RAVE_SP_RDU2_BOARD_TYPE_DEB    1
+
+#define RAVE_SP_BOOT_SOURCE_SD         0
+#define RAVE_SP_BOOT_SOURCE_EMMC       1
+#define RAVE_SP_BOOT_SOURCE_NOR                2
+
+/**
+ * enum rave_sp_deframer_state - Possible state for de-framer
+ *
+ * @RAVE_SP_EXPECT_SOF:                 Scanning input for start-of-frame marker
+ * @RAVE_SP_EXPECT_DATA:        Got start of frame marker, collecting frame
+ * @RAVE_SP_EXPECT_ESCAPED_DATA: Got escape character, collecting escaped byte
+ */
+enum rave_sp_deframer_state {
+       RAVE_SP_EXPECT_SOF,
+       RAVE_SP_EXPECT_DATA,
+       RAVE_SP_EXPECT_ESCAPED_DATA,
+};
+
+/**
+ * struct rave_sp_deframer - Device protocol deframer
+ *
+ * @state:  Current state of the deframer
+ * @data:   Buffer used to collect deframed data
+ * @length: Number of bytes de-framed so far
+ */
+struct rave_sp_deframer {
+       enum rave_sp_deframer_state state;
+       unsigned char data[RAVE_SP_RX_BUFFER_SIZE];
+       size_t length;
+};
+
+/**
+ * struct rave_sp_reply - Reply as per RAVE device protocol
+ *
+ * @length:    Expected reply length
+ * @data:      Buffer to store reply payload in
+ * @code:      Expected reply code
+ * @ackid:     Expected reply ACK ID
+ * @received:  Successful reply reception completion
+ */
+struct rave_sp_reply {
+       size_t length;
+       void  *data;
+       u8     code;
+       u8     ackid;
+       struct completion received;
+};
+
+/**
+ * struct rave_sp_checksum - Variant specific checksum implementation details
+ *
+ * @length:    Calculated checksum length
+ * @subroutine:        Utilized checksum algorithm implementation
+ */
+struct rave_sp_checksum {
+       size_t length;
+       void (*subroutine)(const u8 *, size_t, u8 *);
+};
+
+/**
+ * struct rave_sp_variant_cmds - Variant specific command routines
+ *
+ * @translate: Generic to variant specific command mapping routine
+ *
+ */
+struct rave_sp_variant_cmds {
+       int (*translate)(enum rave_sp_command);
+};
+
+/**
+ * struct rave_sp_variant - RAVE supervisory processor core variant
+ *
+ * @checksum:  Variant specific checksum implementation
+ * @cmd:       Variant specific command pointer table
+ *
+ */
+struct rave_sp_variant {
+       const struct rave_sp_checksum *checksum;
+       struct rave_sp_variant_cmds cmd;
+};
+
+/**
+ * struct rave_sp - RAVE supervisory processor core
+ *
+ * @serdev:                    Pointer to underlying serdev
+ * @deframer:                  Stored state of the protocol deframer
+ * @ackid:                     ACK ID used in last reply sent to the device
+ * @bus_lock:                  Lock to serialize access to the device
+ * @reply_lock:                        Lock protecting @reply
+ * @reply:                     Pointer to memory to store reply payload
+ *
+ * @variant:                   Device variant specific information
+ * @event_notifier_list:       Input event notification chain
+ *
+ */
+struct rave_sp {
+       struct serdev_device *serdev;
+       struct rave_sp_deframer deframer;
+       atomic_t ackid;
+       struct mutex bus_lock;
+       struct mutex reply_lock;
+       struct rave_sp_reply *reply;
+
+       const struct rave_sp_variant *variant;
+       struct blocking_notifier_head event_notifier_list;
+};
+
+static bool rave_sp_id_is_event(u8 code)
+{
+       return (code & 0xF0) == RAVE_SP_EVNT_BASE;
+}
+
+static void rave_sp_unregister_event_notifier(struct device *dev, void *res)
+{
+       struct rave_sp *sp = dev_get_drvdata(dev->parent);
+       struct notifier_block *nb = *(struct notifier_block **)res;
+       struct blocking_notifier_head *bnh = &sp->event_notifier_list;
+
+       WARN_ON(blocking_notifier_chain_unregister(bnh, nb));
+}
+
+int devm_rave_sp_register_event_notifier(struct device *dev,
+                                        struct notifier_block *nb)
+{
+       struct rave_sp *sp = dev_get_drvdata(dev->parent);
+       struct notifier_block **rcnb;
+       int ret;
+
+       rcnb = devres_alloc(rave_sp_unregister_event_notifier,
+                           sizeof(*rcnb), GFP_KERNEL);
+       if (!rcnb)
+               return -ENOMEM;
+
+       ret = blocking_notifier_chain_register(&sp->event_notifier_list, nb);
+       if (!ret) {
+               *rcnb = nb;
+               devres_add(dev, rcnb);
+       } else {
+               devres_free(rcnb);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(devm_rave_sp_register_event_notifier);
+
+static void csum_8b2c(const u8 *buf, size_t size, u8 *crc)
+{
+       *crc = *buf++;
+       size--;
+
+       while (size--)
+               *crc += *buf++;
+
+       *crc = 1 + ~(*crc);
+}
+
+static void csum_ccitt(const u8 *buf, size_t size, u8 *crc)
+{
+       const u16 calculated = crc_ccitt_false(0xffff, buf, size);
+
+       /*
+        * While the rest of the wire protocol is little-endian,
+        * CCITT-16 CRC in RDU2 device is sent out in big-endian order.
+        */
+       put_unaligned_be16(calculated, crc);
+}
+
+static void *stuff(unsigned char *dest, const unsigned char *src, size_t n)
+{
+       while (n--) {
+               const unsigned char byte = *src++;
+
+               switch (byte) {
+               case RAVE_SP_STX:
+               case RAVE_SP_ETX:
+               case RAVE_SP_DLE:
+                       *dest++ = RAVE_SP_DLE;
+                       /* FALLTHROUGH */
+               default:
+                       *dest++ = byte;
+               }
+       }
+
+       return dest;
+}
+
+static int rave_sp_write(struct rave_sp *sp, const u8 *data, u8 data_size)
+{
+       const size_t checksum_length = sp->variant->checksum->length;
+       unsigned char frame[RAVE_SP_TX_BUFFER_SIZE];
+       unsigned char crc[RAVE_SP_CHECKSUM_SIZE];
+       unsigned char *dest = frame;
+       size_t length;
+
+       if (WARN_ON(checksum_length > sizeof(crc)))
+               return -ENOMEM;
+
+       if (WARN_ON(data_size > sizeof(frame)))
+               return -ENOMEM;
+
+       sp->variant->checksum->subroutine(data, data_size, crc);
+
+       *dest++ = RAVE_SP_STX;
+       dest = stuff(dest, data, data_size);
+       dest = stuff(dest, crc, checksum_length);
+       *dest++ = RAVE_SP_ETX;
+
+       length = dest - frame;
+
+       print_hex_dump(KERN_DEBUG, "rave-sp tx: ", DUMP_PREFIX_NONE,
+                      16, 1, frame, length, false);
+
+       return serdev_device_write(sp->serdev, frame, length, HZ);
+}
+
+static u8 rave_sp_reply_code(u8 command)
+{
+       /*
+        * There isn't a single rule that describes command code ->
+        * ACK code transformation, but, going through various
+        * versions of ICDs, there appear to be three distinct groups
+        * that can be described by a simple transformation.
+        */
+       switch (command) {
+       case 0xA0 ... 0xBE:
+               /*
+                * Commands implemented by firmware found in RDU1 and
+                * older devices all seem to obey the following rule
+                */
+               return command + 0x20;
+       case 0xE0 ... 0xEF:
+               /*
+                * Events emitted by all versions of the firmware use
+                * the least significant bit to get an ACK code
+                */
+               return command | 0x01;
+       default:
+               /*
+                * Commands implemented by firmware found in RDU2 are
+                * similar to "old" commands, but they use slightly
+                * different offset
+                */
+               return command + 0x40;
+       }
+}
+
+int rave_sp_exec(struct rave_sp *sp,
+                void *__data,  size_t data_size,
+                void *reply_data, size_t reply_data_size)
+{
+       struct rave_sp_reply reply = {
+               .data     = reply_data,
+               .length   = reply_data_size,
+               .received = COMPLETION_INITIALIZER_ONSTACK(reply.received),
+       };
+       unsigned char *data = __data;
+       int command, ret = 0;
+       u8 ackid;
+
+       command = sp->variant->cmd.translate(data[0]);
+       if (command < 0)
+               return command;
+
+       ackid       = atomic_inc_return(&sp->ackid);
+       reply.ackid = ackid;
+       reply.code  = rave_sp_reply_code((u8)command);
+
+       mutex_lock(&sp->bus_lock);
+
+       mutex_lock(&sp->reply_lock);
+       sp->reply = &reply;
+       mutex_unlock(&sp->reply_lock);
+
+       data[0] = command;
+       data[1] = ackid;
+
+       rave_sp_write(sp, data, data_size);
+
+       if (!wait_for_completion_timeout(&reply.received, HZ)) {
+               dev_err(&sp->serdev->dev, "Command timeout\n");
+               ret = -ETIMEDOUT;
+
+               mutex_lock(&sp->reply_lock);
+               sp->reply = NULL;
+               mutex_unlock(&sp->reply_lock);
+       }
+
+       mutex_unlock(&sp->bus_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rave_sp_exec);
+
+static void rave_sp_receive_event(struct rave_sp *sp,
+                                 const unsigned char *data, size_t length)
+{
+       u8 cmd[] = {
+               [0] = rave_sp_reply_code(data[0]),
+               [1] = data[1],
+       };
+
+       rave_sp_write(sp, cmd, sizeof(cmd));
+
+       blocking_notifier_call_chain(&sp->event_notifier_list,
+                                    rave_sp_action_pack(data[0], data[2]),
+                                    NULL);
+}
+
+static void rave_sp_receive_reply(struct rave_sp *sp,
+                                 const unsigned char *data, size_t length)
+{
+       struct device *dev = &sp->serdev->dev;
+       struct rave_sp_reply *reply;
+       const  size_t payload_length = length - 2;
+
+       mutex_lock(&sp->reply_lock);
+       reply = sp->reply;
+
+       if (reply) {
+               if (reply->code == data[0] && reply->ackid == data[1] &&
+                   payload_length >= reply->length) {
+                       /*
+                        * We are relying on memcpy(dst, src, 0) to be a no-op
+                        * when handling commands that have a no-payload reply
+                        */
+                       memcpy(reply->data, &data[2], reply->length);
+                       complete(&reply->received);
+                       sp->reply = NULL;
+               } else {
+                       dev_err(dev, "Ignoring incorrect reply\n");
+                       dev_dbg(dev, "Code:   expected = 0x%08x received = 0x%08x\n",
+                               reply->code, data[0]);
+                       dev_dbg(dev, "ACK ID: expected = 0x%08x received = 0x%08x\n",
+                               reply->ackid, data[1]);
+                       dev_dbg(dev, "Length: expected = %zu received = %zu\n",
+                               reply->length, payload_length);
+               }
+       }
+
+       mutex_unlock(&sp->reply_lock);
+}
+
+static void rave_sp_receive_frame(struct rave_sp *sp,
+                                 const unsigned char *data,
+                                 size_t length)
+{
+       const size_t checksum_length = sp->variant->checksum->length;
+       const size_t payload_length  = length - checksum_length;
+       const u8 *crc_reported       = &data[payload_length];
+       struct device *dev           = &sp->serdev->dev;
+       u8 crc_calculated[checksum_length];
+
+       print_hex_dump(KERN_DEBUG, "rave-sp rx: ", DUMP_PREFIX_NONE,
+                      16, 1, data, length, false);
+
+       if (unlikely(length <= checksum_length)) {
+               dev_warn(dev, "Dropping short frame\n");
+               return;
+       }
+
+       sp->variant->checksum->subroutine(data, payload_length,
+                                         crc_calculated);
+
+       if (memcmp(crc_calculated, crc_reported, checksum_length)) {
+               dev_warn(dev, "Dropping bad frame\n");
+               return;
+       }
+
+       if (rave_sp_id_is_event(data[0]))
+               rave_sp_receive_event(sp, data, length);
+       else
+               rave_sp_receive_reply(sp, data, length);
+}
+
+static int rave_sp_receive_buf(struct serdev_device *serdev,
+                              const unsigned char *buf, size_t size)
+{
+       struct device *dev = &serdev->dev;
+       struct rave_sp *sp = dev_get_drvdata(dev);
+       struct rave_sp_deframer *deframer = &sp->deframer;
+       const unsigned char *src = buf;
+       const unsigned char *end = buf + size;
+
+       while (src < end) {
+               const unsigned char byte = *src++;
+
+               switch (deframer->state) {
+               case RAVE_SP_EXPECT_SOF:
+                       if (byte == RAVE_SP_STX)
+                               deframer->state = RAVE_SP_EXPECT_DATA;
+                       break;
+
+               case RAVE_SP_EXPECT_DATA:
+                       /*
+                        * Treat special byte values first
+                        */
+                       switch (byte) {
+                       case RAVE_SP_ETX:
+                               rave_sp_receive_frame(sp,
+                                                     deframer->data,
+                                                     deframer->length);
+                               /*
+                                * Once we've extracted a complete frame
+                                * from the stream, we call it done and
+                                * bail out, resetting the framer to its
+                                * initial state, regardless of whether
+                                * we've consumed all of the stream.
+                                */
+                               goto reset_framer;
+                       case RAVE_SP_STX:
+                               dev_warn(dev, "Bad frame: STX before ETX\n");
+                               /*
+                                * If we encounter a second "start of
+                                * frame" marker before seeing the
+                                * corresponding "end of frame", we
+                                * reset the framer and ignore both the
+                                * frame started by the first SOF and
+                                * the frame started by the current SOF.
+                                *
+                                * NOTE: The above means that only the
+                                * frame started by a third SOF, sent
+                                * after this one, will have a chance
+                                * to get through.
+                                */
+                               goto reset_framer;
+                       case RAVE_SP_DLE:
+                               deframer->state = RAVE_SP_EXPECT_ESCAPED_DATA;
+                               /*
+                                * If we encounter an escape sequence,
+                                * we need to skip it and collect the
+                                * byte that follows. We do it by
+                                * forcing the next iteration of the
+                                * encompassing while loop.
+                                */
+                               continue;
+                       }
+                       /*
+                        * For the rest of the bytes, which are not
+                        * special, we do the same thing we do with
+                        * escaped data: collect them in the deframer
+                        * buffer
+                        */
+
+                       /* FALLTHROUGH */
+
+               case RAVE_SP_EXPECT_ESCAPED_DATA:
+                       deframer->data[deframer->length++] = byte;
+
+                       if (deframer->length == sizeof(deframer->data)) {
+                               dev_warn(dev, "Bad frame: Too long\n");
+                               /*
+                                * If the amount of data we've
+                                * accumulated for the current frame
+                                * exceeds the capacity of the
+                                * deframer's buffer, there's nothing
+                                * we can do but discard that data and
+                                * start assembling a new frame again
+                                */
+                               goto reset_framer;
+                       }
+
+                       /*
+                        * We've extracted the escaped byte, so now we
+                        * can go back to regular data collection
+                        */
+                       deframer->state = RAVE_SP_EXPECT_DATA;
+                       break;
+               }
+       }
+
+       /*
+        * The only way to get out of the above loop and end up here
+        * is through consuming all of the supplied data, so here we
+        * report that we processed it all.
+        */
+       return size;
+
+reset_framer:
+       /*
+        * NOTE: A number of codepaths that will drop us here will do
+        * so before consuming all 'size' bytes of the data passed by
+        * the serdev layer, which will re-execute this handler with
+        * the remainder of the Rx bytes once we report the actual
+        * number of bytes that we processed.
+        */
+       deframer->state  = RAVE_SP_EXPECT_SOF;
+       deframer->length = 0;
+
+       return src - buf;
+}
+
+static int rave_sp_rdu1_cmd_translate(enum rave_sp_command command)
+{
+       if (command >= RAVE_SP_CMD_STATUS &&
+           command <= RAVE_SP_CMD_CONTROL_EVENTS)
+               return command;
+
+       return -EINVAL;
+}
+
+static int rave_sp_rdu2_cmd_translate(enum rave_sp_command command)
+{
+       if (command >= RAVE_SP_CMD_GET_FIRMWARE_VERSION &&
+           command <= RAVE_SP_CMD_GET_GPIO_STATE)
+               return command;
+
+       if (command == RAVE_SP_CMD_REQ_COPPER_REV) {
+               /*
+                * As per RDU2 ICD 3.4.47 CMD_GET_COPPER_REV code is
+                * different from that for RDU1 and it is set to 0x28.
+                */
+               return 0x28;
+       }
+
+       return rave_sp_rdu1_cmd_translate(command);
+}
+
+static int rave_sp_default_cmd_translate(enum rave_sp_command command)
+{
+       /*
+        * All of the following command codes were taken from "Table :
+        * Communications Protocol Message Types" in section 3.3
+        * "MESSAGE TYPES" of Rave PIC24 ICD.
+        */
+       switch (command) {
+       case RAVE_SP_CMD_GET_FIRMWARE_VERSION:
+               return 0x11;
+       case RAVE_SP_CMD_GET_BOOTLOADER_VERSION:
+               return 0x12;
+       case RAVE_SP_CMD_BOOT_SOURCE:
+               return 0x14;
+       case RAVE_SP_CMD_SW_WDT:
+               return 0x1C;
+       case RAVE_SP_CMD_RESET:
+               return 0x1E;
+       case RAVE_SP_CMD_RESET_REASON:
+               return 0x1F;
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct rave_sp_checksum rave_sp_checksum_8b2c = {
+       .length     = 1,
+       .subroutine = csum_8b2c,
+};
+
+static const struct rave_sp_checksum rave_sp_checksum_ccitt = {
+       .length     = 2,
+       .subroutine = csum_ccitt,
+};
+
+static const struct rave_sp_variant rave_sp_legacy = {
+       .checksum = &rave_sp_checksum_8b2c,
+       .cmd = {
+               .translate = rave_sp_default_cmd_translate,
+       },
+};
+
+static const struct rave_sp_variant rave_sp_rdu1 = {
+       .checksum = &rave_sp_checksum_8b2c,
+       .cmd = {
+               .translate = rave_sp_rdu1_cmd_translate,
+       },
+};
+
+static const struct rave_sp_variant rave_sp_rdu2 = {
+       .checksum = &rave_sp_checksum_ccitt,
+       .cmd = {
+               .translate = rave_sp_rdu2_cmd_translate,
+       },
+};
+
+static const struct of_device_id rave_sp_dt_ids[] = {
+       { .compatible = "zii,rave-sp-niu",  .data = &rave_sp_legacy },
+       { .compatible = "zii,rave-sp-mezz", .data = &rave_sp_legacy },
+       { .compatible = "zii,rave-sp-esb",  .data = &rave_sp_legacy },
+       { .compatible = "zii,rave-sp-rdu1", .data = &rave_sp_rdu1   },
+       { .compatible = "zii,rave-sp-rdu2", .data = &rave_sp_rdu2   },
+       { /* sentinel */ }
+};
+
+static const struct serdev_device_ops rave_sp_serdev_device_ops = {
+       .receive_buf  = rave_sp_receive_buf,
+       .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int rave_sp_probe(struct serdev_device *serdev)
+{
+       struct device *dev = &serdev->dev;
+       struct rave_sp *sp;
+       u32 baud;
+       int ret;
+
+       if (of_property_read_u32(dev->of_node, "current-speed", &baud)) {
+               dev_err(dev,
+                       "'current-speed' is not specified in device node\n");
+               return -EINVAL;
+       }
+
+       sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
+       if (!sp)
+               return -ENOMEM;
+
+       sp->serdev = serdev;
+       dev_set_drvdata(dev, sp);
+
+       sp->variant = of_device_get_match_data(dev);
+       if (!sp->variant)
+               return -ENODEV;
+
+       mutex_init(&sp->bus_lock);
+       mutex_init(&sp->reply_lock);
+       BLOCKING_INIT_NOTIFIER_HEAD(&sp->event_notifier_list);
+
+       serdev_device_set_client_ops(serdev, &rave_sp_serdev_device_ops);
+       ret = devm_serdev_device_open(dev, serdev);
+       if (ret)
+               return ret;
+
+       serdev_device_set_baudrate(serdev, baud);
+
+       return devm_of_platform_populate(dev);
+}
+
+MODULE_DEVICE_TABLE(of, rave_sp_dt_ids);
+
+static struct serdev_device_driver rave_sp_drv = {
+       .probe                  = rave_sp_probe,
+       .driver = {
+               .name           = "rave-sp",
+               .of_match_table = rave_sp_dt_ids,
+       },
+};
+module_serdev_device_driver(rave_sp_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP core driver");
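
The framing rules described at the top of this file (STX, DATA, CHECKSUM, ETX, with <DLE> stuffing of control characters) can be exercised in isolation. The stand-alone sketch below mirrors what stuff() and rave_sp_write() do for the legacy 8-bit two's-complement checksum variant; the command and payload bytes are made up for illustration and are not part of the driver.

/*
 * Stand-alone sketch of RAVE SP framing with the csum_8b2c checksum.
 * The payload bytes are hypothetical and serve only to show how a
 * frame is assembled.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define STX 0x02
#define ETX 0x03
#define DLE 0x10

/* Append one byte to the frame, escaping control characters with DLE */
static size_t stuff_byte(uint8_t *dest, size_t pos, uint8_t byte)
{
	if (byte == STX || byte == ETX || byte == DLE)
		dest[pos++] = DLE;
	dest[pos++] = byte;
	return pos;
}

/* 8-bit two's-complement checksum over the unescaped payload */
static uint8_t csum_8b2c(const uint8_t *buf, size_t size)
{
	uint8_t sum = 0;

	while (size--)
		sum += *buf++;

	return (uint8_t)(1 + ~sum);
}

int main(void)
{
	const uint8_t data[] = { 0x1c, 0x01, 0x05 };	/* hypothetical cmd, ackid, arg */
	uint8_t frame[2 + 2 * (sizeof(data) + 1)];	/* worst case: all bytes escaped */
	uint8_t crc = csum_8b2c(data, sizeof(data));
	size_t i, pos = 0;

	frame[pos++] = STX;
	for (i = 0; i < sizeof(data); i++)
		pos = stuff_byte(frame, pos, data[i]);
	pos = stuff_byte(frame, pos, crc);	/* the checksum is escaped as well */
	frame[pos++] = ETX;

	for (i = 0; i < pos; i++)
		printf("%02x ", frame[i]);
	printf("\n");		/* prints: 02 1c 01 05 de 03 */

	return 0;
}
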
diff --git a/drivers/mfd/rtl8411.c b/drivers/mfd/rtl8411.c
deleted file mode 100644 (file)
index b3ae659..0000000
+++ /dev/null
@@ -1,508 +0,0 @@
-/* Driver for Realtek PCI-Express card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- *   Wei WANG <wei_wang@realsil.com.cn>
- *   Roger Tseng <rogerable@realtek.com>
- */
-
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
-
-#include "rtsx_pcr.h"
-
-static u8 rtl8411_get_ic_version(struct rtsx_pcr *pcr)
-{
-       u8 val;
-
-       rtsx_pci_read_register(pcr, SYS_VER, &val);
-       return val & 0x0F;
-}
-
-static int rtl8411b_is_qfn48(struct rtsx_pcr *pcr)
-{
-       u8 val = 0;
-
-       rtsx_pci_read_register(pcr, RTL8411B_PACKAGE_MODE, &val);
-
-       if (val & 0x2)
-               return 1;
-       else
-               return 0;
-}
-
-static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
-{
-       u32 reg1 = 0;
-       u8 reg3 = 0;
-
-       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg1);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1);
-
-       if (!rtsx_vendor_setting_valid(reg1))
-               return;
-
-       pcr->aspm_en = rtsx_reg_to_aspm(reg1);
-       pcr->sd30_drive_sel_1v8 =
-               map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg1));
-       pcr->card_drive_sel &= 0x3F;
-       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg1);
-
-       rtsx_pci_read_config_byte(pcr, PCR_SETTING_REG3, &reg3);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3);
-       pcr->sd30_drive_sel_3v3 = rtl8411_reg_to_sd30_drive_sel_3v3(reg3);
-}
-
-static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr)
-{
-       u32 reg = 0;
-
-       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
-
-       if (!rtsx_vendor_setting_valid(reg))
-               return;
-
-       pcr->aspm_en = rtsx_reg_to_aspm(reg);
-       pcr->sd30_drive_sel_1v8 =
-               map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg));
-       pcr->sd30_drive_sel_3v3 =
-               map_sd_drive(rtl8411b_reg_to_sd30_drive_sel_3v3(reg));
-}
-
-static void rtl8411_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
-{
-       rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
-}
-
-static int rtl8411_extra_init_hw(struct rtsx_pcr *pcr)
-{
-       rtsx_pci_init_cmd(pcr);
-
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
-                       0xFF, pcr->sd30_drive_sel_3v3);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL,
-                       CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
-
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rtl8411b_extra_init_hw(struct rtsx_pcr *pcr)
-{
-       rtsx_pci_init_cmd(pcr);
-
-       if (rtl8411b_is_qfn48(pcr))
-               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
-                               CARD_PULL_CTL3, 0xFF, 0xF5);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
-                       0xFF, pcr->sd30_drive_sel_3v3);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL,
-                       CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, FUNC_FORCE_CTL,
-                       0x06, 0x00);
-
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rtl8411_turn_on_led(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x00);
-}
-
-static int rtl8411_turn_off_led(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x01);
-}
-
-static int rtl8411_enable_auto_blink(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0xFF, 0x0D);
-}
-
-static int rtl8411_disable_auto_blink(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0x08, 0x00);
-}
-
-static int rtl8411_card_power_on(struct rtsx_pcr *pcr, int card)
-{
-       int err;
-
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       BPP_POWER_MASK, BPP_POWER_5_PERCENT_ON);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CTL,
-                       BPP_LDO_POWB, BPP_LDO_SUSPEND);
-       err = rtsx_pci_send_cmd(pcr, 100);
-       if (err < 0)
-               return err;
-
-       /* To avoid too large in-rush current */
-       udelay(150);
-
-       err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
-                       BPP_POWER_MASK, BPP_POWER_10_PERCENT_ON);
-       if (err < 0)
-               return err;
-
-       udelay(150);
-
-       err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
-                       BPP_POWER_MASK, BPP_POWER_15_PERCENT_ON);
-       if (err < 0)
-               return err;
-
-       udelay(150);
-
-       err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
-                       BPP_POWER_MASK, BPP_POWER_ON);
-       if (err < 0)
-               return err;
-
-       return rtsx_pci_write_register(pcr, LDO_CTL, BPP_LDO_POWB, BPP_LDO_ON);
-}
-
-static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)
-{
-       int err;
-
-       err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
-                       BPP_POWER_MASK, BPP_POWER_OFF);
-       if (err < 0)
-               return err;
-
-       return rtsx_pci_write_register(pcr, LDO_CTL,
-                       BPP_LDO_POWB, BPP_LDO_SUSPEND);
-}
-
-static int rtl8411_do_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage,
-               int bpp_tuned18_shift, int bpp_asic_1v8)
-{
-       u8 mask, val;
-       int err;
-
-       mask = (BPP_REG_TUNED18 << bpp_tuned18_shift) | BPP_PAD_MASK;
-       if (voltage == OUTPUT_3V3) {
-               err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
-               if (err < 0)
-                       return err;
-               val = (BPP_ASIC_3V3 << bpp_tuned18_shift) | BPP_PAD_3V3;
-       } else if (voltage == OUTPUT_1V8) {
-               err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
-               if (err < 0)
-                       return err;
-               val = (bpp_asic_1v8 << bpp_tuned18_shift) | BPP_PAD_1V8;
-       } else {
-               return -EINVAL;
-       }
-
-       return rtsx_pci_write_register(pcr, LDO_CTL, mask, val);
-}
-
-static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
-{
-       return rtl8411_do_switch_output_voltage(pcr, voltage,
-                       BPP_TUNED18_SHIFT_8411, BPP_ASIC_1V8);
-}
-
-static int rtl8402_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
-{
-       return rtl8411_do_switch_output_voltage(pcr, voltage,
-                       BPP_TUNED18_SHIFT_8402, BPP_ASIC_2V0);
-}
-
-static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
-{
-       unsigned int card_exist;
-
-       card_exist = rtsx_pci_readl(pcr, RTSX_BIPR);
-       card_exist &= CARD_EXIST;
-       if (!card_exist) {
-               /* Enable card CD */
-               rtsx_pci_write_register(pcr, CD_PAD_CTL,
-                               CD_DISABLE_MASK, CD_ENABLE);
-               /* Enable card interrupt */
-               rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x00);
-               return 0;
-       }
-
-       if (hweight32(card_exist) > 1) {
-               rtsx_pci_write_register(pcr, CARD_PWR_CTL,
-                               BPP_POWER_MASK, BPP_POWER_5_PERCENT_ON);
-               msleep(100);
-
-               card_exist = rtsx_pci_readl(pcr, RTSX_BIPR);
-               if (card_exist & MS_EXIST)
-                       card_exist = MS_EXIST;
-               else if (card_exist & SD_EXIST)
-                       card_exist = SD_EXIST;
-               else
-                       card_exist = 0;
-
-               rtsx_pci_write_register(pcr, CARD_PWR_CTL,
-                               BPP_POWER_MASK, BPP_POWER_OFF);
-
-               pcr_dbg(pcr, "After CD deglitch, card_exist = 0x%x\n",
-                       card_exist);
-       }
-
-       if (card_exist & MS_EXIST) {
-               /* Disable SD interrupt */
-               rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x40);
-               rtsx_pci_write_register(pcr, CD_PAD_CTL,
-                               CD_DISABLE_MASK, MS_CD_EN_ONLY);
-       } else if (card_exist & SD_EXIST) {
-               /* Disable MS interrupt */
-               rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x80);
-               rtsx_pci_write_register(pcr, CD_PAD_CTL,
-                               CD_DISABLE_MASK, SD_CD_EN_ONLY);
-       }
-
-       return card_exist;
-}
-
-static int rtl8411_conv_clk_and_div_n(int input, int dir)
-{
-       int output;
-
-       if (dir == CLK_TO_DIV_N)
-               output = input * 4 / 5 - 2;
-       else
-               output = (input + 2) * 5 / 4;
-
-       return output;
-}
-
-static const struct pcr_ops rtl8411_pcr_ops = {
-       .fetch_vendor_settings = rtl8411_fetch_vendor_settings,
-       .extra_init_hw = rtl8411_extra_init_hw,
-       .optimize_phy = NULL,
-       .turn_on_led = rtl8411_turn_on_led,
-       .turn_off_led = rtl8411_turn_off_led,
-       .enable_auto_blink = rtl8411_enable_auto_blink,
-       .disable_auto_blink = rtl8411_disable_auto_blink,
-       .card_power_on = rtl8411_card_power_on,
-       .card_power_off = rtl8411_card_power_off,
-       .switch_output_voltage = rtl8411_switch_output_voltage,
-       .cd_deglitch = rtl8411_cd_deglitch,
-       .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
-       .force_power_down = rtl8411_force_power_down,
-};
-
-static const struct pcr_ops rtl8402_pcr_ops = {
-       .fetch_vendor_settings = rtl8411_fetch_vendor_settings,
-       .extra_init_hw = rtl8411_extra_init_hw,
-       .optimize_phy = NULL,
-       .turn_on_led = rtl8411_turn_on_led,
-       .turn_off_led = rtl8411_turn_off_led,
-       .enable_auto_blink = rtl8411_enable_auto_blink,
-       .disable_auto_blink = rtl8411_disable_auto_blink,
-       .card_power_on = rtl8411_card_power_on,
-       .card_power_off = rtl8411_card_power_off,
-       .switch_output_voltage = rtl8402_switch_output_voltage,
-       .cd_deglitch = rtl8411_cd_deglitch,
-       .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
-       .force_power_down = rtl8411_force_power_down,
-};
-
-static const struct pcr_ops rtl8411b_pcr_ops = {
-       .fetch_vendor_settings = rtl8411b_fetch_vendor_settings,
-       .extra_init_hw = rtl8411b_extra_init_hw,
-       .optimize_phy = NULL,
-       .turn_on_led = rtl8411_turn_on_led,
-       .turn_off_led = rtl8411_turn_off_led,
-       .enable_auto_blink = rtl8411_enable_auto_blink,
-       .disable_auto_blink = rtl8411_disable_auto_blink,
-       .card_power_on = rtl8411_card_power_on,
-       .card_power_off = rtl8411_card_power_off,
-       .switch_output_voltage = rtl8411_switch_output_voltage,
-       .cd_deglitch = rtl8411_cd_deglitch,
-       .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
-       .force_power_down = rtl8411_force_power_down,
-};
-
-/* SD Pull Control Enable:
- *     SD_DAT[3:0] ==> pull up
- *     SD_CD       ==> pull up
- *     SD_WP       ==> pull up
- *     SD_CMD      ==> pull up
- *     SD_CLK      ==> pull down
- */
-static const u32 rtl8411_sd_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xA9),
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x09),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
-       0,
-};
-
-/* SD Pull Control Disable:
- *     SD_DAT[3:0] ==> pull down
- *     SD_CD       ==> pull up
- *     SD_WP       ==> pull down
- *     SD_CMD      ==> pull down
- *     SD_CLK      ==> pull down
- */
-static const u32 rtl8411_sd_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
-       0,
-};
-
-/* MS Pull Control Enable:
- *     MS CD       ==> pull up
- *     others      ==> pull down
- */
-static const u32 rtl8411_ms_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x05),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
-       0,
-};
-
-/* MS Pull Control Disable:
- *     MS CD       ==> pull up
- *     others      ==> pull down
- */
-static const u32 rtl8411_ms_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
-       0,
-};
-
-static const u32 rtl8411b_qfn64_sd_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x09 | 0xD0),
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
-       0,
-};
-
-static const u32 rtl8411b_qfn48_sd_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x69 | 0x90),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x08 | 0x11),
-       0,
-};
-
-static const u32 rtl8411b_qfn64_sd_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0),
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
-       0,
-};
-
-static const u32 rtl8411b_qfn48_sd_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
-       0,
-};
-
-static const u32 rtl8411b_qfn64_ms_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0),
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x05 | 0x50),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
-       0,
-};
-
-static const u32 rtl8411b_qfn48_ms_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
-       0,
-};
-
-static const u32 rtl8411b_qfn64_ms_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0),
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
-       0,
-};
-
-static const u32 rtl8411b_qfn48_ms_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
-       0,
-};
-
-static void rtl8411_init_common_params(struct rtsx_pcr *pcr)
-{
-       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
-       pcr->num_slots = 2;
-       pcr->flags = 0;
-       pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT;
-       pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
-       pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
-       pcr->aspm_en = ASPM_L1_EN;
-       pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
-       pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
-       pcr->ic_version = rtl8411_get_ic_version(pcr);
-}
-
-void rtl8411_init_params(struct rtsx_pcr *pcr)
-{
-       rtl8411_init_common_params(pcr);
-       pcr->ops = &rtl8411_pcr_ops;
-       set_pull_ctrl_tables(pcr, rtl8411);
-}
-
-void rtl8411b_init_params(struct rtsx_pcr *pcr)
-{
-       rtl8411_init_common_params(pcr);
-       pcr->ops = &rtl8411b_pcr_ops;
-       if (rtl8411b_is_qfn48(pcr))
-               set_pull_ctrl_tables(pcr, rtl8411b_qfn48);
-       else
-               set_pull_ctrl_tables(pcr, rtl8411b_qfn64);
-}
-
-void rtl8402_init_params(struct rtsx_pcr *pcr)
-{
-       rtl8411_init_common_params(pcr);
-       pcr->ops = &rtl8402_pcr_ops;
-       set_pull_ctrl_tables(pcr, rtl8411);
-}
diff --git a/drivers/mfd/rts5209.c b/drivers/mfd/rts5209.c
deleted file mode 100644 (file)
index b95beec..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/* Driver for Realtek PCI-Express card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- *   Wei WANG <wei_wang@realsil.com.cn>
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
-
-#include "rtsx_pcr.h"
-
-static u8 rts5209_get_ic_version(struct rtsx_pcr *pcr)
-{
-       u8 val;
-
-       val = rtsx_pci_readb(pcr, 0x1C);
-       return val & 0x0F;
-}
-
-static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
-{
-       u32 reg;
-
-       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
-
-       if (rts5209_vendor_setting1_valid(reg)) {
-               if (rts5209_reg_check_ms_pmos(reg))
-                       pcr->flags |= PCR_MS_PMOS;
-               pcr->aspm_en = rts5209_reg_to_aspm(reg);
-       }
-
-       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
-
-       if (rts5209_vendor_setting2_valid(reg)) {
-               pcr->sd30_drive_sel_1v8 =
-                       rts5209_reg_to_sd30_drive_sel_1v8(reg);
-               pcr->sd30_drive_sel_3v3 =
-                       rts5209_reg_to_sd30_drive_sel_3v3(reg);
-               pcr->card_drive_sel = rts5209_reg_to_card_drive_sel(reg);
-       }
-}
-
-static void rts5209_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
-{
-       rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
-}
-
-static int rts5209_extra_init_hw(struct rtsx_pcr *pcr)
-{
-       rtsx_pci_init_cmd(pcr);
-
-       /* Turn off LED */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO, 0xFF, 0x03);
-       /* Reset ASPM state to default value */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
-       /* Force CLKREQ# PIN to drive 0 to request clock */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
-       /* Configure GPIO as output */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO_DIR, 0xFF, 0x03);
-       /* Configure driving */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
-                       0xFF, pcr->sd30_drive_sel_3v3);
-
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rts5209_optimize_phy(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_phy_register(pcr, 0x00, 0xB966);
-}
-
-static int rts5209_turn_on_led(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x00);
-}
-
-static int rts5209_turn_off_led(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x01);
-}
-
-static int rts5209_enable_auto_blink(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0xFF, 0x0D);
-}
-
-static int rts5209_disable_auto_blink(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0x08, 0x00);
-}
-
-static int rts5209_card_power_on(struct rtsx_pcr *pcr, int card)
-{
-       int err;
-       u8 pwr_mask, partial_pwr_on, pwr_on;
-
-       pwr_mask = SD_POWER_MASK;
-       partial_pwr_on = SD_PARTIAL_POWER_ON;
-       pwr_on = SD_POWER_ON;
-
-       if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) {
-               pwr_mask = MS_POWER_MASK;
-               partial_pwr_on = MS_PARTIAL_POWER_ON;
-               pwr_on = MS_POWER_ON;
-       }
-
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       pwr_mask, partial_pwr_on);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x04);
-       err = rtsx_pci_send_cmd(pcr, 100);
-       if (err < 0)
-               return err;
-
-       /* To avoid too large an in-rush current */
-       udelay(150);
-
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, pwr_mask, pwr_on);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x00);
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
-{
-       u8 pwr_mask, pwr_off;
-
-       pwr_mask = SD_POWER_MASK;
-       pwr_off = SD_POWER_OFF;
-
-       if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) {
-               pwr_mask = MS_POWER_MASK;
-               pwr_off = MS_POWER_OFF;
-       }
-
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       pwr_mask | PMOS_STRG_MASK, pwr_off | PMOS_STRG_400mA);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x06);
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
-{
-       int err;
-
-       if (voltage == OUTPUT_3V3) {
-               err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
-               if (err < 0)
-                       return err;
-               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
-               if (err < 0)
-                       return err;
-       } else if (voltage == OUTPUT_1V8) {
-               err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
-               if (err < 0)
-                       return err;
-               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
-               if (err < 0)
-                       return err;
-       } else {
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static const struct pcr_ops rts5209_pcr_ops = {
-       .fetch_vendor_settings = rts5209_fetch_vendor_settings,
-       .extra_init_hw = rts5209_extra_init_hw,
-       .optimize_phy = rts5209_optimize_phy,
-       .turn_on_led = rts5209_turn_on_led,
-       .turn_off_led = rts5209_turn_off_led,
-       .enable_auto_blink = rts5209_enable_auto_blink,
-       .disable_auto_blink = rts5209_disable_auto_blink,
-       .card_power_on = rts5209_card_power_on,
-       .card_power_off = rts5209_card_power_off,
-       .switch_output_voltage = rts5209_switch_output_voltage,
-       .cd_deglitch = NULL,
-       .conv_clk_and_div_n = NULL,
-       .force_power_down = rts5209_force_power_down,
-};
-
-/* SD Pull Control Enable:
- *     SD_DAT[3:0] ==> pull up
- *     SD_CD       ==> pull up
- *     SD_WP       ==> pull up
- *     SD_CMD      ==> pull up
- *     SD_CLK      ==> pull down
- */
-static const u32 rts5209_sd_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
-       0,
-};
-
-/* SD Pull Control Disable:
- *     SD_DAT[3:0] ==> pull down
- *     SD_CD       ==> pull up
- *     SD_WP       ==> pull down
- *     SD_CMD      ==> pull down
- *     SD_CLK      ==> pull down
- */
-static const u32 rts5209_sd_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
-       0,
-};
-
-/* MS Pull Control Enable:
- *     MS CD       ==> pull up
- *     others      ==> pull down
- */
-static const u32 rts5209_ms_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
-       0,
-};
-
-/* MS Pull Control Disable:
- *     MS CD       ==> pull up
- *     others      ==> pull down
- */
-static const u32 rts5209_ms_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
-       0,
-};
-
-void rts5209_init_params(struct rtsx_pcr *pcr)
-{
-       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 |
-               EXTRA_CAPS_SD_SDR104 | EXTRA_CAPS_MMC_8BIT;
-       pcr->num_slots = 2;
-       pcr->ops = &rts5209_pcr_ops;
-
-       pcr->flags = 0;
-       pcr->card_drive_sel = RTS5209_CARD_DRIVE_DEFAULT;
-       pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
-       pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
-       pcr->aspm_en = ASPM_L1_EN;
-       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16);
-       pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
-
-       pcr->ic_version = rts5209_get_ic_version(pcr);
-       pcr->sd_pull_ctl_enable_tbl = rts5209_sd_pull_ctl_enable_tbl;
-       pcr->sd_pull_ctl_disable_tbl = rts5209_sd_pull_ctl_disable_tbl;
-       pcr->ms_pull_ctl_enable_tbl = rts5209_ms_pull_ctl_enable_tbl;
-       pcr->ms_pull_ctl_disable_tbl = rts5209_ms_pull_ctl_disable_tbl;
-}
diff --git a/drivers/mfd/rts5227.c b/drivers/mfd/rts5227.c
deleted file mode 100644 (file)
index ff296a4..0000000
+++ /dev/null
@@ -1,374 +0,0 @@
-/* Driver for Realtek PCI-Express card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- *   Wei WANG <wei_wang@realsil.com.cn>
- *   Roger Tseng <rogerable@realtek.com>
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
-
-#include "rtsx_pcr.h"
-
-static u8 rts5227_get_ic_version(struct rtsx_pcr *pcr)
-{
-       u8 val;
-
-       rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
-       return val & 0x0F;
-}
-
-static void rts5227_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
-{
-       u8 driving_3v3[4][3] = {
-               {0x13, 0x13, 0x13},
-               {0x96, 0x96, 0x96},
-               {0x7F, 0x7F, 0x7F},
-               {0x96, 0x96, 0x96},
-       };
-       u8 driving_1v8[4][3] = {
-               {0x99, 0x99, 0x99},
-               {0xAA, 0xAA, 0xAA},
-               {0xFE, 0xFE, 0xFE},
-               {0xB3, 0xB3, 0xB3},
-       };
-       u8 (*driving)[3], drive_sel;
-
-       if (voltage == OUTPUT_3V3) {
-               driving = driving_3v3;
-               drive_sel = pcr->sd30_drive_sel_3v3;
-       } else {
-               driving = driving_1v8;
-               drive_sel = pcr->sd30_drive_sel_1v8;
-       }
-
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
-                       0xFF, driving[drive_sel][0]);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
-                       0xFF, driving[drive_sel][1]);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
-                       0xFF, driving[drive_sel][2]);
-}
-
-static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
-{
-       u32 reg;
-
-       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
-
-       if (!rtsx_vendor_setting_valid(reg))
-               return;
-
-       pcr->aspm_en = rtsx_reg_to_aspm(reg);
-       pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
-       pcr->card_drive_sel &= 0x3F;
-       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
-
-       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
-       pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
-       if (rtsx_reg_check_reverse_socket(reg))
-               pcr->flags |= PCR_REVERSE_SOCKET;
-}
-
-static void rts5227_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
-{
-       /* Set relink_time to 0 */
-       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
-       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, 0xFF, 0);
-       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
-
-       if (pm_state == HOST_ENTER_S3)
-               rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x10);
-
-       rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
-}
-
-static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
-{
-       u16 cap;
-
-       rtsx_pci_init_cmd(pcr);
-
-       /* Configure GPIO as output */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
-       /* Reset ASPM state to default value */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
-       /* Switch LDO3318 source from DV33 to card_3v3 */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
-       /* LED shine disabled, set initial shine cycle period */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
-       /* Configure LTR */
-       pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cap);
-       if (cap & PCI_EXP_DEVCTL2_LTR_EN)
-               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LTR_CTL, 0xFF, 0xA3);
-       /* Configure OBFF */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG, 0x03, 0x03);
-       /* Configure driving */
-       rts5227_fill_driving(pcr, OUTPUT_3V3);
-       /* Configure force_clock_req */
-       if (pcr->flags & PCR_REVERSE_SOCKET)
-               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB8, 0xB8);
-       else
-               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB8, 0x88);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, pcr->reg_pm_ctrl3, 0x10, 0x00);
-
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rts5227_optimize_phy(struct rtsx_pcr *pcr)
-{
-       int err;
-
-       err = rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00);
-       if (err < 0)
-               return err;
-
-       /* Optimize RX sensitivity */
-       return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42);
-}
-
-static int rts5227_turn_on_led(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
-}
-
-static int rts5227_turn_off_led(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
-}
-
-static int rts5227_enable_auto_blink(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
-}
-
-static int rts5227_disable_auto_blink(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
-}
-
-static int rts5227_card_power_on(struct rtsx_pcr *pcr, int card)
-{
-       int err;
-
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       SD_POWER_MASK, SD_PARTIAL_POWER_ON);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x02);
-       err = rtsx_pci_send_cmd(pcr, 100);
-       if (err < 0)
-               return err;
-
-       /* To avoid too large an in-rush current */
-       udelay(150);
-
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       SD_POWER_MASK, SD_POWER_ON);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x06);
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card)
-{
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       SD_POWER_MASK | PMOS_STRG_MASK,
-                       SD_POWER_OFF | PMOS_STRG_400mA);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x00);
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
-{
-       int err;
-
-       if (voltage == OUTPUT_3V3) {
-               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
-               if (err < 0)
-                       return err;
-       } else if (voltage == OUTPUT_1V8) {
-               err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
-               if (err < 0)
-                       return err;
-               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C80 | 0x24);
-               if (err < 0)
-                       return err;
-       } else {
-               return -EINVAL;
-       }
-
-       /* set pad drive */
-       rtsx_pci_init_cmd(pcr);
-       rts5227_fill_driving(pcr, voltage);
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static const struct pcr_ops rts5227_pcr_ops = {
-       .fetch_vendor_settings = rts5227_fetch_vendor_settings,
-       .extra_init_hw = rts5227_extra_init_hw,
-       .optimize_phy = rts5227_optimize_phy,
-       .turn_on_led = rts5227_turn_on_led,
-       .turn_off_led = rts5227_turn_off_led,
-       .enable_auto_blink = rts5227_enable_auto_blink,
-       .disable_auto_blink = rts5227_disable_auto_blink,
-       .card_power_on = rts5227_card_power_on,
-       .card_power_off = rts5227_card_power_off,
-       .switch_output_voltage = rts5227_switch_output_voltage,
-       .cd_deglitch = NULL,
-       .conv_clk_and_div_n = NULL,
-       .force_power_down = rts5227_force_power_down,
-};
-
-/* SD Pull Control Enable:
- *     SD_DAT[3:0] ==> pull up
- *     SD_CD       ==> pull up
- *     SD_WP       ==> pull up
- *     SD_CMD      ==> pull up
- *     SD_CLK      ==> pull down
- */
-static const u32 rts5227_sd_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
-       0,
-};
-
-/* SD Pull Control Disable:
- *     SD_DAT[3:0] ==> pull down
- *     SD_CD       ==> pull up
- *     SD_WP       ==> pull down
- *     SD_CMD      ==> pull down
- *     SD_CLK      ==> pull down
- */
-static const u32 rts5227_sd_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
-       0,
-};
-
-/* MS Pull Control Enable:
- *     MS CD       ==> pull up
- *     others      ==> pull down
- */
-static const u32 rts5227_ms_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
-       0,
-};
-
-/* MS Pull Control Disable:
- *     MS CD       ==> pull up
- *     others      ==> pull down
- */
-static const u32 rts5227_ms_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
-       0,
-};
-
-void rts5227_init_params(struct rtsx_pcr *pcr)
-{
-       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
-       pcr->num_slots = 2;
-       pcr->ops = &rts5227_pcr_ops;
-
-       pcr->flags = 0;
-       pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
-       pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
-       pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
-       pcr->aspm_en = ASPM_L1_EN;
-       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
-       pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7);
-
-       pcr->ic_version = rts5227_get_ic_version(pcr);
-       pcr->sd_pull_ctl_enable_tbl = rts5227_sd_pull_ctl_enable_tbl;
-       pcr->sd_pull_ctl_disable_tbl = rts5227_sd_pull_ctl_disable_tbl;
-       pcr->ms_pull_ctl_enable_tbl = rts5227_ms_pull_ctl_enable_tbl;
-       pcr->ms_pull_ctl_disable_tbl = rts5227_ms_pull_ctl_disable_tbl;
-
-       pcr->reg_pm_ctrl3 = PM_CTRL3;
-}
-
-static int rts522a_optimize_phy(struct rtsx_pcr *pcr)
-{
-       int err;
-
-       err = rtsx_pci_write_register(pcr, RTS522A_PM_CTRL3, D3_DELINK_MODE_EN,
-               0x00);
-       if (err < 0)
-               return err;
-
-       if (is_version(pcr, 0x522A, IC_VER_A)) {
-               err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
-                       PHY_RCR2_INIT_27S);
-               if (err)
-                       return err;
-
-               rtsx_pci_write_phy_register(pcr, PHY_RCR1, PHY_RCR1_INIT_27S);
-               rtsx_pci_write_phy_register(pcr, PHY_FLD0, PHY_FLD0_INIT_27S);
-               rtsx_pci_write_phy_register(pcr, PHY_FLD3, PHY_FLD3_INIT_27S);
-               rtsx_pci_write_phy_register(pcr, PHY_FLD4, PHY_FLD4_INIT_27S);
-       }
-
-       return 0;
-}
-
-static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
-{
-       rts5227_extra_init_hw(pcr);
-
-       rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG,
-               FUNC_FORCE_UPME_XMT_DBG);
-       rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04);
-       rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
-       rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 0xFF, 0x11);
-
-       return 0;
-}
-
-/* rts522a operations are mainly derived from rts5227, except for PHY/HW init settings.
- */
-static const struct pcr_ops rts522a_pcr_ops = {
-       .fetch_vendor_settings = rts5227_fetch_vendor_settings,
-       .extra_init_hw = rts522a_extra_init_hw,
-       .optimize_phy = rts522a_optimize_phy,
-       .turn_on_led = rts5227_turn_on_led,
-       .turn_off_led = rts5227_turn_off_led,
-       .enable_auto_blink = rts5227_enable_auto_blink,
-       .disable_auto_blink = rts5227_disable_auto_blink,
-       .card_power_on = rts5227_card_power_on,
-       .card_power_off = rts5227_card_power_off,
-       .switch_output_voltage = rts5227_switch_output_voltage,
-       .cd_deglitch = NULL,
-       .conv_clk_and_div_n = NULL,
-       .force_power_down = rts5227_force_power_down,
-};
-
-void rts522a_init_params(struct rtsx_pcr *pcr)
-{
-       rts5227_init_params(pcr);
-
-       pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
-}
diff --git a/drivers/mfd/rts5229.c b/drivers/mfd/rts5229.c
deleted file mode 100644 (file)
index 9ed9dc8..0000000
+++ /dev/null
@@ -1,273 +0,0 @@
-/* Driver for Realtek PCI-Express card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- *   Wei WANG <wei_wang@realsil.com.cn>
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
-
-#include "rtsx_pcr.h"
-
-static u8 rts5229_get_ic_version(struct rtsx_pcr *pcr)
-{
-       u8 val;
-
-       rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
-       return val & 0x0F;
-}
-
-static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
-{
-       u32 reg;
-
-       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
-
-       if (!rtsx_vendor_setting_valid(reg))
-               return;
-
-       pcr->aspm_en = rtsx_reg_to_aspm(reg);
-       pcr->sd30_drive_sel_1v8 =
-               map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg));
-       pcr->card_drive_sel &= 0x3F;
-       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
-
-       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
-       pcr->sd30_drive_sel_3v3 =
-               map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg));
-}
-
-static void rts5229_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
-{
-       rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
-}
-
-static int rts5229_extra_init_hw(struct rtsx_pcr *pcr)
-{
-       rtsx_pci_init_cmd(pcr);
-
-       /* Configure GPIO as output */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
-       /* Reset ASPM state to default value */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
-       /* Force CLKREQ# PIN to drive 0 to request clock */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
-       /* Switch LDO3318 source from DV33 to card_3v3 */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
-       /* LED shine disabled, set initial shine cycle period */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
-       /* Configure driving */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
-                       0xFF, pcr->sd30_drive_sel_3v3);
-
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rts5229_optimize_phy(struct rtsx_pcr *pcr)
-{
-       /* Optimize RX sensitivity */
-       return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42);
-}
-
-static int rts5229_turn_on_led(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
-}
-
-static int rts5229_turn_off_led(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
-}
-
-static int rts5229_enable_auto_blink(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
-}
-
-static int rts5229_disable_auto_blink(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
-}
-
-static int rts5229_card_power_on(struct rtsx_pcr *pcr, int card)
-{
-       int err;
-
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       SD_POWER_MASK, SD_PARTIAL_POWER_ON);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x02);
-       err = rtsx_pci_send_cmd(pcr, 100);
-       if (err < 0)
-               return err;
-
-       /* To avoid too large an in-rush current */
-       udelay(150);
-
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       SD_POWER_MASK, SD_POWER_ON);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x06);
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card)
-{
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       SD_POWER_MASK | PMOS_STRG_MASK,
-                       SD_POWER_OFF | PMOS_STRG_400mA);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x00);
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
-{
-       int err;
-
-       if (voltage == OUTPUT_3V3) {
-               err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
-               if (err < 0)
-                       return err;
-               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
-               if (err < 0)
-                       return err;
-       } else if (voltage == OUTPUT_1V8) {
-               err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
-               if (err < 0)
-                       return err;
-               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
-               if (err < 0)
-                       return err;
-       } else {
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static const struct pcr_ops rts5229_pcr_ops = {
-       .fetch_vendor_settings = rts5229_fetch_vendor_settings,
-       .extra_init_hw = rts5229_extra_init_hw,
-       .optimize_phy = rts5229_optimize_phy,
-       .turn_on_led = rts5229_turn_on_led,
-       .turn_off_led = rts5229_turn_off_led,
-       .enable_auto_blink = rts5229_enable_auto_blink,
-       .disable_auto_blink = rts5229_disable_auto_blink,
-       .card_power_on = rts5229_card_power_on,
-       .card_power_off = rts5229_card_power_off,
-       .switch_output_voltage = rts5229_switch_output_voltage,
-       .cd_deglitch = NULL,
-       .conv_clk_and_div_n = NULL,
-       .force_power_down = rts5229_force_power_down,
-};
-
-/* SD Pull Control Enable:
- *     SD_DAT[3:0] ==> pull up
- *     SD_CD       ==> pull up
- *     SD_WP       ==> pull up
- *     SD_CMD      ==> pull up
- *     SD_CLK      ==> pull down
- */
-static const u32 rts5229_sd_pull_ctl_enable_tbl1[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
-       0,
-};
-
-/* For RTS5229 version C */
-static const u32 rts5229_sd_pull_ctl_enable_tbl2[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD9),
-       0,
-};
-
-/* SD Pull Control Disable:
- *     SD_DAT[3:0] ==> pull down
- *     SD_CD       ==> pull up
- *     SD_WP       ==> pull down
- *     SD_CMD      ==> pull down
- *     SD_CLK      ==> pull down
- */
-static const u32 rts5229_sd_pull_ctl_disable_tbl1[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
-       0,
-};
-
-/* For RTS5229 version C */
-static const u32 rts5229_sd_pull_ctl_disable_tbl2[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE5),
-       0,
-};
-
-/* MS Pull Control Enable:
- *     MS CD       ==> pull up
- *     others      ==> pull down
- */
-static const u32 rts5229_ms_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
-       0,
-};
-
-/* MS Pull Control Disable:
- *     MS CD       ==> pull up
- *     others      ==> pull down
- */
-static const u32 rts5229_ms_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
-       0,
-};
-
-void rts5229_init_params(struct rtsx_pcr *pcr)
-{
-       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
-       pcr->num_slots = 2;
-       pcr->ops = &rts5229_pcr_ops;
-
-       pcr->flags = 0;
-       pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
-       pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
-       pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
-       pcr->aspm_en = ASPM_L1_EN;
-       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
-       pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6);
-
-       pcr->ic_version = rts5229_get_ic_version(pcr);
-       if (pcr->ic_version == IC_VER_C) {
-               pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl2;
-               pcr->sd_pull_ctl_disable_tbl = rts5229_sd_pull_ctl_disable_tbl2;
-       } else {
-               pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl1;
-               pcr->sd_pull_ctl_disable_tbl = rts5229_sd_pull_ctl_disable_tbl1;
-       }
-       pcr->ms_pull_ctl_enable_tbl = rts5229_ms_pull_ctl_enable_tbl;
-       pcr->ms_pull_ctl_disable_tbl = rts5229_ms_pull_ctl_disable_tbl;
-}
diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
deleted file mode 100644 (file)
index 7fcf37b..0000000
+++ /dev/null
@@ -1,741 +0,0 @@
-/* Driver for Realtek PCI-Express card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- *   Wei WANG <wei_wang@realsil.com.cn>
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
-
-#include "rtsx_pcr.h"
-
-static u8 rts5249_get_ic_version(struct rtsx_pcr *pcr)
-{
-       u8 val;
-
-       rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
-       return val & 0x0F;
-}
-
-static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
-{
-       u8 driving_3v3[4][3] = {
-               {0x11, 0x11, 0x18},
-               {0x55, 0x55, 0x5C},
-               {0xFF, 0xFF, 0xFF},
-               {0x96, 0x96, 0x96},
-       };
-       u8 driving_1v8[4][3] = {
-               {0xC4, 0xC4, 0xC4},
-               {0x3C, 0x3C, 0x3C},
-               {0xFE, 0xFE, 0xFE},
-               {0xB3, 0xB3, 0xB3},
-       };
-       u8 (*driving)[3], drive_sel;
-
-       if (voltage == OUTPUT_3V3) {
-               driving = driving_3v3;
-               drive_sel = pcr->sd30_drive_sel_3v3;
-       } else {
-               driving = driving_1v8;
-               drive_sel = pcr->sd30_drive_sel_1v8;
-       }
-
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
-                       0xFF, driving[drive_sel][0]);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
-                       0xFF, driving[drive_sel][1]);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
-                       0xFF, driving[drive_sel][2]);
-}
-
-static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
-{
-       u32 reg;
-
-       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
-
-       if (!rtsx_vendor_setting_valid(reg)) {
-               pcr_dbg(pcr, "skip fetch vendor setting\n");
-               return;
-       }
-
-       pcr->aspm_en = rtsx_reg_to_aspm(reg);
-       pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
-       pcr->card_drive_sel &= 0x3F;
-       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
-
-       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
-       pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
-       if (rtsx_reg_check_reverse_socket(reg))
-               pcr->flags |= PCR_REVERSE_SOCKET;
-}
-
-static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
-{
-       /* Set relink_time to 0 */
-       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
-       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, 0xFF, 0);
-       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
-
-       if (pm_state == HOST_ENTER_S3)
-               rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
-                       D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
-
-       rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
-}
-
-static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
-{
-       struct rtsx_cr_option *option = &(pcr->option);
-       u32 lval;
-
-       if (CHK_PCI_PID(pcr, PID_524A))
-               rtsx_pci_read_config_dword(pcr,
-                       PCR_ASPM_SETTING_REG1, &lval);
-       else
-               rtsx_pci_read_config_dword(pcr,
-                       PCR_ASPM_SETTING_REG2, &lval);
-
-       if (lval & ASPM_L1_1_EN_MASK)
-               rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
-
-       if (lval & ASPM_L1_2_EN_MASK)
-               rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
-
-       if (lval & PM_L1_1_EN_MASK)
-               rtsx_set_dev_flag(pcr, PM_L1_1_EN);
-
-       if (lval & PM_L1_2_EN_MASK)
-               rtsx_set_dev_flag(pcr, PM_L1_2_EN);
-
-       if (option->ltr_en) {
-               u16 val;
-
-               pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
-               if (val & PCI_EXP_DEVCTL2_LTR_EN) {
-                       option->ltr_enabled = true;
-                       option->ltr_active = true;
-                       rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
-               } else {
-                       option->ltr_enabled = false;
-               }
-       }
-}
-
-static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
-{
-       struct rtsx_cr_option *option = &(pcr->option);
-
-       if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
-                               | PM_L1_1_EN | PM_L1_2_EN))
-               option->force_clkreq_0 = false;
-       else
-               option->force_clkreq_0 = true;
-
-       return 0;
-}
-
-static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
-{
-       struct rtsx_cr_option *option = &(pcr->option);
-
-       rts5249_init_from_cfg(pcr);
-       rts5249_init_from_hw(pcr);
-
-       rtsx_pci_init_cmd(pcr);
-
-       /* Reset L1SUB Config */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
-       /* Configure GPIO as output */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
-       /* Reset ASPM state to default value */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
-       /* Switch LDO3318 source from DV33 to card_3v3 */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
-       /* LED shine disabled, set initial shine cycle period */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
-       /* Configure driving */
-       rts5249_fill_driving(pcr, OUTPUT_3V3);
-       if (pcr->flags & PCR_REVERSE_SOCKET)
-               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
-       else
-               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
-
-       /*
-        * If force_clkreq_0 is enabled, the CLKREQ# PIN will be forced
-        * to drive low, and we forcibly request the clock.
-        */
-       if (option->force_clkreq_0)
-               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
-                       FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
-       else
-               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
-                       FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
-
-       return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
-}
-
-static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
-{
-       int err;
-
-       err = rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00);
-       if (err < 0)
-               return err;
-
-       err = rtsx_pci_write_phy_register(pcr, PHY_REV,
-                       PHY_REV_RESV | PHY_REV_RXIDLE_LATCHED |
-                       PHY_REV_P1_EN | PHY_REV_RXIDLE_EN |
-                       PHY_REV_CLKREQ_TX_EN | PHY_REV_RX_PWST |
-                       PHY_REV_CLKREQ_DT_1_0 | PHY_REV_STOP_CLKRD |
-                       PHY_REV_STOP_CLKWR);
-       if (err < 0)
-               return err;
-
-       msleep(1);
-
-       err = rtsx_pci_write_phy_register(pcr, PHY_BPCR,
-                       PHY_BPCR_IBRXSEL | PHY_BPCR_IBTXSEL |
-                       PHY_BPCR_IB_FILTER | PHY_BPCR_CMIRROR_EN);
-       if (err < 0)
-               return err;
-
-       err = rtsx_pci_write_phy_register(pcr, PHY_PCR,
-                       PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
-                       PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 |
-                       PHY_PCR_RSSI_EN | PHY_PCR_RX10K);
-       if (err < 0)
-               return err;
-
-       err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
-                       PHY_RCR2_EMPHASE_EN | PHY_RCR2_NADJR |
-                       PHY_RCR2_CDR_SR_2 | PHY_RCR2_FREQSEL_12 |
-                       PHY_RCR2_CDR_SC_12P | PHY_RCR2_CALIB_LATE);
-       if (err < 0)
-               return err;
-
-       err = rtsx_pci_write_phy_register(pcr, PHY_FLD4,
-                       PHY_FLD4_FLDEN_SEL | PHY_FLD4_REQ_REF |
-                       PHY_FLD4_RXAMP_OFF | PHY_FLD4_REQ_ADDA |
-                       PHY_FLD4_BER_COUNT | PHY_FLD4_BER_TIMER |
-                       PHY_FLD4_BER_CHK_EN);
-       if (err < 0)
-               return err;
-       err = rtsx_pci_write_phy_register(pcr, PHY_RDR,
-                       PHY_RDR_RXDSEL_1_9 | PHY_SSC_AUTO_PWD);
-       if (err < 0)
-               return err;
-       err = rtsx_pci_write_phy_register(pcr, PHY_RCR1,
-                       PHY_RCR1_ADP_TIME_4 | PHY_RCR1_VCO_COARSE);
-       if (err < 0)
-               return err;
-       err = rtsx_pci_write_phy_register(pcr, PHY_FLD3,
-                       PHY_FLD3_TIMER_4 | PHY_FLD3_TIMER_6 |
-                       PHY_FLD3_RXDELINK);
-       if (err < 0)
-               return err;
-
-       return rtsx_pci_write_phy_register(pcr, PHY_TUNE,
-                       PHY_TUNE_TUNEREF_1_0 | PHY_TUNE_VBGSEL_1252 |
-                       PHY_TUNE_SDBUS_33 | PHY_TUNE_TUNED18 |
-                       PHY_TUNE_TUNED12 | PHY_TUNE_TUNEA12);
-}
-
-static int rtsx_base_turn_on_led(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
-}
-
-static int rtsx_base_turn_off_led(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
-}
-
-static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
-}
-
-static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
-{
-       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
-}
-
-static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card)
-{
-       int err;
-
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       SD_POWER_MASK, SD_VCC_PARTIAL_POWER_ON);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x02);
-       err = rtsx_pci_send_cmd(pcr, 100);
-       if (err < 0)
-               return err;
-
-       msleep(5);
-
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       SD_POWER_MASK, SD_VCC_POWER_ON);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x06);
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rtsx_base_card_power_off(struct rtsx_pcr *pcr, int card)
-{
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
-                       SD_POWER_MASK, SD_POWER_OFF);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0x00);
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
-{
-       int err;
-       u16 append;
-
-       switch (voltage) {
-       case OUTPUT_3V3:
-               err = rtsx_pci_update_phy(pcr, PHY_TUNE, PHY_TUNE_VOLTAGE_MASK,
-                       PHY_TUNE_VOLTAGE_3V3);
-               if (err < 0)
-                       return err;
-               break;
-       case OUTPUT_1V8:
-               append = PHY_TUNE_D18_1V8;
-               if (CHK_PCI_PID(pcr, 0x5249)) {
-                       err = rtsx_pci_update_phy(pcr, PHY_BACR,
-                               PHY_BACR_BASIC_MASK, 0);
-                       if (err < 0)
-                               return err;
-                       append = PHY_TUNE_D18_1V7;
-               }
-
-               err = rtsx_pci_update_phy(pcr, PHY_TUNE, PHY_TUNE_VOLTAGE_MASK,
-                       append);
-               if (err < 0)
-                       return err;
-               break;
-       default:
-               pcr_dbg(pcr, "unknown output voltage %d\n", voltage);
-               return -EINVAL;
-       }
-
-       /* set pad drive */
-       rtsx_pci_init_cmd(pcr);
-       rts5249_fill_driving(pcr, voltage);
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static void rts5249_set_aspm(struct rtsx_pcr *pcr, bool enable)
-{
-       struct rtsx_cr_option *option = &pcr->option;
-       u8 val = 0;
-
-       if (pcr->aspm_enabled == enable)
-               return;
-
-       if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
-               if (enable)
-                       val = pcr->aspm_en;
-               rtsx_pci_update_cfg_byte(pcr,
-                       pcr->pcie_cap + PCI_EXP_LNKCTL,
-                       ASPM_MASK_NEG, val);
-       } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
-               u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
-
-               if (!enable)
-                       val = FORCE_ASPM_CTL0;
-               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
-       }
-
-       pcr->aspm_enabled = enable;
-}
-
-static const struct pcr_ops rts5249_pcr_ops = {
-       .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
-       .extra_init_hw = rts5249_extra_init_hw,
-       .optimize_phy = rts5249_optimize_phy,
-       .turn_on_led = rtsx_base_turn_on_led,
-       .turn_off_led = rtsx_base_turn_off_led,
-       .enable_auto_blink = rtsx_base_enable_auto_blink,
-       .disable_auto_blink = rtsx_base_disable_auto_blink,
-       .card_power_on = rtsx_base_card_power_on,
-       .card_power_off = rtsx_base_card_power_off,
-       .switch_output_voltage = rtsx_base_switch_output_voltage,
-       .force_power_down = rtsx_base_force_power_down,
-       .set_aspm = rts5249_set_aspm,
-};
-
-/* SD Pull Control Enable:
- *     SD_DAT[3:0] ==> pull up
- *     SD_CD       ==> pull up
- *     SD_WP       ==> pull up
- *     SD_CMD      ==> pull up
- *     SD_CLK      ==> pull down
- */
-static const u32 rts5249_sd_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA),
-       0,
-};
-
-/* SD Pull Control Disable:
- *     SD_DAT[3:0] ==> pull down
- *     SD_CD       ==> pull up
- *     SD_WP       ==> pull down
- *     SD_CMD      ==> pull down
- *     SD_CLK      ==> pull down
- */
-static const u32 rts5249_sd_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
-       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
-       0,
-};
-
-/* MS Pull Control Enable:
- *     MS CD       ==> pull up
- *     others      ==> pull down
- */
-static const u32 rts5249_ms_pull_ctl_enable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
-       0,
-};
-
-/* MS Pull Control Disable:
- *     MS CD       ==> pull up
- *     others      ==> pull down
- */
-static const u32 rts5249_ms_pull_ctl_disable_tbl[] = {
-       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
-       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
-       0,
-};
-
-void rts5249_init_params(struct rtsx_pcr *pcr)
-{
-       struct rtsx_cr_option *option = &(pcr->option);
-
-       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
-       pcr->num_slots = 2;
-       pcr->ops = &rts5249_pcr_ops;
-
-       pcr->flags = 0;
-       pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
-       pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
-       pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
-       pcr->aspm_en = ASPM_L1_EN;
-       pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
-       pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
-
-       pcr->ic_version = rts5249_get_ic_version(pcr);
-       pcr->sd_pull_ctl_enable_tbl = rts5249_sd_pull_ctl_enable_tbl;
-       pcr->sd_pull_ctl_disable_tbl = rts5249_sd_pull_ctl_disable_tbl;
-       pcr->ms_pull_ctl_enable_tbl = rts5249_ms_pull_ctl_enable_tbl;
-       pcr->ms_pull_ctl_disable_tbl = rts5249_ms_pull_ctl_disable_tbl;
-
-       pcr->reg_pm_ctrl3 = PM_CTRL3;
-
-       option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
-                               | LTR_L1SS_PWR_GATE_EN);
-       option->ltr_en = true;
-
-       /* Init latency of active, idle, L1OFF to 60us, 300us, 3ms */
-       option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
-       option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
-       option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
-       option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
-       option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
-       option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF;
-       option->ltr_l1off_snooze_sspwrgate =
-               LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF;
-}
-
-static int rts524a_write_phy(struct rtsx_pcr *pcr, u8 addr, u16 val)
-{
-       addr = addr & 0x80 ? (addr & 0x7F) | 0x40 : addr;
-
-       return __rtsx_pci_write_phy_register(pcr, addr, val);
-}
-
-static int rts524a_read_phy(struct rtsx_pcr *pcr, u8 addr, u16 *val)
-{
-       addr = addr & 0x80 ? (addr & 0x7F) | 0x40 : addr;
-
-       return __rtsx_pci_read_phy_register(pcr, addr, val);
-}
-
-static int rts524a_optimize_phy(struct rtsx_pcr *pcr)
-{
-       int err;
-
-       err = rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3,
-               D3_DELINK_MODE_EN, 0x00);
-       if (err < 0)
-               return err;
-
-       rtsx_pci_write_phy_register(pcr, PHY_PCR,
-               PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
-               PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 | PHY_PCR_RSSI_EN);
-       rtsx_pci_write_phy_register(pcr, PHY_SSCCR3,
-               PHY_SSCCR3_STEP_IN | PHY_SSCCR3_CHECK_DELAY);
-
-       if (is_version(pcr, 0x524A, IC_VER_A)) {
-               rtsx_pci_write_phy_register(pcr, PHY_SSCCR3,
-                       PHY_SSCCR3_STEP_IN | PHY_SSCCR3_CHECK_DELAY);
-               rtsx_pci_write_phy_register(pcr, PHY_SSCCR2,
-                       PHY_SSCCR2_PLL_NCODE | PHY_SSCCR2_TIME0 |
-                       PHY_SSCCR2_TIME2_WIDTH);
-               rtsx_pci_write_phy_register(pcr, PHY_ANA1A,
-                       PHY_ANA1A_TXR_LOOPBACK | PHY_ANA1A_RXT_BIST |
-                       PHY_ANA1A_TXR_BIST | PHY_ANA1A_REV);
-               rtsx_pci_write_phy_register(pcr, PHY_ANA1D,
-                       PHY_ANA1D_DEBUG_ADDR);
-               rtsx_pci_write_phy_register(pcr, PHY_DIG1E,
-                       PHY_DIG1E_REV | PHY_DIG1E_D0_X_D1 |
-                       PHY_DIG1E_RX_ON_HOST | PHY_DIG1E_RCLK_REF_HOST |
-                       PHY_DIG1E_RCLK_TX_EN_KEEP |
-                       PHY_DIG1E_RCLK_TX_TERM_KEEP |
-                       PHY_DIG1E_RCLK_RX_EIDLE_ON | PHY_DIG1E_TX_TERM_KEEP |
-                       PHY_DIG1E_RX_TERM_KEEP | PHY_DIG1E_TX_EN_KEEP |
-                       PHY_DIG1E_RX_EN_KEEP);
-       }
-
-       rtsx_pci_write_phy_register(pcr, PHY_ANA08,
-               PHY_ANA08_RX_EQ_DCGAIN | PHY_ANA08_SEL_RX_EN |
-               PHY_ANA08_RX_EQ_VAL | PHY_ANA08_SCP | PHY_ANA08_SEL_IPI);
-
-       return 0;
-}
-
-static int rts524a_extra_init_hw(struct rtsx_pcr *pcr)
-{
-       rts5249_extra_init_hw(pcr);
-
-       rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
-               FORCE_ASPM_L1_EN, FORCE_ASPM_L1_EN);
-       rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
-       rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_VCC_LMT_EN,
-               LDO_VCC_LMT_EN);
-       rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
-       if (is_version(pcr, 0x524A, IC_VER_A)) {
-               rtsx_pci_write_register(pcr, LDO_DV18_CFG,
-                       LDO_DV18_SR_MASK, LDO_DV18_SR_DF);
-               rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
-                       LDO_VCC_REF_TUNE_MASK, LDO_VCC_REF_1V2);
-               rtsx_pci_write_register(pcr, LDO_VIO_CFG,
-                       LDO_VIO_REF_TUNE_MASK, LDO_VIO_REF_1V2);
-               rtsx_pci_write_register(pcr, LDO_VIO_CFG,
-                       LDO_VIO_SR_MASK, LDO_VIO_SR_DF);
-               rtsx_pci_write_register(pcr, LDO_DV12S_CFG,
-                       LDO_REF12_TUNE_MASK, LDO_REF12_TUNE_DF);
-               rtsx_pci_write_register(pcr, SD40_LDO_CTL1,
-                       SD40_VIO_TUNE_MASK, SD40_VIO_TUNE_1V7);
-       }
-
-       return 0;
-}
-
-static void rts5250_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
-{
-       struct rtsx_cr_option *option = &(pcr->option);
-
-       u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
-       int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
-       int aspm_L1_1, aspm_L1_2;
-       u8 val = 0;
-
-       aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
-       aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
-
-       if (active) {
-               /* Run, latency: 60us */
-               if (aspm_L1_1)
-                       val = option->ltr_l1off_snooze_sspwrgate;
-       } else {
-               /* L1off, latency: 300us */
-               if (aspm_L1_2)
-                       val = option->ltr_l1off_sspwrgate;
-       }
-
-       if (aspm_L1_1 || aspm_L1_2) {
-               if (rtsx_check_dev_flag(pcr,
-                                       LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
-                       if (card_exist)
-                               val &= ~L1OFF_MBIAS2_EN_5250;
-                       else
-                               val |= L1OFF_MBIAS2_EN_5250;
-               }
-       }
-       rtsx_set_l1off_sub(pcr, val);
-}
-
-static const struct pcr_ops rts524a_pcr_ops = {
-       .write_phy = rts524a_write_phy,
-       .read_phy = rts524a_read_phy,
-       .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
-       .extra_init_hw = rts524a_extra_init_hw,
-       .optimize_phy = rts524a_optimize_phy,
-       .turn_on_led = rtsx_base_turn_on_led,
-       .turn_off_led = rtsx_base_turn_off_led,
-       .enable_auto_blink = rtsx_base_enable_auto_blink,
-       .disable_auto_blink = rtsx_base_disable_auto_blink,
-       .card_power_on = rtsx_base_card_power_on,
-       .card_power_off = rtsx_base_card_power_off,
-       .switch_output_voltage = rtsx_base_switch_output_voltage,
-       .force_power_down = rtsx_base_force_power_down,
-       .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
-       .set_aspm = rts5249_set_aspm,
-};
-
-void rts524a_init_params(struct rtsx_pcr *pcr)
-{
-       rts5249_init_params(pcr);
-       pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
-       pcr->option.ltr_l1off_snooze_sspwrgate =
-               LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
-
-       pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
-       pcr->ops = &rts524a_pcr_ops;
-}
-
-static int rts525a_card_power_on(struct rtsx_pcr *pcr, int card)
-{
-       rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
-               LDO_VCC_TUNE_MASK, LDO_VCC_3V3);
-       return rtsx_base_card_power_on(pcr, card);
-}
-
-static int rts525a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
-{
-       switch (voltage) {
-       case OUTPUT_3V3:
-               rtsx_pci_write_register(pcr, LDO_CONFIG2,
-                       LDO_D3318_MASK, LDO_D3318_33V);
-               rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0);
-               break;
-       case OUTPUT_1V8:
-               rtsx_pci_write_register(pcr, LDO_CONFIG2,
-                       LDO_D3318_MASK, LDO_D3318_18V);
-               rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8,
-                       SD_IO_USING_1V8);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       rtsx_pci_init_cmd(pcr);
-       rts5249_fill_driving(pcr, voltage);
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-static int rts525a_optimize_phy(struct rtsx_pcr *pcr)
-{
-       int err;
-
-       err = rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3,
-               D3_DELINK_MODE_EN, 0x00);
-       if (err < 0)
-               return err;
-
-       rtsx_pci_write_phy_register(pcr, _PHY_FLD0,
-               _PHY_FLD0_CLK_REQ_20C | _PHY_FLD0_RX_IDLE_EN |
-               _PHY_FLD0_BIT_ERR_RSTN | _PHY_FLD0_BER_COUNT |
-               _PHY_FLD0_BER_TIMER | _PHY_FLD0_CHECK_EN);
-
-       rtsx_pci_write_phy_register(pcr, _PHY_ANA03,
-               _PHY_ANA03_TIMER_MAX | _PHY_ANA03_OOBS_DEB_EN |
-               _PHY_CMU_DEBUG_EN);
-
-       if (is_version(pcr, 0x525A, IC_VER_A))
-               rtsx_pci_write_phy_register(pcr, _PHY_REV0,
-                       _PHY_REV0_FILTER_OUT | _PHY_REV0_CDR_BYPASS_PFD |
-                       _PHY_REV0_CDR_RX_IDLE_BYPASS);
-
-       return 0;
-}
-
-static int rts525a_extra_init_hw(struct rtsx_pcr *pcr)
-{
-       rts5249_extra_init_hw(pcr);
-
-       rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
-       if (is_version(pcr, 0x525A, IC_VER_A)) {
-               rtsx_pci_write_register(pcr, L1SUB_CONFIG2,
-                       L1SUB_AUTO_CFG, L1SUB_AUTO_CFG);
-               rtsx_pci_write_register(pcr, RREF_CFG,
-                       RREF_VBGSEL_MASK, RREF_VBGSEL_1V25);
-               rtsx_pci_write_register(pcr, LDO_VIO_CFG,
-                       LDO_VIO_TUNE_MASK, LDO_VIO_1V7);
-               rtsx_pci_write_register(pcr, LDO_DV12S_CFG,
-                       LDO_D12_TUNE_MASK, LDO_D12_TUNE_DF);
-               rtsx_pci_write_register(pcr, LDO_AV12S_CFG,
-                       LDO_AV12S_TUNE_MASK, LDO_AV12S_TUNE_DF);
-               rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
-                       LDO_VCC_LMTVTH_MASK, LDO_VCC_LMTVTH_2A);
-               rtsx_pci_write_register(pcr, OOBS_CONFIG,
-                       OOBS_AUTOK_DIS | OOBS_VAL_MASK, 0x89);
-       }
-
-       return 0;
-}
-
-static const struct pcr_ops rts525a_pcr_ops = {
-       .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
-       .extra_init_hw = rts525a_extra_init_hw,
-       .optimize_phy = rts525a_optimize_phy,
-       .turn_on_led = rtsx_base_turn_on_led,
-       .turn_off_led = rtsx_base_turn_off_led,
-       .enable_auto_blink = rtsx_base_enable_auto_blink,
-       .disable_auto_blink = rtsx_base_disable_auto_blink,
-       .card_power_on = rts525a_card_power_on,
-       .card_power_off = rtsx_base_card_power_off,
-       .switch_output_voltage = rts525a_switch_output_voltage,
-       .force_power_down = rtsx_base_force_power_down,
-       .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
-       .set_aspm = rts5249_set_aspm,
-};
-
-void rts525a_init_params(struct rtsx_pcr *pcr)
-{
-       rts5249_init_params(pcr);
-       pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
-       pcr->option.ltr_l1off_snooze_sspwrgate =
-               LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
-
-       pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
-       pcr->ops = &rts525a_pcr_ops;
-}
-
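The RTS524A and RTS525A support above is layered on the RTS5249 code: each *_init_params() routine first calls rts5249_init_params() and then overrides only the per-chip differences (the PM_CTRL3 register, the L1OFF power-gate defaults and the pcr_ops hook table). A further variant would follow the same shape; a minimal hypothetical sketch (rts52xx_* are illustrative names, not symbols from this patch):

	void rts52xx_init_params(struct rtsx_pcr *pcr)
	{
		rts5249_init_params(pcr);		/* inherit the common defaults */
		pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;	/* override what differs       */
		pcr->ops = &rts52xx_pcr_ops;		/* hypothetical hook table     */
	}
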
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
deleted file mode 100644 (file)
index 590fb9a..0000000
+++ /dev/null
@@ -1,1569 +0,0 @@
-/* Driver for Realtek PCI-Express card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- *   Wei WANG <wei_wang@realsil.com.cn>
- */
-
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/idr.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_pci.h>
-#include <linux/mmc/card.h>
-#include <asm/unaligned.h>
-
-#include "rtsx_pcr.h"
-
-static bool msi_en = true;
-module_param(msi_en, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(msi_en, "Enable MSI");
-
-static DEFINE_IDR(rtsx_pci_idr);
-static DEFINE_SPINLOCK(rtsx_pci_lock);
-
-static struct mfd_cell rtsx_pcr_cells[] = {
-       [RTSX_SD_CARD] = {
-               .name = DRV_NAME_RTSX_PCI_SDMMC,
-       },
-       [RTSX_MS_CARD] = {
-               .name = DRV_NAME_RTSX_PCI_MS,
-       },
-};
-
-static const struct pci_device_id rtsx_pci_ids[] = {
-       { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
-       { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
-       { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
-       { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
-       { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
-       { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
-       { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
-       { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
-       { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
-       { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
-       { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
-
-static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
-{
-       rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
-               0xFC, pcr->aspm_en);
-}
-
-static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
-{
-       rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
-               0xFC, 0);
-}
-
-int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
-{
-       rtsx_pci_write_register(pcr, MSGTXDATA0,
-                               MASK_8_BIT_DEF, (u8) (latency & 0xFF));
-       rtsx_pci_write_register(pcr, MSGTXDATA1,
-                               MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
-       rtsx_pci_write_register(pcr, MSGTXDATA2,
-                               MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
-       rtsx_pci_write_register(pcr, MSGTXDATA3,
-                               MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
-       rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
-               LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
-
-       return 0;
-}
-
-int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
-{
-       if (pcr->ops->set_ltr_latency)
-               return pcr->ops->set_ltr_latency(pcr, latency);
-       else
-               return rtsx_comm_set_ltr_latency(pcr, latency);
-}
-
-static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
-{
-       struct rtsx_cr_option *option = &pcr->option;
-
-       if (pcr->aspm_enabled == enable)
-               return;
-
-       if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
-               if (enable)
-                       rtsx_pci_enable_aspm(pcr);
-               else
-                       rtsx_pci_disable_aspm(pcr);
-       } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
-               u8 mask = FORCE_ASPM_VAL_MASK;
-               u8 val = 0;
-
-               if (enable)
-                       val = pcr->aspm_en;
-               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL,  mask, val);
-       }
-
-       pcr->aspm_enabled = enable;
-}
-
-static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
-{
-       if (pcr->ops->set_aspm)
-               pcr->ops->set_aspm(pcr, false);
-       else
-               rtsx_comm_set_aspm(pcr, false);
-}
-
-int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
-{
-       rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
-
-       return 0;
-}
-
-void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
-{
-       if (pcr->ops->set_l1off_cfg_sub_d0)
-               pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
-}
-
-static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
-{
-       struct rtsx_cr_option *option = &pcr->option;
-
-       rtsx_disable_aspm(pcr);
-
-       if (option->ltr_enabled)
-               rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
-
-       if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
-               rtsx_set_l1off_sub_cfg_d0(pcr, 1);
-}
-
-void rtsx_pm_full_on(struct rtsx_pcr *pcr)
-{
-       if (pcr->ops->full_on)
-               pcr->ops->full_on(pcr);
-       else
-               rtsx_comm_pm_full_on(pcr);
-}
-
-void rtsx_pci_start_run(struct rtsx_pcr *pcr)
-{
-       /* If pci device removed, don't queue idle work any more */
-       if (pcr->remove_pci)
-               return;
-
-       if (pcr->state != PDEV_STAT_RUN) {
-               pcr->state = PDEV_STAT_RUN;
-               if (pcr->ops->enable_auto_blink)
-                       pcr->ops->enable_auto_blink(pcr);
-               rtsx_pm_full_on(pcr);
-       }
-
-       mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
-
-int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
-{
-       int i;
-       u32 val = HAIMR_WRITE_START;
-
-       val |= (u32)(addr & 0x3FFF) << 16;
-       val |= (u32)mask << 8;
-       val |= (u32)data;
-
-       rtsx_pci_writel(pcr, RTSX_HAIMR, val);
-
-       for (i = 0; i < MAX_RW_REG_CNT; i++) {
-               val = rtsx_pci_readl(pcr, RTSX_HAIMR);
-               if ((val & HAIMR_TRANS_END) == 0) {
-                       if (data != (u8)val)
-                               return -EIO;
-                       return 0;
-               }
-       }
-
-       return -ETIMEDOUT;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
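rtsx_pci_write_register() packs the 14-bit register address, an update mask and the new data into a single HAIMR word, then polls until the controller clears HAIMR_TRANS_END; the mask selects which bits of the register are updated. A minimal usage sketch, taken from the LED helpers in rts5249.c above:

	/* Set bit 1 of GPIO_CTL and leave the remaining bits alone:
	 * the mask picks the bits to update, data supplies their values.
	 */
	err = rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
	if (err < 0)
		return err;
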
-
-int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
-{
-       u32 val = HAIMR_READ_START;
-       int i;
-
-       val |= (u32)(addr & 0x3FFF) << 16;
-       rtsx_pci_writel(pcr, RTSX_HAIMR, val);
-
-       for (i = 0; i < MAX_RW_REG_CNT; i++) {
-               val = rtsx_pci_readl(pcr, RTSX_HAIMR);
-               if ((val & HAIMR_TRANS_END) == 0)
-                       break;
-       }
-
-       if (i >= MAX_RW_REG_CNT)
-               return -ETIMEDOUT;
-
-       if (data)
-               *data = (u8)(val & 0xFF);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
-
-int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
-{
-       int err, i, finished = 0;
-       u8 tmp;
-
-       rtsx_pci_init_cmd(pcr);
-
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);
-
-       err = rtsx_pci_send_cmd(pcr, 100);
-       if (err < 0)
-               return err;
-
-       for (i = 0; i < 100000; i++) {
-               err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
-               if (err < 0)
-                       return err;
-
-               if (!(tmp & 0x80)) {
-                       finished = 1;
-                       break;
-               }
-       }
-
-       if (!finished)
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
-int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
-{
-       if (pcr->ops->write_phy)
-               return pcr->ops->write_phy(pcr, addr, val);
-
-       return __rtsx_pci_write_phy_register(pcr, addr, val);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
-
-int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
-{
-       int err, i, finished = 0;
-       u16 data;
-       u8 *ptr, tmp;
-
-       rtsx_pci_init_cmd(pcr);
-
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);
-
-       err = rtsx_pci_send_cmd(pcr, 100);
-       if (err < 0)
-               return err;
-
-       for (i = 0; i < 100000; i++) {
-               err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
-               if (err < 0)
-                       return err;
-
-               if (!(tmp & 0x80)) {
-                       finished = 1;
-                       break;
-               }
-       }
-
-       if (!finished)
-               return -ETIMEDOUT;
-
-       rtsx_pci_init_cmd(pcr);
-
-       rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
-       rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);
-
-       err = rtsx_pci_send_cmd(pcr, 100);
-       if (err < 0)
-               return err;
-
-       ptr = rtsx_pci_get_cmd_data(pcr);
-       data = ((u16)ptr[1] << 8) | ptr[0];
-
-       if (val)
-               *val = data;
-
-       return 0;
-}
-
-int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
-{
-       if (pcr->ops->read_phy)
-               return pcr->ops->read_phy(pcr, addr, val);
-
-       return __rtsx_pci_read_phy_register(pcr, addr, val);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
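Both PHY accessors share one handshake: the address (plus, for a write, the 16-bit value split across PHYDATA0/PHYDATA1) is queued, PHYRWCTL is kicked with 0x81 for a write or 0x80 for a read, and bit 7 of PHYRWCTL is polled until it clears. Most call sites go through the rtsx_pci_update_phy() read-modify-write wrapper built on these two accessors, e.g. (from rtsx_base_switch_output_voltage() earlier in this patch):

	/* Update only the voltage field of PHY_TUNE, preserving the rest. */
	err = rtsx_pci_update_phy(pcr, PHY_TUNE, PHY_TUNE_VOLTAGE_MASK,
		PHY_TUNE_VOLTAGE_3V3);
	if (err < 0)
		return err;
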
-
-void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
-{
-       rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
-       rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
-
-       rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
-       rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
-
-void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
-               u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
-{
-       unsigned long flags;
-       u32 val = 0;
-       u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
-
-       val |= (u32)(cmd_type & 0x03) << 30;
-       val |= (u32)(reg_addr & 0x3FFF) << 16;
-       val |= (u32)mask << 8;
-       val |= (u32)data;
-
-       spin_lock_irqsave(&pcr->lock, flags);
-       ptr += pcr->ci;
-       if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
-               put_unaligned_le32(val, ptr);
-               ptr++;
-               pcr->ci++;
-       }
-       spin_unlock_irqrestore(&pcr->lock, flags);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
-
-void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
-{
-       u32 val = 1 << 31;
-
-       rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
-
-       val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
-       /* Hardware Auto Response */
-       val |= 0x40000000;
-       rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
-
-int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
-{
-       struct completion trans_done;
-       u32 val = 1 << 31;
-       long timeleft;
-       unsigned long flags;
-       int err = 0;
-
-       spin_lock_irqsave(&pcr->lock, flags);
-
-       /* set up data structures for the wakeup system */
-       pcr->done = &trans_done;
-       pcr->trans_result = TRANS_NOT_READY;
-       init_completion(&trans_done);
-
-       rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
-
-       val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
-       /* Hardware Auto Response */
-       val |= 0x40000000;
-       rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
-
-       spin_unlock_irqrestore(&pcr->lock, flags);
-
-       /* Wait for TRANS_OK_INT */
-       timeleft = wait_for_completion_interruptible_timeout(
-                       &trans_done, msecs_to_jiffies(timeout));
-       if (timeleft <= 0) {
-               pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
-               err = -ETIMEDOUT;
-               goto finish_send_cmd;
-       }
-
-       spin_lock_irqsave(&pcr->lock, flags);
-       if (pcr->trans_result == TRANS_RESULT_FAIL)
-               err = -EINVAL;
-       else if (pcr->trans_result == TRANS_RESULT_OK)
-               err = 0;
-       else if (pcr->trans_result == TRANS_NO_DEVICE)
-               err = -ENODEV;
-       spin_unlock_irqrestore(&pcr->lock, flags);
-
-finish_send_cmd:
-       spin_lock_irqsave(&pcr->lock, flags);
-       pcr->done = NULL;
-       spin_unlock_irqrestore(&pcr->lock, flags);
-
-       if ((err < 0) && (err != -ENODEV))
-               rtsx_pci_stop_cmd(pcr);
-
-       if (pcr->finish_me)
-               complete(pcr->finish_me);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
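Batched register access in this driver always follows the same three steps: rtsx_pci_init_cmd() resets the queue index, rtsx_pci_add_cmd() appends 32-bit command words (2-bit command type, 14-bit register address, mask, data) to the host command buffer, and rtsx_pci_send_cmd() hands the buffer to the controller via RTSX_HCBAR/RTSX_HCBCTLR and sleeps on a completion that the interrupt handler signals. The usual calling pattern, as used throughout this file:

	rtsx_pci_init_cmd(pcr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
	err = rtsx_pci_send_cmd(pcr, 100);	/* 100 ms timeout */
	if (err < 0)
		return err;
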
-
-static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
-               dma_addr_t addr, unsigned int len, int end)
-{
-       u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
-       u64 val;
-       u8 option = SG_VALID | SG_TRANS_DATA;
-
-       pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
-
-       if (end)
-               option |= SG_END;
-       val = ((u64)addr << 32) | ((u64)len << 12) | option;
-
-       put_unaligned_le64(val, ptr);
-       pcr->sgi++;
-}
-
-int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read, int timeout)
-{
-       int err = 0, count;
-
-       pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
-       count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
-       if (count < 1)
-               return -EINVAL;
-       pcr_dbg(pcr, "DMA mapping count: %d\n", count);
-
-       err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
-
-       rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
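rtsx_pci_transfer_data() is the one-call wrapper around the three exported pieces below: it maps the scatterlist for the required DMA direction, runs the ADMA transfer, and unmaps the list again. A minimal usage sketch (hypothetical buffer and timeout, not taken from this patch):

	/* DMA 512 bytes from the card into a kernel buffer `buf`, waiting
	 * up to 3 seconds for the TRANS_OK/TRANS_FAIL interrupt.
	 */
	struct scatterlist sg;

	sg_init_one(&sg, buf, 512);
	err = rtsx_pci_transfer_data(pcr, &sg, 1, true, 3000);
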
-
-int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read)
-{
-       enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
-       if (pcr->remove_pci)
-               return -EINVAL;
-
-       if ((sglist == NULL) || (num_sg <= 0))
-               return -EINVAL;
-
-       return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
-
-void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read)
-{
-       enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
-       dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
-
-int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int count, bool read, int timeout)
-{
-       struct completion trans_done;
-       struct scatterlist *sg;
-       dma_addr_t addr;
-       long timeleft;
-       unsigned long flags;
-       unsigned int len;
-       int i, err = 0;
-       u32 val;
-       u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
-
-       if (pcr->remove_pci)
-               return -ENODEV;
-
-       if ((sglist == NULL) || (count < 1))
-               return -EINVAL;
-
-       val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
-       pcr->sgi = 0;
-       for_each_sg(sglist, sg, count, i) {
-               addr = sg_dma_address(sg);
-               len = sg_dma_len(sg);
-               rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
-       }
-
-       spin_lock_irqsave(&pcr->lock, flags);
-
-       pcr->done = &trans_done;
-       pcr->trans_result = TRANS_NOT_READY;
-       init_completion(&trans_done);
-       rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
-       rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
-
-       spin_unlock_irqrestore(&pcr->lock, flags);
-
-       timeleft = wait_for_completion_interruptible_timeout(
-                       &trans_done, msecs_to_jiffies(timeout));
-       if (timeleft <= 0) {
-               pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
-               err = -ETIMEDOUT;
-               goto out;
-       }
-
-       spin_lock_irqsave(&pcr->lock, flags);
-       if (pcr->trans_result == TRANS_RESULT_FAIL) {
-               err = -EILSEQ;
-               if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
-                       pcr->dma_error_count++;
-       }
-
-       else if (pcr->trans_result == TRANS_NO_DEVICE)
-               err = -ENODEV;
-       spin_unlock_irqrestore(&pcr->lock, flags);
-
-out:
-       spin_lock_irqsave(&pcr->lock, flags);
-       pcr->done = NULL;
-       spin_unlock_irqrestore(&pcr->lock, flags);
-
-       if ((err < 0) && (err != -ENODEV))
-               rtsx_pci_stop_cmd(pcr);
-
-       if (pcr->finish_me)
-               complete(pcr->finish_me);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
-
-int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
-{
-       int err;
-       int i, j;
-       u16 reg;
-       u8 *ptr;
-
-       if (buf_len > 512)
-               buf_len = 512;
-
-       ptr = buf;
-       reg = PPBUF_BASE2;
-       for (i = 0; i < buf_len / 256; i++) {
-               rtsx_pci_init_cmd(pcr);
-
-               for (j = 0; j < 256; j++)
-                       rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
-
-               err = rtsx_pci_send_cmd(pcr, 250);
-               if (err < 0)
-                       return err;
-
-               memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
-               ptr += 256;
-       }
-
-       if (buf_len % 256) {
-               rtsx_pci_init_cmd(pcr);
-
-               for (j = 0; j < buf_len % 256; j++)
-                       rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
-
-               err = rtsx_pci_send_cmd(pcr, 250);
-               if (err < 0)
-                       return err;
-       }
-
-       memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
-
-int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
-{
-       int err;
-       int i, j;
-       u16 reg;
-       u8 *ptr;
-
-       if (buf_len > 512)
-               buf_len = 512;
-
-       ptr = buf;
-       reg = PPBUF_BASE2;
-       for (i = 0; i < buf_len / 256; i++) {
-               rtsx_pci_init_cmd(pcr);
-
-               for (j = 0; j < 256; j++) {
-                       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
-                                       reg++, 0xFF, *ptr);
-                       ptr++;
-               }
-
-               err = rtsx_pci_send_cmd(pcr, 250);
-               if (err < 0)
-                       return err;
-       }
-
-       if (buf_len % 256) {
-               rtsx_pci_init_cmd(pcr);
-
-               for (j = 0; j < buf_len % 256; j++) {
-                       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
-                                       reg++, 0xFF, *ptr);
-                       ptr++;
-               }
-
-               err = rtsx_pci_send_cmd(pcr, 250);
-               if (err < 0)
-                       return err;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
-
-static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
-{
-       rtsx_pci_init_cmd(pcr);
-
-       while (*tbl & 0xFFFF0000) {
-               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
-                               (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
-               tbl++;
-       }
-
-       return rtsx_pci_send_cmd(pcr, 100);
-}
-
-int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
-{
-       const u32 *tbl;
-
-       if (card == RTSX_SD_CARD)
-               tbl = pcr->sd_pull_ctl_enable_tbl;
-       else if (card == RTSX_MS_CARD)
-               tbl = pcr->ms_pull_ctl_enable_tbl;
-       else
-               return -EINVAL;
-
-       return rtsx_pci_set_pull_ctl(pcr, tbl);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
-
-int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
-{
-       const u32 *tbl;
-
-       if (card == RTSX_SD_CARD)
-               tbl = pcr->sd_pull_ctl_disable_tbl;
-       else if (card == RTSX_MS_CARD)
-               tbl = pcr->ms_pull_ctl_disable_tbl;
-       else
-               return -EINVAL;
-
-
-       return rtsx_pci_set_pull_ctl(pcr, tbl);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
-
-static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
-{
-       pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN;
-
-       if (pcr->num_slots > 1)
-               pcr->bier |= MS_INT_EN;
-
-       /* Enable Bus Interrupt */
-       rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
-
-       pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
-}
-
-static inline u8 double_ssc_depth(u8 depth)
-{
-       return ((depth > 1) ? (depth - 1) : depth);
-}
-
-static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
-{
-       if (div > CLK_DIV_1) {
-               if (ssc_depth > (div - 1))
-                       ssc_depth -= (div - 1);
-               else
-                       ssc_depth = SSC_DEPTH_4M;
-       }
-
-       return ssc_depth;
-}
-
-int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
-               u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
-{
-       int err, clk;
-       u8 n, clk_divider, mcu_cnt, div;
-       static const u8 depth[] = {
-               [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
-               [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
-               [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
-               [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
-               [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
-       };
-
-       if (initial_mode) {
-               /* We use 250k(around) here, in initial stage */
-               clk_divider = SD_CLK_DIVIDE_128;
-               card_clock = 30000000;
-       } else {
-               clk_divider = SD_CLK_DIVIDE_0;
-       }
-       err = rtsx_pci_write_register(pcr, SD_CFG1,
-                       SD_CLK_DIVIDE_MASK, clk_divider);
-       if (err < 0)
-               return err;
-
-       /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
-       if (card_clock == UHS_SDR104_MAX_DTR &&
-           pcr->dma_error_count &&
-           PCI_PID(pcr) == RTS5227_DEVICE_ID)
-               card_clock = UHS_SDR104_MAX_DTR -
-                       (pcr->dma_error_count * 20000000);
-
-       card_clock /= 1000000;
-       pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
-
-       clk = card_clock;
-       if (!initial_mode && double_clk)
-               clk = card_clock * 2;
-       pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
-               clk, pcr->cur_clock);
-
-       if (clk == pcr->cur_clock)
-               return 0;
-
-       if (pcr->ops->conv_clk_and_div_n)
-               n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
-       else
-               n = (u8)(clk - 2);
-       if ((clk <= 2) || (n > MAX_DIV_N_PCR))
-               return -EINVAL;
-
-       mcu_cnt = (u8)(125/clk + 3);
-       if (mcu_cnt > 15)
-               mcu_cnt = 15;
-
-       /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
-       div = CLK_DIV_1;
-       while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
-               if (pcr->ops->conv_clk_and_div_n) {
-                       int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
-                                       DIV_N_TO_CLK) * 2;
-                       n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
-                                       CLK_TO_DIV_N);
-               } else {
-                       n = (n + 2) * 2 - 2;
-               }
-               div++;
-       }
-       pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
-
-       ssc_depth = depth[ssc_depth];
-       if (double_clk)
-               ssc_depth = double_ssc_depth(ssc_depth);
-
-       ssc_depth = revise_ssc_depth(ssc_depth, div);
-       pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
-
-       rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
-                       CLK_LOW_FREQ, CLK_LOW_FREQ);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
-                       0xFF, (div << 4) | mcu_cnt);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
-                       SSC_DEPTH_MASK, ssc_depth);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
-       if (vpclk) {
-               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
-                               PHASE_NOT_RESET, 0);
-               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
-                               PHASE_NOT_RESET, PHASE_NOT_RESET);
-       }
-
-       err = rtsx_pci_send_cmd(pcr, 2000);
-       if (err < 0)
-               return err;
-
-       /* Wait SSC clock stable */
-       udelay(10);
-       err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
-       if (err < 0)
-               return err;
-
-       pcr->cur_clock = clk;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
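Two numbers in this function are worth working out explicitly (arithmetic derived from the code above):

	In initial (identification) mode the SSC clock is parked at 30 MHz
	and the SD divider at 128, so the card sees about 30 MHz / 128,
	roughly 234 kHz, the "250k(around)" mentioned in the comment.

	Without a conv_clk_and_div_n hook, n = clk - 2, so each loop pass
	n = (n + 2) * 2 - 2 doubles the internal SSC clock while div is
	bumped one step, leaving the card clock unchanged. A 50 MHz request
	gives n = 48; if that is still below MIN_DIV_N_PCR, a single pass
	yields n = 98 with div = CLK_DIV_2, i.e. a 100 MHz SSC clock
	divided back down to 50 MHz at the card.

The SDR104 clamp near the top of the function ties into pcr->dma_error_count, which rtsx_pci_dma_transfer() above increments on each failed transfer; on the RTS5227 every recorded error shaves 20 MHz off the requested card clock.
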
-
-int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
-{
-       if (pcr->ops->card_power_on)
-               return pcr->ops->card_power_on(pcr, card);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
-
-int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
-{
-       if (pcr->ops->card_power_off)
-               return pcr->ops->card_power_off(pcr, card);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
-
-int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
-{
-       static const unsigned int cd_mask[] = {
-               [RTSX_SD_CARD] = SD_EXIST,
-               [RTSX_MS_CARD] = MS_EXIST
-       };
-
-       if (!(pcr->flags & PCR_MS_PMOS)) {
-               /* When using single PMOS, accessing card is not permitted
-                * if the existing card is not the designated one.
-                */
-               if (pcr->card_exist & (~cd_mask[card]))
-                       return -EIO;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
-
-int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
-{
-       if (pcr->ops->switch_output_voltage)
-               return pcr->ops->switch_output_voltage(pcr, voltage);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
-
-unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
-{
-       unsigned int val;
-
-       val = rtsx_pci_readl(pcr, RTSX_BIPR);
-       if (pcr->ops->cd_deglitch)
-               val = pcr->ops->cd_deglitch(pcr);
-
-       return val;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
-
-void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
-{
-       struct completion finish;
-
-       pcr->finish_me = &finish;
-       init_completion(&finish);
-
-       if (pcr->done)
-               complete(pcr->done);
-
-       if (!pcr->remove_pci)
-               rtsx_pci_stop_cmd(pcr);
-
-       wait_for_completion_interruptible_timeout(&finish,
-                       msecs_to_jiffies(2));
-       pcr->finish_me = NULL;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
-
-static void rtsx_pci_card_detect(struct work_struct *work)
-{
-       struct delayed_work *dwork;
-       struct rtsx_pcr *pcr;
-       unsigned long flags;
-       unsigned int card_detect = 0, card_inserted, card_removed;
-       u32 irq_status;
-
-       dwork = to_delayed_work(work);
-       pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
-
-       pcr_dbg(pcr, "--> %s\n", __func__);
-
-       mutex_lock(&pcr->pcr_mutex);
-       spin_lock_irqsave(&pcr->lock, flags);
-
-       irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
-       pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
-
-       irq_status &= CARD_EXIST;
-       card_inserted = pcr->card_inserted & irq_status;
-       card_removed = pcr->card_removed;
-       pcr->card_inserted = 0;
-       pcr->card_removed = 0;
-
-       spin_unlock_irqrestore(&pcr->lock, flags);
-
-       if (card_inserted || card_removed) {
-               pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
-                       card_inserted, card_removed);
-
-               if (pcr->ops->cd_deglitch)
-                       card_inserted = pcr->ops->cd_deglitch(pcr);
-
-               card_detect = card_inserted | card_removed;
-
-               pcr->card_exist |= card_inserted;
-               pcr->card_exist &= ~card_removed;
-       }
-
-       mutex_unlock(&pcr->pcr_mutex);
-
-       if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
-               pcr->slots[RTSX_SD_CARD].card_event(
-                               pcr->slots[RTSX_SD_CARD].p_dev);
-       if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
-               pcr->slots[RTSX_MS_CARD].card_event(
-                               pcr->slots[RTSX_MS_CARD].p_dev);
-}
-
-static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
-{
-       struct rtsx_pcr *pcr = dev_id;
-       u32 int_reg;
-
-       if (!pcr)
-               return IRQ_NONE;
-
-       spin_lock(&pcr->lock);
-
-       int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
-       /* Clear interrupt flag */
-       rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
-       if ((int_reg & pcr->bier) == 0) {
-               spin_unlock(&pcr->lock);
-               return IRQ_NONE;
-       }
-       if (int_reg == 0xFFFFFFFF) {
-               spin_unlock(&pcr->lock);
-               return IRQ_HANDLED;
-       }
-
-       int_reg &= (pcr->bier | 0x7FFFFF);
-
-       if (int_reg & SD_INT) {
-               if (int_reg & SD_EXIST) {
-                       pcr->card_inserted |= SD_EXIST;
-               } else {
-                       pcr->card_removed |= SD_EXIST;
-                       pcr->card_inserted &= ~SD_EXIST;
-               }
-               pcr->dma_error_count = 0;
-       }
-
-       if (int_reg & MS_INT) {
-               if (int_reg & MS_EXIST) {
-                       pcr->card_inserted |= MS_EXIST;
-               } else {
-                       pcr->card_removed |= MS_EXIST;
-                       pcr->card_inserted &= ~MS_EXIST;
-               }
-       }
-
-       if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
-               if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
-                       pcr->trans_result = TRANS_RESULT_FAIL;
-                       if (pcr->done)
-                               complete(pcr->done);
-               } else if (int_reg & TRANS_OK_INT) {
-                       pcr->trans_result = TRANS_RESULT_OK;
-                       if (pcr->done)
-                               complete(pcr->done);
-               }
-       }
-
-       if (pcr->card_inserted || pcr->card_removed)
-               schedule_delayed_work(&pcr->carddet_work,
-                               msecs_to_jiffies(200));
-
-       spin_unlock(&pcr->lock);
-       return IRQ_HANDLED;
-}
-
-static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
-{
-       pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
-                       __func__, pcr->msi_en, pcr->pci->irq);
-
-       if (request_irq(pcr->pci->irq, rtsx_pci_isr,
-                       pcr->msi_en ? 0 : IRQF_SHARED,
-                       DRV_NAME_RTSX_PCI, pcr)) {
-               dev_err(&(pcr->pci->dev),
-                       "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
-                       pcr->pci->irq);
-               return -1;
-       }
-
-       pcr->irq = pcr->pci->irq;
-       pci_intx(pcr->pci, !pcr->msi_en);
-
-       return 0;
-}
-
-static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
-{
-       if (pcr->ops->set_aspm)
-               pcr->ops->set_aspm(pcr, true);
-       else
-               rtsx_comm_set_aspm(pcr, true);
-}
-
-static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
-{
-       struct rtsx_cr_option *option = &pcr->option;
-
-       if (option->ltr_enabled) {
-               u32 latency = option->ltr_l1off_latency;
-
-               if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
-                       mdelay(option->l1_snooze_delay);
-
-               rtsx_set_ltr_latency(pcr, latency);
-       }
-
-       if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
-               rtsx_set_l1off_sub_cfg_d0(pcr, 0);
-
-       rtsx_enable_aspm(pcr);
-}
-
-void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
-{
-       if (pcr->ops->power_saving)
-               pcr->ops->power_saving(pcr);
-       else
-               rtsx_comm_pm_power_saving(pcr);
-}
-
-static void rtsx_pci_idle_work(struct work_struct *work)
-{
-       struct delayed_work *dwork = to_delayed_work(work);
-       struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
-
-       pcr_dbg(pcr, "--> %s\n", __func__);
-
-       mutex_lock(&pcr->pcr_mutex);
-
-       pcr->state = PDEV_STAT_IDLE;
-
-       if (pcr->ops->disable_auto_blink)
-               pcr->ops->disable_auto_blink(pcr);
-       if (pcr->ops->turn_off_led)
-               pcr->ops->turn_off_led(pcr);
-
-       rtsx_pm_power_saving(pcr);
-
-       mutex_unlock(&pcr->pcr_mutex);
-}
-
-#ifdef CONFIG_PM
-static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
-{
-       if (pcr->ops->turn_off_led)
-               pcr->ops->turn_off_led(pcr);
-
-       rtsx_pci_writel(pcr, RTSX_BIER, 0);
-       pcr->bier = 0;
-
-       rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
-       rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
-
-       if (pcr->ops->force_power_down)
-               pcr->ops->force_power_down(pcr, pm_state);
-}
-#endif
-
-static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
-{
-       int err;
-
-       pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
-       rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
-
-       rtsx_pci_enable_bus_int(pcr);
-
-       /* Power on SSC */
-       err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
-       if (err < 0)
-               return err;
-
-       /* Wait SSC power stable */
-       udelay(200);
-
-       rtsx_pci_disable_aspm(pcr);
-       if (pcr->ops->optimize_phy) {
-               err = pcr->ops->optimize_phy(pcr);
-               if (err < 0)
-                       return err;
-       }
-
-       rtsx_pci_init_cmd(pcr);
-
-       /* Set mcu_cnt to 7 to ensure data can be sampled properly */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
-
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
-       /* Disable card clock */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
-       /* Reset delink mode */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
-       /* Card driving select */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
-                       0xFF, pcr->card_drive_sel);
-       /* Enable SSC Clock */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
-                       0xFF, SSC_8X_EN | SSC_SEL_4M);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
-       /* Disable cd_pwr_save */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
-       /* Clear Link Ready Interrupt */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
-                       LINK_RDY_INT, LINK_RDY_INT);
-       /* Enlarge the estimation window of PERST# glitch
-        * to reduce the chance of invalid card interrupt
-        */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
-       /* Update RC oscillator to 400k
-        * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
-        *                1: 2M  0: 400k
-        */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
-       /* Set interrupt write clear
-        * bit 1: U_elbi_if_rd_clr_en
-        *      1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
-        *      0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
-        */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
-
-       err = rtsx_pci_send_cmd(pcr, 100);
-       if (err < 0)
-               return err;
-
-       switch (PCI_PID(pcr)) {
-       case PID_5250:
-       case PID_524A:
-       case PID_525A:
-               rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
-               break;
-       default:
-               break;
-       }
-
-       /* Enable clk_request_n to enable clock power management */
-       rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
-       /* Enter L1 when host tx idle */
-       rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
-
-       if (pcr->ops->extra_init_hw) {
-               err = pcr->ops->extra_init_hw(pcr);
-               if (err < 0)
-                       return err;
-       }
-
-       /* No CD interrupt if probing driver with card inserted.
-        * So we need to initialize pcr->card_exist here.
-        */
-       if (pcr->ops->cd_deglitch)
-               pcr->card_exist = pcr->ops->cd_deglitch(pcr);
-       else
-               pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
-
-       return 0;
-}
-
-static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
-{
-       int err;
-
-       spin_lock_init(&pcr->lock);
-       mutex_init(&pcr->pcr_mutex);
-
-       switch (PCI_PID(pcr)) {
-       default:
-       case 0x5209:
-               rts5209_init_params(pcr);
-               break;
-
-       case 0x5229:
-               rts5229_init_params(pcr);
-               break;
-
-       case 0x5289:
-               rtl8411_init_params(pcr);
-               break;
-
-       case 0x5227:
-               rts5227_init_params(pcr);
-               break;
-
-       case 0x522A:
-               rts522a_init_params(pcr);
-               break;
-
-       case 0x5249:
-               rts5249_init_params(pcr);
-               break;
-
-       case 0x524A:
-               rts524a_init_params(pcr);
-               break;
-
-       case 0x525A:
-               rts525a_init_params(pcr);
-               break;
-
-       case 0x5287:
-               rtl8411b_init_params(pcr);
-               break;
-
-       case 0x5286:
-               rtl8402_init_params(pcr);
-               break;
-       }
-
-       pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
-                       PCI_PID(pcr), pcr->ic_version);
-
-       pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
-                       GFP_KERNEL);
-       if (!pcr->slots)
-               return -ENOMEM;
-
-       if (pcr->ops->fetch_vendor_settings)
-               pcr->ops->fetch_vendor_settings(pcr);
-
-       pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
-       pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
-                       pcr->sd30_drive_sel_1v8);
-       pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
-                       pcr->sd30_drive_sel_3v3);
-       pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
-                       pcr->card_drive_sel);
-       pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
-
-       pcr->state = PDEV_STAT_IDLE;
-       err = rtsx_pci_init_hw(pcr);
-       if (err < 0) {
-               kfree(pcr->slots);
-               return err;
-       }
-
-       return 0;
-}
-
-static int rtsx_pci_probe(struct pci_dev *pcidev,
-                         const struct pci_device_id *id)
-{
-       struct rtsx_pcr *pcr;
-       struct pcr_handle *handle;
-       u32 base, len;
-       int ret, i, bar = 0;
-
-       dev_dbg(&(pcidev->dev),
-               ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
-               pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
-               (int)pcidev->revision);
-
-       ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
-       if (ret < 0)
-               return ret;
-
-       ret = pci_enable_device(pcidev);
-       if (ret)
-               return ret;
-
-       ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
-       if (ret)
-               goto disable;
-
-       pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
-       if (!pcr) {
-               ret = -ENOMEM;
-               goto release_pci;
-       }
-
-       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-       if (!handle) {
-               ret = -ENOMEM;
-               goto free_pcr;
-       }
-       handle->pcr = pcr;
-
-       idr_preload(GFP_KERNEL);
-       spin_lock(&rtsx_pci_lock);
-       ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
-       if (ret >= 0)
-               pcr->id = ret;
-       spin_unlock(&rtsx_pci_lock);
-       idr_preload_end();
-       if (ret < 0)
-               goto free_handle;
-
-       pcr->pci = pcidev;
-       dev_set_drvdata(&pcidev->dev, handle);
-
-       if (CHK_PCI_PID(pcr, 0x525A))
-               bar = 1;
-       len = pci_resource_len(pcidev, bar);
-       base = pci_resource_start(pcidev, bar);
-       pcr->remap_addr = ioremap_nocache(base, len);
-       if (!pcr->remap_addr) {
-               ret = -ENOMEM;
-               goto free_handle;
-       }
-
-       pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
-                       RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
-                       GFP_KERNEL);
-       if (pcr->rtsx_resv_buf == NULL) {
-               ret = -ENXIO;
-               goto unmap;
-       }
-       pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
-       pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
-       pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
-       pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-
-       pcr->card_inserted = 0;
-       pcr->card_removed = 0;
-       INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
-       INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
-
-       pcr->msi_en = msi_en;
-       if (pcr->msi_en) {
-               ret = pci_enable_msi(pcidev);
-               if (ret)
-                       pcr->msi_en = false;
-       }
-
-       ret = rtsx_pci_acquire_irq(pcr);
-       if (ret < 0)
-               goto disable_msi;
-
-       pci_set_master(pcidev);
-       synchronize_irq(pcr->irq);
-
-       ret = rtsx_pci_init_chip(pcr);
-       if (ret < 0)
-               goto disable_irq;
-
-       for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
-               rtsx_pcr_cells[i].platform_data = handle;
-               rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
-       }
-       ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
-                       ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
-       if (ret < 0)
-               goto disable_irq;
-
-       schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
-
-       return 0;
-
-disable_irq:
-       free_irq(pcr->irq, (void *)pcr);
-disable_msi:
-       if (pcr->msi_en)
-               pci_disable_msi(pcr->pci);
-       dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
-                       pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
-unmap:
-       iounmap(pcr->remap_addr);
-free_handle:
-       kfree(handle);
-free_pcr:
-       kfree(pcr);
-release_pci:
-       pci_release_regions(pcidev);
-disable:
-       pci_disable_device(pcidev);
-
-       return ret;
-}
-
-static void rtsx_pci_remove(struct pci_dev *pcidev)
-{
-       struct pcr_handle *handle = pci_get_drvdata(pcidev);
-       struct rtsx_pcr *pcr = handle->pcr;
-
-       pcr->remove_pci = true;
-
-       /* Disable interrupts at the pcr level */
-       spin_lock_irq(&pcr->lock);
-       rtsx_pci_writel(pcr, RTSX_BIER, 0);
-       pcr->bier = 0;
-       spin_unlock_irq(&pcr->lock);
-
-       cancel_delayed_work_sync(&pcr->carddet_work);
-       cancel_delayed_work_sync(&pcr->idle_work);
-
-       mfd_remove_devices(&pcidev->dev);
-
-       dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
-                       pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
-       free_irq(pcr->irq, (void *)pcr);
-       if (pcr->msi_en)
-               pci_disable_msi(pcr->pci);
-       iounmap(pcr->remap_addr);
-
-       pci_release_regions(pcidev);
-       pci_disable_device(pcidev);
-
-       spin_lock(&rtsx_pci_lock);
-       idr_remove(&rtsx_pci_idr, pcr->id);
-       spin_unlock(&rtsx_pci_lock);
-
-       kfree(pcr->slots);
-       kfree(pcr);
-       kfree(handle);
-
-       dev_dbg(&(pcidev->dev),
-               ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
-               pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
-}
-
-#ifdef CONFIG_PM
-
-static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
-{
-       struct pcr_handle *handle;
-       struct rtsx_pcr *pcr;
-
-       dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
-
-       handle = pci_get_drvdata(pcidev);
-       pcr = handle->pcr;
-
-       cancel_delayed_work(&pcr->carddet_work);
-       cancel_delayed_work(&pcr->idle_work);
-
-       mutex_lock(&pcr->pcr_mutex);
-
-       rtsx_pci_power_off(pcr, HOST_ENTER_S3);
-
-       pci_save_state(pcidev);
-       pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
-       pci_disable_device(pcidev);
-       pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
-
-       mutex_unlock(&pcr->pcr_mutex);
-       return 0;
-}
-
-static int rtsx_pci_resume(struct pci_dev *pcidev)
-{
-       struct pcr_handle *handle;
-       struct rtsx_pcr *pcr;
-       int ret = 0;
-
-       dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
-
-       handle = pci_get_drvdata(pcidev);
-       pcr = handle->pcr;
-
-       mutex_lock(&pcr->pcr_mutex);
-
-       pci_set_power_state(pcidev, PCI_D0);
-       pci_restore_state(pcidev);
-       ret = pci_enable_device(pcidev);
-       if (ret)
-               goto out;
-       pci_set_master(pcidev);
-
-       ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
-       if (ret)
-               goto out;
-
-       ret = rtsx_pci_init_hw(pcr);
-       if (ret)
-               goto out;
-
-       schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
-
-out:
-       mutex_unlock(&pcr->pcr_mutex);
-       return ret;
-}
-
-static void rtsx_pci_shutdown(struct pci_dev *pcidev)
-{
-       struct pcr_handle *handle;
-       struct rtsx_pcr *pcr;
-
-       dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
-
-       handle = pci_get_drvdata(pcidev);
-       pcr = handle->pcr;
-       rtsx_pci_power_off(pcr, HOST_ENTER_S1);
-
-       pci_disable_device(pcidev);
-}
-
-#else /* CONFIG_PM */
-
-#define rtsx_pci_suspend NULL
-#define rtsx_pci_resume NULL
-#define rtsx_pci_shutdown NULL
-
-#endif /* CONFIG_PM */
-
-static struct pci_driver rtsx_pci_driver = {
-       .name = DRV_NAME_RTSX_PCI,
-       .id_table = rtsx_pci_ids,
-       .probe = rtsx_pci_probe,
-       .remove = rtsx_pci_remove,
-       .suspend = rtsx_pci_suspend,
-       .resume = rtsx_pci_resume,
-       .shutdown = rtsx_pci_shutdown,
-};
-module_pci_driver(rtsx_pci_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
-MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/mfd/rtsx_pcr.h
deleted file mode 100644 (file)
index ec784e0..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Driver for Realtek PCI-Express card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- *   Wei WANG <wei_wang@realsil.com.cn>
- */
-
-#ifndef __RTSX_PCR_H
-#define __RTSX_PCR_H
-
-#include <linux/mfd/rtsx_pci.h>
-
-#define MIN_DIV_N_PCR          80
-#define MAX_DIV_N_PCR          208
-
-#define RTS522A_PM_CTRL3               0xFF7E
-
-#define RTS524A_PME_FORCE_CTL          0xFF78
-#define RTS524A_PM_CTRL3               0xFF7E
-
-#define LTR_ACTIVE_LATENCY_DEF         0x883C
-#define LTR_IDLE_LATENCY_DEF           0x892C
-#define LTR_L1OFF_LATENCY_DEF          0x9003
-#define L1_SNOOZE_DELAY_DEF            1
-#define LTR_L1OFF_SSPWRGATE_5249_DEF           0xAF
-#define LTR_L1OFF_SSPWRGATE_5250_DEF           0xFF
-#define LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF    0xAC
-#define LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF    0xF8
-#define CMD_TIMEOUT_DEF                100
-#define ASPM_MASK_NEG          0xFC
-#define MASK_8_BIT_DEF         0xFF
-
-int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
-int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
-
-void rts5209_init_params(struct rtsx_pcr *pcr);
-void rts5229_init_params(struct rtsx_pcr *pcr);
-void rtl8411_init_params(struct rtsx_pcr *pcr);
-void rtl8402_init_params(struct rtsx_pcr *pcr);
-void rts5227_init_params(struct rtsx_pcr *pcr);
-void rts522a_init_params(struct rtsx_pcr *pcr);
-void rts5249_init_params(struct rtsx_pcr *pcr);
-void rts524a_init_params(struct rtsx_pcr *pcr);
-void rts525a_init_params(struct rtsx_pcr *pcr);
-void rtl8411b_init_params(struct rtsx_pcr *pcr);
-
-static inline u8 map_sd_drive(int idx)
-{
-       u8 sd_drive[4] = {
-               0x01,   /* Type D */
-               0x02,   /* Type C */
-               0x05,   /* Type A */
-               0x03    /* Type B */
-       };
-
-       return sd_drive[idx];
-}
-
-#define rtsx_vendor_setting_valid(reg)         (!((reg) & 0x1000000))
-#define rts5209_vendor_setting1_valid(reg)     (!((reg) & 0x80))
-#define rts5209_vendor_setting2_valid(reg)     ((reg) & 0x80)
-
-#define rtsx_reg_to_aspm(reg)                  (((reg) >> 28) & 0x03)
-#define rtsx_reg_to_sd30_drive_sel_1v8(reg)    (((reg) >> 26) & 0x03)
-#define rtsx_reg_to_sd30_drive_sel_3v3(reg)    (((reg) >> 5) & 0x03)
-#define rtsx_reg_to_card_drive_sel(reg)                ((((reg) >> 25) & 0x01) << 6)
-#define rtsx_reg_check_reverse_socket(reg)     ((reg) & 0x4000)
-#define rts5209_reg_to_aspm(reg)               (((reg) >> 5) & 0x03)
-#define rts5209_reg_check_ms_pmos(reg)         (!((reg) & 0x08))
-#define rts5209_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 3) & 0x07)
-#define rts5209_reg_to_sd30_drive_sel_3v3(reg) ((reg) & 0x07)
-#define rts5209_reg_to_card_drive_sel(reg)     ((reg) >> 8)
-#define rtl8411_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x07)
-#define rtl8411b_reg_to_sd30_drive_sel_3v3(reg)        ((reg) & 0x03)
-
-#define set_pull_ctrl_tables(pcr, __device)                            \
-do {                                                                   \
-       pcr->sd_pull_ctl_enable_tbl  = __device##_sd_pull_ctl_enable_tbl;  \
-       pcr->sd_pull_ctl_disable_tbl = __device##_sd_pull_ctl_disable_tbl; \
-       pcr->ms_pull_ctl_enable_tbl  = __device##_ms_pull_ctl_enable_tbl;  \
-       pcr->ms_pull_ctl_disable_tbl = __device##_ms_pull_ctl_disable_tbl; \
-} while (0)
-
-/* generic operations */
-int rtsx_gops_pm_reset(struct rtsx_pcr *pcr);
-int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency);
-int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val);
-
-#endif
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
deleted file mode 100644 (file)
index 59d61b0..0000000
+++ /dev/null
@@ -1,791 +0,0 @@
-/* Driver for Realtek USB card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- *   Roger Tseng <rogerable@realtek.com>
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/usb.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_usb.h>
-
-static int polling_pipe = 1;
-module_param(polling_pipe, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(polling_pipe, "polling pipe (0: ctl, 1: bulk)");
-
-static const struct mfd_cell rtsx_usb_cells[] = {
-       [RTSX_USB_SD_CARD] = {
-               .name = "rtsx_usb_sdmmc",
-               .pdata_size = 0,
-       },
-       [RTSX_USB_MS_CARD] = {
-               .name = "rtsx_usb_ms",
-               .pdata_size = 0,
-       },
-};
-
-static void rtsx_usb_sg_timed_out(struct timer_list *t)
-{
-       struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer);
-
-       dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
-       usb_sg_cancel(&ucr->current_sg);
-}
-
-static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr,
-               unsigned int pipe, struct scatterlist *sg, int num_sg,
-               unsigned int length, unsigned int *act_len, int timeout)
-{
-       int ret;
-
-       dev_dbg(&ucr->pusb_intf->dev, "%s: xfer %u bytes, %d entries\n",
-                       __func__, length, num_sg);
-       ret = usb_sg_init(&ucr->current_sg, ucr->pusb_dev, pipe, 0,
-                       sg, num_sg, length, GFP_NOIO);
-       if (ret)
-               return ret;
-
-       ucr->sg_timer.expires = jiffies + msecs_to_jiffies(timeout);
-       add_timer(&ucr->sg_timer);
-       usb_sg_wait(&ucr->current_sg);
-       if (!del_timer_sync(&ucr->sg_timer))
-               ret = -ETIMEDOUT;
-       else
-               ret = ucr->current_sg.status;
-
-       if (act_len)
-               *act_len = ucr->current_sg.bytes;
-
-       return ret;
-}
-
-int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe,
-                             void *buf, unsigned int len, int num_sg,
-                             unsigned int *act_len, int timeout)
-{
-       if (timeout < 600)
-               timeout = 600;
-
-       if (num_sg)
-               return rtsx_usb_bulk_transfer_sglist(ucr, pipe,
-                               (struct scatterlist *)buf, num_sg, len, act_len,
-                               timeout);
-       else
-               return usb_bulk_msg(ucr->pusb_dev, pipe, buf, len, act_len,
-                               timeout);
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_transfer_data);
-
-static inline void rtsx_usb_seq_cmd_hdr(struct rtsx_ucr *ucr,
-               u16 addr, u16 len, u8 seq_type)
-{
-       rtsx_usb_cmd_hdr_tag(ucr);
-
-       ucr->cmd_buf[PACKET_TYPE] = seq_type;
-       ucr->cmd_buf[5] = (u8)(len >> 8);
-       ucr->cmd_buf[6] = (u8)len;
-       ucr->cmd_buf[8] = (u8)(addr >> 8);
-       ucr->cmd_buf[9] = (u8)addr;
-
-       if (seq_type == SEQ_WRITE)
-               ucr->cmd_buf[STAGE_FLAG] = 0;
-       else
-               ucr->cmd_buf[STAGE_FLAG] = STAGE_R;
-}
-
-static int rtsx_usb_seq_write_register(struct rtsx_ucr *ucr,
-               u16 addr, u16 len, u8 *data)
-{
-       u16 cmd_len = ALIGN(SEQ_WRITE_DATA_OFFSET + len, 4);
-
-       if (!data)
-               return -EINVAL;
-
-       if (cmd_len > IOBUF_SIZE)
-               return -EINVAL;
-
-       rtsx_usb_seq_cmd_hdr(ucr, addr, len, SEQ_WRITE);
-       memcpy(ucr->cmd_buf + SEQ_WRITE_DATA_OFFSET, data, len);
-
-       return rtsx_usb_transfer_data(ucr,
-                       usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
-                       ucr->cmd_buf, cmd_len, 0, NULL, 100);
-}
-
-static int rtsx_usb_seq_read_register(struct rtsx_ucr *ucr,
-               u16 addr, u16 len, u8 *data)
-{
-       int i, ret;
-       u16 rsp_len = round_down(len, 4);
-       u16 res_len = len - rsp_len;
-
-       if (!data)
-               return -EINVAL;
-
-       /* 4-byte aligned part */
-       if (rsp_len) {
-               rtsx_usb_seq_cmd_hdr(ucr, addr, len, SEQ_READ);
-               ret = rtsx_usb_transfer_data(ucr,
-                               usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
-                               ucr->cmd_buf, 12, 0, NULL, 100);
-               if (ret)
-                       return ret;
-
-               ret = rtsx_usb_transfer_data(ucr,
-                               usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN),
-                               data, rsp_len, 0, NULL, 100);
-               if (ret)
-                       return ret;
-       }
-
-       /* unaligned part */
-       for (i = 0; i < res_len; i++) {
-               ret = rtsx_usb_read_register(ucr, addr + rsp_len + i,
-                               data + rsp_len + i);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len)
-{
-       return rtsx_usb_seq_read_register(ucr, PPBUF_BASE2, (u16)buf_len, buf);
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_read_ppbuf);
-
-int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len)
-{
-       return rtsx_usb_seq_write_register(ucr, PPBUF_BASE2, (u16)buf_len, buf);
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_write_ppbuf);
-
-int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr,
-               u8 mask, u8 data)
-{
-       u16 value, index;
-
-       addr |= EP0_WRITE_REG_CMD << EP0_OP_SHIFT;
-       value = swab16(addr);
-       index = mask | data << 8;
-
-       return usb_control_msg(ucr->pusb_dev,
-                       usb_sndctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
-                       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                       value, index, NULL, 0, 100);
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
-
-int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
-{
-       u16 value;
-       u8 *buf;
-       int ret;
-
-       if (!data)
-               return -EINVAL;
-
-       buf = kzalloc(sizeof(u8), GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
-       value = swab16(addr);
-
-       ret = usb_control_msg(ucr->pusb_dev,
-                       usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
-                       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                       value, 0, buf, 1, 100);
-       *data = *buf;
-
-       kfree(buf);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
-
-void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type, u16 reg_addr,
-               u8 mask, u8 data)
-{
-       int i;
-
-       if (ucr->cmd_idx < (IOBUF_SIZE - CMD_OFFSET) / 4) {
-               i = CMD_OFFSET + ucr->cmd_idx * 4;
-
-               ucr->cmd_buf[i++] = ((cmd_type & 0x03) << 6) |
-                       (u8)((reg_addr >> 8) & 0x3F);
-               ucr->cmd_buf[i++] = (u8)reg_addr;
-               ucr->cmd_buf[i++] = mask;
-               ucr->cmd_buf[i++] = data;
-
-               ucr->cmd_idx++;
-       }
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_add_cmd);
-
-int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout)
-{
-       int ret;
-
-       ucr->cmd_buf[CNT_H] = (u8)(ucr->cmd_idx >> 8);
-       ucr->cmd_buf[CNT_L] = (u8)(ucr->cmd_idx);
-       ucr->cmd_buf[STAGE_FLAG] = flag;
-
-       ret = rtsx_usb_transfer_data(ucr,
-                       usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
-                       ucr->cmd_buf, ucr->cmd_idx * 4 + CMD_OFFSET,
-                       0, NULL, timeout);
-       if (ret) {
-               rtsx_usb_clear_fsm_err(ucr);
-               return ret;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_send_cmd);
-
-int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout)
-{
-       if (rsp_len <= 0)
-               return -EINVAL;
-
-       rsp_len = ALIGN(rsp_len, 4);
-
-       return rtsx_usb_transfer_data(ucr,
-                       usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN),
-                       ucr->rsp_buf, rsp_len, 0, NULL, timeout);
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_get_rsp);
-
-static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
-{
-       int ret;
-
-       rtsx_usb_init_cmd(ucr);
-       rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_EXIST, 0x00, 0x00);
-       rtsx_usb_add_cmd(ucr, READ_REG_CMD, OCPSTAT, 0x00, 0x00);
-       ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
-       if (ret)
-               return ret;
-
-       ret = rtsx_usb_get_rsp(ucr, 2, 100);
-       if (ret)
-               return ret;
-
-       *status = ((ucr->rsp_buf[0] >> 2) & 0x0f) |
-                 ((ucr->rsp_buf[1] & 0x03) << 4);
-
-       return 0;
-}
-
-int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
-{
-       int ret;
-       u16 *buf;
-
-       if (!status)
-               return -EINVAL;
-
-       if (polling_pipe == 0) {
-               buf = kzalloc(sizeof(u16), GFP_KERNEL);
-               if (!buf)
-                       return -ENOMEM;
-
-               ret = usb_control_msg(ucr->pusb_dev,
-                               usb_rcvctrlpipe(ucr->pusb_dev, 0),
-                               RTSX_USB_REQ_POLL,
-                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                               0, 0, buf, 2, 100);
-               *status = *buf;
-
-               kfree(buf);
-       } else {
-               ret = rtsx_usb_get_status_with_bulk(ucr, status);
-       }
-
-       /* usb_control_msg may return positive when success */
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_get_card_status);
-
-static int rtsx_usb_write_phy_register(struct rtsx_ucr *ucr, u8 addr, u8 val)
-{
-       dev_dbg(&ucr->pusb_intf->dev, "Write 0x%x to phy register 0x%x\n",
-                       val, addr);
-
-       rtsx_usb_init_cmd(ucr);
-
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VSTAIN, 0xFF, val);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VCONTROL, 0xFF, addr & 0x0F);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VCONTROL,
-                       0xFF, (addr >> 4) & 0x0F);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
-
-       return rtsx_usb_send_cmd(ucr, MODE_C, 100);
-}
-
-int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data)
-{
-       rtsx_usb_init_cmd(ucr);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, addr, mask, data);
-       return rtsx_usb_send_cmd(ucr, MODE_C, 100);
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_write_register);
-
-int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
-{
-       int ret;
-
-       if (data != NULL)
-               *data = 0;
-
-       rtsx_usb_init_cmd(ucr);
-       rtsx_usb_add_cmd(ucr, READ_REG_CMD, addr, 0, 0);
-       ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
-       if (ret)
-               return ret;
-
-       ret = rtsx_usb_get_rsp(ucr, 1, 100);
-       if (ret)
-               return ret;
-
-       if (data != NULL)
-               *data = ucr->rsp_buf[0];
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_read_register);
-
-static inline u8 double_ssc_depth(u8 depth)
-{
-       return (depth > 1) ? (depth - 1) : depth;
-}
-
-static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
-{
-       if (div > CLK_DIV_1) {
-               if (ssc_depth > div - 1)
-                       ssc_depth -= (div - 1);
-               else
-                       ssc_depth = SSC_DEPTH_2M;
-       }
-
-       return ssc_depth;
-}
-
-int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock,
-               u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
-{
-       int ret;
-       u8 n, clk_divider, mcu_cnt, div;
-
-       if (!card_clock) {
-               ucr->cur_clk = 0;
-               return 0;
-       }
-
-       if (initial_mode) {
-               /* We use 250k(around) here, in initial stage */
-               clk_divider = SD_CLK_DIVIDE_128;
-               card_clock = 30000000;
-       } else {
-               clk_divider = SD_CLK_DIVIDE_0;
-       }
-
-       ret = rtsx_usb_write_register(ucr, SD_CFG1,
-                       SD_CLK_DIVIDE_MASK, clk_divider);
-       if (ret < 0)
-               return ret;
-
-       card_clock /= 1000000;
-       dev_dbg(&ucr->pusb_intf->dev,
-                       "Switch card clock to %dMHz\n", card_clock);
-
-       if (!initial_mode && double_clk)
-               card_clock *= 2;
-       dev_dbg(&ucr->pusb_intf->dev,
-                       "Internal SSC clock: %dMHz (cur_clk = %d)\n",
-                       card_clock, ucr->cur_clk);
-
-       if (card_clock == ucr->cur_clk)
-               return 0;
-
-       /* Converting clock value into internal settings: n and div */
-       n = card_clock - 2;
-       if ((card_clock <= 2) || (n > MAX_DIV_N))
-               return -EINVAL;
-
-       mcu_cnt = 60/card_clock + 3;
-       if (mcu_cnt > 15)
-               mcu_cnt = 15;
-
-       /* Make sure that the SSC clock div_n is not less than MIN_DIV_N */
-
-       div = CLK_DIV_1;
-       while (n < MIN_DIV_N && div < CLK_DIV_4) {
-               n = (n + 2) * 2 - 2;
-               div++;
-       }
-       dev_dbg(&ucr->pusb_intf->dev, "n = %d, div = %d\n", n, div);
-
-       if (double_clk)
-               ssc_depth = double_ssc_depth(ssc_depth);
-
-       ssc_depth = revise_ssc_depth(ssc_depth, div);
-       dev_dbg(&ucr->pusb_intf->dev, "ssc_depth = %d\n", ssc_depth);
-
-       rtsx_usb_init_cmd(ucr);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, CLK_CHANGE);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV,
-                       0x3F, (div << 4) | mcu_cnt);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL2,
-                       SSC_DEPTH_MASK, ssc_depth);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
-       if (vpclk) {
-               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL,
-                               PHASE_NOT_RESET, 0);
-               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL,
-                               PHASE_NOT_RESET, PHASE_NOT_RESET);
-       }
-
-       ret = rtsx_usb_send_cmd(ucr, MODE_C, 2000);
-       if (ret < 0)
-               return ret;
-
-       ret = rtsx_usb_write_register(ucr, SSC_CTL1, 0xff,
-                       SSC_RSTB | SSC_8X_EN | SSC_SEL_4M);
-       if (ret < 0)
-               return ret;
-
-       /* Wait SSC clock stable */
-       usleep_range(100, 1000);
-
-       ret = rtsx_usb_write_register(ucr, CLK_DIV, CLK_CHANGE, 0);
-       if (ret < 0)
-               return ret;
-
-       ucr->cur_clk = card_clock;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_switch_clock);
-
-int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card)
-{
-       int ret;
-       u16 val;
-       u16 cd_mask[] = {
-               [RTSX_USB_SD_CARD] = (CD_MASK & ~SD_CD),
-               [RTSX_USB_MS_CARD] = (CD_MASK & ~MS_CD)
-       };
-
-       ret = rtsx_usb_get_card_status(ucr, &val);
-       /*
-        * If get status fails, return 0 (ok) for the exclusive check
-        * and let the flow fail at somewhere else.
-        */
-       if (ret)
-               return 0;
-
-       if (val & cd_mask[card])
-               return -EIO;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_usb_card_exclusive_check);
-
-static int rtsx_usb_reset_chip(struct rtsx_ucr *ucr)
-{
-       int ret;
-       u8 val;
-
-       rtsx_usb_init_cmd(ucr);
-
-       if (CHECK_PKG(ucr, LQFP48)) {
-               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
-                               LDO3318_PWR_MASK, LDO_SUSPEND);
-               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
-                               FORCE_LDO_POWERB, FORCE_LDO_POWERB);
-               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1,
-                               0x30, 0x10);
-               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5,
-                               0x03, 0x01);
-               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6,
-                               0x0C, 0x04);
-       }
-
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SYS_DUMMY0, NYET_MSAK, NYET_EN);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CD_DEGLITCH_WIDTH, 0xFF, 0x08);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
-                       CD_DEGLITCH_EN, XD_CD_DEGLITCH_EN, 0x0);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD30_DRIVE_SEL,
-                       SD30_DRIVE_MASK, DRIVER_TYPE_D);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
-                       CARD_DRIVE_SEL, SD20_DRIVE_MASK, 0x0);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG, 0xE0, 0x0);
-
-       if (ucr->is_rts5179)
-               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
-                               CARD_PULL_CTL5, 0x03, 0x01);
-
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DMA1_CTL,
-                      EXTEND_DMA1_ASYNC_SIGNAL, EXTEND_DMA1_ASYNC_SIGNAL);
-       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_INT_PEND,
-                       XD_INT | MS_INT | SD_INT,
-                       XD_INT | MS_INT | SD_INT);
-
-       ret = rtsx_usb_send_cmd(ucr, MODE_C, 100);
-       if (ret)
-               return ret;
-
-       /* config non-crystal mode */
-       rtsx_usb_read_register(ucr, CFG_MODE, &val);
-       if ((val & XTAL_FREE) || ((val & CLK_MODE_MASK) == CLK_MODE_NON_XTAL)) {
-               ret = rtsx_usb_write_phy_register(ucr, 0xC2, 0x7C);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int rtsx_usb_init_chip(struct rtsx_ucr *ucr)
-{
-       int ret;
-       u8 val;
-
-       rtsx_usb_clear_fsm_err(ucr);
-
-       /* power on SSC */
-       ret = rtsx_usb_write_register(ucr,
-                       FPDCTL, SSC_POWER_MASK, SSC_POWER_ON);
-       if (ret)
-               return ret;
-
-       usleep_range(100, 1000);
-       ret = rtsx_usb_write_register(ucr, CLK_DIV, CLK_CHANGE, 0x00);
-       if (ret)
-               return ret;
-
-       /* determine IC version */
-       ret = rtsx_usb_read_register(ucr, HW_VERSION, &val);
-       if (ret)
-               return ret;
-
-       ucr->ic_version = val & HW_VER_MASK;
-
-       /* determine package */
-       ret = rtsx_usb_read_register(ucr, CARD_SHARE_MODE, &val);
-       if (ret)
-               return ret;
-
-       if (val & CARD_SHARE_LQFP_SEL) {
-               ucr->package = LQFP48;
-               dev_dbg(&ucr->pusb_intf->dev, "Package: LQFP48\n");
-       } else {
-               ucr->package = QFN24;
-               dev_dbg(&ucr->pusb_intf->dev, "Package: QFN24\n");
-       }
-
-       /* determine IC variations */
-       rtsx_usb_read_register(ucr, CFG_MODE_1, &val);
-       if (val & RTS5179) {
-               ucr->is_rts5179 = true;
-               dev_dbg(&ucr->pusb_intf->dev, "Device is rts5179\n");
-       } else {
-               ucr->is_rts5179 = false;
-       }
-
-       return rtsx_usb_reset_chip(ucr);
-}
-
-static int rtsx_usb_probe(struct usb_interface *intf,
-                        const struct usb_device_id *id)
-{
-       struct usb_device *usb_dev = interface_to_usbdev(intf);
-       struct rtsx_ucr *ucr;
-       int ret;
-
-       dev_dbg(&intf->dev,
-               ": Realtek USB Card Reader found at bus %03d address %03d\n",
-                usb_dev->bus->busnum, usb_dev->devnum);
-
-       ucr = devm_kzalloc(&intf->dev, sizeof(*ucr), GFP_KERNEL);
-       if (!ucr)
-               return -ENOMEM;
-
-       ucr->pusb_dev = usb_dev;
-
-       ucr->iobuf = usb_alloc_coherent(ucr->pusb_dev, IOBUF_SIZE,
-                       GFP_KERNEL, &ucr->iobuf_dma);
-       if (!ucr->iobuf)
-               return -ENOMEM;
-
-       usb_set_intfdata(intf, ucr);
-
-       ucr->vendor_id = id->idVendor;
-       ucr->product_id = id->idProduct;
-       ucr->cmd_buf = ucr->rsp_buf = ucr->iobuf;
-
-       mutex_init(&ucr->dev_mutex);
-
-       ucr->pusb_intf = intf;
-
-       /* initialize */
-       ret = rtsx_usb_init_chip(ucr);
-       if (ret)
-               goto out_init_fail;
-
-       /* initialize USB SG transfer timer */
-       timer_setup(&ucr->sg_timer, rtsx_usb_sg_timed_out, 0);
-
-       ret = mfd_add_hotplug_devices(&intf->dev, rtsx_usb_cells,
-                                     ARRAY_SIZE(rtsx_usb_cells));
-       if (ret)
-               goto out_init_fail;
-
-#ifdef CONFIG_PM
-       intf->needs_remote_wakeup = 1;
-       usb_enable_autosuspend(usb_dev);
-#endif
-
-       return 0;
-
-out_init_fail:
-       usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
-                       ucr->iobuf_dma);
-       return ret;
-}
-
-static void rtsx_usb_disconnect(struct usb_interface *intf)
-{
-       struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
-
-       dev_dbg(&intf->dev, "%s called\n", __func__);
-
-       mfd_remove_devices(&intf->dev);
-
-       usb_set_intfdata(ucr->pusb_intf, NULL);
-       usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
-                       ucr->iobuf_dma);
-}
-
-#ifdef CONFIG_PM
-static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
-{
-       struct rtsx_ucr *ucr =
-               (struct rtsx_ucr *)usb_get_intfdata(intf);
-       u16 val = 0;
-
-       dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
-                       __func__, message.event);
-
-       if (PMSG_IS_AUTO(message)) {
-               if (mutex_trylock(&ucr->dev_mutex)) {
-                       rtsx_usb_get_card_status(ucr, &val);
-                       mutex_unlock(&ucr->dev_mutex);
-
-                       /* Defer the autosuspend if card exists */
-                       if (val & (SD_CD | MS_CD))
-                               return -EAGAIN;
-               } else {
-                       /* There is an ongoing operation*/
-                       return -EAGAIN;
-               }
-       }
-
-       return 0;
-}
-
-static int rtsx_usb_resume(struct usb_interface *intf)
-{
-       return 0;
-}
-
-static int rtsx_usb_reset_resume(struct usb_interface *intf)
-{
-       struct rtsx_ucr *ucr =
-               (struct rtsx_ucr *)usb_get_intfdata(intf);
-
-       rtsx_usb_reset_chip(ucr);
-       return 0;
-}
-
-#else /* CONFIG_PM */
-
-#define rtsx_usb_suspend NULL
-#define rtsx_usb_resume NULL
-#define rtsx_usb_reset_resume NULL
-
-#endif /* CONFIG_PM */
-
-
-static int rtsx_usb_pre_reset(struct usb_interface *intf)
-{
-       struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
-
-       mutex_lock(&ucr->dev_mutex);
-       return 0;
-}
-
-static int rtsx_usb_post_reset(struct usb_interface *intf)
-{
-       struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
-
-       mutex_unlock(&ucr->dev_mutex);
-       return 0;
-}
-
-static struct usb_device_id rtsx_usb_usb_ids[] = {
-       { USB_DEVICE(0x0BDA, 0x0129) },
-       { USB_DEVICE(0x0BDA, 0x0139) },
-       { USB_DEVICE(0x0BDA, 0x0140) },
-       { }
-};
-MODULE_DEVICE_TABLE(usb, rtsx_usb_usb_ids);
-
-static struct usb_driver rtsx_usb_driver = {
-       .name                   = "rtsx_usb",
-       .probe                  = rtsx_usb_probe,
-       .disconnect             = rtsx_usb_disconnect,
-       .suspend                = rtsx_usb_suspend,
-       .resume                 = rtsx_usb_resume,
-       .reset_resume           = rtsx_usb_reset_resume,
-       .pre_reset              = rtsx_usb_pre_reset,
-       .post_reset             = rtsx_usb_post_reset,
-       .id_table               = rtsx_usb_usb_ids,
-       .supports_autosuspend   = 1,
-       .soft_unbind            = 1,
-};
-
-module_usb_driver(rtsx_usb_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Roger Tseng <rogerable@realtek.com>");
-MODULE_DESCRIPTION("Realtek USB Card Reader Driver");
index 075330a..a00f99f 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * STM32 Low-Power Timer parent driver.
- *
  * Copyright (C) STMicroelectronics 2017
- *
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
  * Inspired by Benjamin Gaignard's stm32-timers driver
- *
- * License terms:  GNU General Public License (GPL), version 2
  */
 
 #include <linux/mfd/stm32-lptimer.h>
index a6675a4..1d347e5 100644 (file)
@@ -1,9 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) STMicroelectronics 2016
- *
  * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms:  GNU General Public License (GPL), version 2
  */
 
 #include <linux/mfd/stm32-timers.h>
index 0f3fab4..3cd958a 100644 (file)
@@ -124,7 +124,7 @@ static      int ti_tscadc_probe(struct platform_device *pdev)
        struct ti_tscadc_dev    *tscadc;
        struct resource         *res;
        struct clk              *clk;
-       struct device_node      *node = pdev->dev.of_node;
+       struct device_node      *node;
        struct mfd_cell         *cell;
        struct property         *prop;
        const __be32            *cur;
index 83af78c..ebf54cc 100644 (file)
@@ -9,6 +9,26 @@
 #include <linux/export.h>
 #include <linux/mfd/tmio.h>
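+/* Offsets into the chip's configuration (CNF) register space, used by the
+ * tmio_core_mmc_* helpers below.
+ */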
 
+#define CNF_CMD                 0x04
+#define CNF_CTL_BASE            0x10
+#define CNF_INT_PIN             0x3d
+#define CNF_STOP_CLK_CTL        0x40
+#define CNF_GCLK_CTL            0x41
+#define CNF_SD_CLK_MODE         0x42
+#define CNF_PIN_STATUS          0x44
+#define CNF_PWR_CTL_1           0x48
+#define CNF_PWR_CTL_2           0x49
+#define CNF_PWR_CTL_3           0x4a
+#define CNF_CARD_DETECT_MODE    0x4c
+#define CNF_SD_SLOT             0x50
+#define CNF_EXT_GCLK_CTL_1      0xf0
+#define CNF_EXT_GCLK_CTL_2      0xf1
+#define CNF_EXT_GCLK_CTL_3      0xf9
+#define CNF_SD_LED_EN_1         0xfa
+#define CNF_SD_LED_EN_2         0xfe
+
+#define SDCREN                  0x2    /* Enable access to MMC CTL regs (flag in COMMAND_REG) */
+
 int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
 {
        /* Enable the MMC/SD Control registers */
index da16bf4..dc94ffc 100644 (file)
@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
 EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
 
 static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
-                             struct device_node *node)
+                             struct device_node *parent)
 {
+       struct device_node *node;
+
        if (pdata && pdata->codec)
                return true;
 
-       if (of_find_node_by_name(node, "codec"))
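+       /*
+        * Unlike of_find_node_by_name(), of_get_child_by_name() matches only
+        * direct children and returns the node with a reference held, hence
+        * the of_node_put() once we know whether a codec child exists.
+        */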
+       node = of_get_child_by_name(parent, "codec");
+       if (node) {
+               of_node_put(node);
                return true;
+       }
 
        return false;
 }
index d66502d..dd19f17 100644 (file)
@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
 };
 
 
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
 {
-#ifdef CONFIG_OF
-       if (of_find_node_by_name(node, "vibra"))
+       struct device_node *node;
+
+       node = of_get_child_by_name(parent, "vibra");
+       if (node) {
+               of_node_put(node);
                return true;
-#endif
+       }
+
        return false;
 }
 
index f1a5c23..7c0fa24 100644 (file)
@@ -496,6 +496,10 @@ config PCI_ENDPOINT_TEST
            Enable this configuration option to enable the host side test driver
            for PCI Endpoint.
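+# MISC_RTSX is a promptless helper symbol: it follows MISC_RTSX_PCI and
+# MISC_RTSX_USB so that drivers/misc/cardreader/ is built whenever either
+# Realtek card reader driver is enabled.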
 
+config MISC_RTSX
+       tristate
+       default MISC_RTSX_PCI || MISC_RTSX_USB
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
@@ -508,4 +512,5 @@ source "drivers/misc/mic/Kconfig"
 source "drivers/misc/genwqe/Kconfig"
 source "drivers/misc/echo/Kconfig"
 source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/cardreader/Kconfig"
 endmenu
index 5ca5f64..8d8cc09 100644 (file)
@@ -55,6 +55,7 @@ obj-$(CONFIG_CXL_BASE)                += cxl/
 obj-$(CONFIG_ASPEED_LPC_CTRL)  += aspeed-lpc-ctrl.o
 obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
 obj-$(CONFIG_PCI_ENDPOINT_TEST)        += pci_endpoint_test.o
+obj-$(CONFIG_MISC_RTSX)        += cardreader/
 
 lkdtm-$(CONFIG_LKDTM)          += lkdtm_core.o
 lkdtm-$(CONFIG_LKDTM)          += lkdtm_bugs.o
diff --git a/drivers/misc/cardreader/Kconfig b/drivers/misc/cardreader/Kconfig
new file mode 100644 (file)
index 0000000..69e815e
--- /dev/null
@@ -0,0 +1,20 @@
+config MISC_RTSX_PCI
+       tristate "Realtek PCI-E card reader"
+       depends on PCI
+       select MFD_CORE
+       help
+         This adds support for Realtek PCI-Express card readers, including the
+         rts5209, rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411
+         and rts5260.
+         Realtek card readers support access to many types of memory cards,
+         such as Memory Stick, Memory Stick Pro, Secure Digital and
+         MultiMediaCard.
+
+config MISC_RTSX_USB
+       tristate "Realtek USB card reader"
+       depends on USB
+       select MFD_CORE
+       help
+         Select this option to get support for Realtek USB 2.0 card readers
+         including RTS5129, RTS5139, RTS5179 and RTS5170.
+         Realtek card readers support access to many types of memory cards,
+         such as Memory Stick Pro, Secure Digital and MultiMediaCard.
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
new file mode 100644 (file)
index 0000000..9fabfcc
--- /dev/null
@@ -0,0 +1,4 @@
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o
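+# rtsx_pci.ko is the shared PCR core plus one parameter file per supported
+# chip; rtsx_usb is a single-file module.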
+
+obj-$(CONFIG_MISC_RTSX_PCI)    += rtsx_pci.o
+obj-$(CONFIG_MISC_RTSX_USB)    += rtsx_usb.o
diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
new file mode 100644 (file)
index 0000000..434fd07
--- /dev/null
@@ -0,0 +1,508 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Wei WANG <wei_wang@realsil.com.cn>
+ *   Roger Tseng <rogerable@realtek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rtsx_pcr.h"
+
+static u8 rtl8411_get_ic_version(struct rtsx_pcr *pcr)
+{
+       u8 val;
+
+       rtsx_pci_read_register(pcr, SYS_VER, &val);
+       return val & 0x0F;
+}
+
+static int rtl8411b_is_qfn48(struct rtsx_pcr *pcr)
+{
+       u8 val = 0;
+
+       rtsx_pci_read_register(pcr, RTL8411B_PACKAGE_MODE, &val);
+
+       if (val & 0x2)
+               return 1;
+       else
+               return 0;
+}
+
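+/*
+ * Vendor settings are kept in two registers in PCI config space
+ * (PCR_SETTING_REG1 and PCR_SETTING_REG3); decode the ASPM, SD 3.0 drive
+ * strength and card drive selects from them when the settings are valid.
+ */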
+static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg1 = 0;
+       u8 reg3 = 0;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg1);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1);
+
+       if (!rtsx_vendor_setting_valid(reg1))
+               return;
+
+       pcr->aspm_en = rtsx_reg_to_aspm(reg1);
+       pcr->sd30_drive_sel_1v8 =
+               map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg1));
+       pcr->card_drive_sel &= 0x3F;
+       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg1);
+
+       rtsx_pci_read_config_byte(pcr, PCR_SETTING_REG3, &reg3);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3);
+       pcr->sd30_drive_sel_3v3 = rtl8411_reg_to_sd30_drive_sel_3v3(reg3);
+}
+
+static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg = 0;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+       if (!rtsx_vendor_setting_valid(reg))
+               return;
+
+       pcr->aspm_en = rtsx_reg_to_aspm(reg);
+       pcr->sd30_drive_sel_1v8 =
+               map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg));
+       pcr->sd30_drive_sel_3v3 =
+               map_sd_drive(rtl8411b_reg_to_sd30_drive_sel_3v3(reg));
+}
+
+static void rtl8411_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
+}
+
+static int rtl8411_extra_init_hw(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_init_cmd(pcr);
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+                       0xFF, pcr->sd30_drive_sel_3v3);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL,
+                       CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
+
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rtl8411b_extra_init_hw(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_init_cmd(pcr);
+
+       if (rtl8411b_is_qfn48(pcr))
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+                               CARD_PULL_CTL3, 0xFF, 0xF5);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+                       0xFF, pcr->sd30_drive_sel_3v3);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL,
+                       CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, FUNC_FORCE_CTL,
+                       0x06, 0x00);
+
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rtl8411_turn_on_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x00);
+}
+
+static int rtl8411_turn_off_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x01);
+}
+
+static int rtl8411_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0xFF, 0x0D);
+}
+
+static int rtl8411_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0x08, 0x00);
+}
+
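+/*
+ * Bring card power up in stages (5% -> 10% -> 15% -> full), with a short
+ * delay after each step to keep the in-rush current under control.
+ */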
+static int rtl8411_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+       int err;
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       BPP_POWER_MASK, BPP_POWER_5_PERCENT_ON);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CTL,
+                       BPP_LDO_POWB, BPP_LDO_SUSPEND);
+       err = rtsx_pci_send_cmd(pcr, 100);
+       if (err < 0)
+               return err;
+
+       /* Short delay to avoid an excessive in-rush current */
+       udelay(150);
+
+       err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+                       BPP_POWER_MASK, BPP_POWER_10_PERCENT_ON);
+       if (err < 0)
+               return err;
+
+       udelay(150);
+
+       err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+                       BPP_POWER_MASK, BPP_POWER_15_PERCENT_ON);
+       if (err < 0)
+               return err;
+
+       udelay(150);
+
+       err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+                       BPP_POWER_MASK, BPP_POWER_ON);
+       if (err < 0)
+               return err;
+
+       return rtsx_pci_write_register(pcr, LDO_CTL, BPP_LDO_POWB, BPP_LDO_ON);
+}
+
+static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+       int err;
+
+       err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+                       BPP_POWER_MASK, BPP_POWER_OFF);
+       if (err < 0)
+               return err;
+
+       return rtsx_pci_write_register(pcr, LDO_CTL,
+                       BPP_LDO_POWB, BPP_LDO_SUSPEND);
+}
+
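+/*
+ * Common voltage-switch helper for the 8411 and 8402: callers pass in the
+ * chip-specific LDO_CTL bit position and the ASIC code to use for 1.8V
+ * signalling (BPP_ASIC_1V8 on the 8411, BPP_ASIC_2V0 on the 8402).
+ */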
+static int rtl8411_do_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage,
+               int bpp_tuned18_shift, int bpp_asic_1v8)
+{
+       u8 mask, val;
+       int err;
+
+       mask = (BPP_REG_TUNED18 << bpp_tuned18_shift) | BPP_PAD_MASK;
+       if (voltage == OUTPUT_3V3) {
+               err = rtsx_pci_write_register(pcr,
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
+               if (err < 0)
+                       return err;
+               val = (BPP_ASIC_3V3 << bpp_tuned18_shift) | BPP_PAD_3V3;
+       } else if (voltage == OUTPUT_1V8) {
+               err = rtsx_pci_write_register(pcr,
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
+               if (err < 0)
+                       return err;
+               val = (bpp_asic_1v8 << bpp_tuned18_shift) | BPP_PAD_1V8;
+       } else {
+               return -EINVAL;
+       }
+
+       return rtsx_pci_write_register(pcr, LDO_CTL, mask, val);
+}
+
+static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       return rtl8411_do_switch_output_voltage(pcr, voltage,
+                       BPP_TUNED18_SHIFT_8411, BPP_ASIC_1V8);
+}
+
+static int rtl8402_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       return rtl8411_do_switch_output_voltage(pcr, voltage,
+                       BPP_TUNED18_SHIFT_8402, BPP_ASIC_2V0);
+}
+
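+/*
+ * Card-detect deglitch: if RTSX_BIPR reports both an MS and an SD card,
+ * briefly power the socket and re-sample to see which card is really
+ * present, then leave only that slot's detect and interrupt enabled.
+ */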
+static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
+{
+       unsigned int card_exist;
+
+       card_exist = rtsx_pci_readl(pcr, RTSX_BIPR);
+       card_exist &= CARD_EXIST;
+       if (!card_exist) {
+               /* Enable card CD */
+               rtsx_pci_write_register(pcr, CD_PAD_CTL,
+                               CD_DISABLE_MASK, CD_ENABLE);
+               /* Enable card interrupt */
+               rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x00);
+               return 0;
+       }
+
+       if (hweight32(card_exist) > 1) {
+               rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+                               BPP_POWER_MASK, BPP_POWER_5_PERCENT_ON);
+               msleep(100);
+
+               card_exist = rtsx_pci_readl(pcr, RTSX_BIPR);
+               if (card_exist & MS_EXIST)
+                       card_exist = MS_EXIST;
+               else if (card_exist & SD_EXIST)
+                       card_exist = SD_EXIST;
+               else
+                       card_exist = 0;
+
+               rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+                               BPP_POWER_MASK, BPP_POWER_OFF);
+
+               pcr_dbg(pcr, "After CD deglitch, card_exist = 0x%x\n",
+                       card_exist);
+       }
+
+       if (card_exist & MS_EXIST) {
+               /* Disable SD interrupt */
+               rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x40);
+               rtsx_pci_write_register(pcr, CD_PAD_CTL,
+                               CD_DISABLE_MASK, MS_CD_EN_ONLY);
+       } else if (card_exist & SD_EXIST) {
+               /* Disable MS interrupt */
+               rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x80);
+               rtsx_pci_write_register(pcr, CD_PAD_CTL,
+                               CD_DISABLE_MASK, SD_CD_EN_ONLY);
+       }
+
+       return card_exist;
+}
+
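+/*
+ * Convert between a clock value and the SSC DIV_N register setting:
+ * DIV_N = clk * 4 / 5 - 2, and the inverse when going back to a clock.
+ */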
+static int rtl8411_conv_clk_and_div_n(int input, int dir)
+{
+       int output;
+
+       if (dir == CLK_TO_DIV_N)
+               output = input * 4 / 5 - 2;
+       else
+               output = (input + 2) * 5 / 4;
+
+       return output;
+}
+
+static const struct pcr_ops rtl8411_pcr_ops = {
+       .fetch_vendor_settings = rtl8411_fetch_vendor_settings,
+       .extra_init_hw = rtl8411_extra_init_hw,
+       .optimize_phy = NULL,
+       .turn_on_led = rtl8411_turn_on_led,
+       .turn_off_led = rtl8411_turn_off_led,
+       .enable_auto_blink = rtl8411_enable_auto_blink,
+       .disable_auto_blink = rtl8411_disable_auto_blink,
+       .card_power_on = rtl8411_card_power_on,
+       .card_power_off = rtl8411_card_power_off,
+       .switch_output_voltage = rtl8411_switch_output_voltage,
+       .cd_deglitch = rtl8411_cd_deglitch,
+       .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
+       .force_power_down = rtl8411_force_power_down,
+};
+
+static const struct pcr_ops rtl8402_pcr_ops = {
+       .fetch_vendor_settings = rtl8411_fetch_vendor_settings,
+       .extra_init_hw = rtl8411_extra_init_hw,
+       .optimize_phy = NULL,
+       .turn_on_led = rtl8411_turn_on_led,
+       .turn_off_led = rtl8411_turn_off_led,
+       .enable_auto_blink = rtl8411_enable_auto_blink,
+       .disable_auto_blink = rtl8411_disable_auto_blink,
+       .card_power_on = rtl8411_card_power_on,
+       .card_power_off = rtl8411_card_power_off,
+       .switch_output_voltage = rtl8402_switch_output_voltage,
+       .cd_deglitch = rtl8411_cd_deglitch,
+       .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
+       .force_power_down = rtl8411_force_power_down,
+};
+
+static const struct pcr_ops rtl8411b_pcr_ops = {
+       .fetch_vendor_settings = rtl8411b_fetch_vendor_settings,
+       .extra_init_hw = rtl8411b_extra_init_hw,
+       .optimize_phy = NULL,
+       .turn_on_led = rtl8411_turn_on_led,
+       .turn_off_led = rtl8411_turn_off_led,
+       .enable_auto_blink = rtl8411_enable_auto_blink,
+       .disable_auto_blink = rtl8411_disable_auto_blink,
+       .card_power_on = rtl8411_card_power_on,
+       .card_power_off = rtl8411_card_power_off,
+       .switch_output_voltage = rtl8411_switch_output_voltage,
+       .cd_deglitch = rtl8411_cd_deglitch,
+       .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
+       .force_power_down = rtl8411_force_power_down,
+};
+
+/* SD Pull Control Enable:
+ *     SD_DAT[3:0] ==> pull up
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull up
+ *     SD_CMD      ==> pull up
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rtl8411_sd_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xA9),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x09),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
+       0,
+};
+
+/* SD Pull Control Disable:
+ *     SD_DAT[3:0] ==> pull down
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull down
+ *     SD_CMD      ==> pull down
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rtl8411_sd_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
+       0,
+};
+
+/* MS Pull Control Enable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rtl8411_ms_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x05),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
+       0,
+};
+
+/* MS Pull Control Disable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rtl8411_ms_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
+       0,
+};
+
+static const u32 rtl8411b_qfn64_sd_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x09 | 0xD0),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
+       0,
+};
+
+static const u32 rtl8411b_qfn48_sd_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x69 | 0x90),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x08 | 0x11),
+       0,
+};
+
+static const u32 rtl8411b_qfn64_sd_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
+       0,
+};
+
+static const u32 rtl8411b_qfn48_sd_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
+       0,
+};
+
+static const u32 rtl8411b_qfn64_ms_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x05 | 0x50),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
+       0,
+};
+
+static const u32 rtl8411b_qfn48_ms_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
+       0,
+};
+
+static const u32 rtl8411b_qfn64_ms_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
+       0,
+};
+
+static const u32 rtl8411b_qfn48_ms_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
+       0,
+};
+
+static void rtl8411_init_common_params(struct rtsx_pcr *pcr)
+{
+       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+       pcr->num_slots = 2;
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+       pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+       pcr->aspm_en = ASPM_L1_EN;
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
+       pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
+       pcr->ic_version = rtl8411_get_ic_version(pcr);
+}
+
+void rtl8411_init_params(struct rtsx_pcr *pcr)
+{
+       rtl8411_init_common_params(pcr);
+       pcr->ops = &rtl8411_pcr_ops;
+       set_pull_ctrl_tables(pcr, rtl8411);
+}
+
+void rtl8411b_init_params(struct rtsx_pcr *pcr)
+{
+       rtl8411_init_common_params(pcr);
+       pcr->ops = &rtl8411b_pcr_ops;
+       if (rtl8411b_is_qfn48(pcr))
+               set_pull_ctrl_tables(pcr, rtl8411b_qfn48);
+       else
+               set_pull_ctrl_tables(pcr, rtl8411b_qfn64);
+}
+
+void rtl8402_init_params(struct rtsx_pcr *pcr)
+{
+       rtl8411_init_common_params(pcr);
+       pcr->ops = &rtl8402_pcr_ops;
+       set_pull_ctrl_tables(pcr, rtl8411);
+}
diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c
new file mode 100644 (file)
index 0000000..ce68c48
--- /dev/null
@@ -0,0 +1,277 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rtsx_pcr.h"
+
+static u8 rts5209_get_ic_version(struct rtsx_pcr *pcr)
+{
+       u8 val;
+
+       val = rtsx_pci_readb(pcr, 0x1C);
+       return val & 0x0F;
+}
+
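+/* Vendor settings are read from two PCI config dwords and applied only when flagged as valid */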
+static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+       if (rts5209_vendor_setting1_valid(reg)) {
+               if (rts5209_reg_check_ms_pmos(reg))
+                       pcr->flags |= PCR_MS_PMOS;
+               pcr->aspm_en = rts5209_reg_to_aspm(reg);
+       }
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+
+       if (rts5209_vendor_setting2_valid(reg)) {
+               pcr->sd30_drive_sel_1v8 =
+                       rts5209_reg_to_sd30_drive_sel_1v8(reg);
+               pcr->sd30_drive_sel_3v3 =
+                       rts5209_reg_to_sd30_drive_sel_3v3(reg);
+               pcr->card_drive_sel = rts5209_reg_to_card_drive_sel(reg);
+       }
+}
+
+static void rts5209_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
+}
+
+static int rts5209_extra_init_hw(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_init_cmd(pcr);
+
+       /* Turn off LED */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO, 0xFF, 0x03);
+       /* Reset ASPM state to default value */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
+       /* Force CLKREQ# PIN to drive 0 to request clock */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
+       /* Configure GPIO as output */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO_DIR, 0xFF, 0x03);
+       /* Configure driving */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+                       0xFF, pcr->sd30_drive_sel_3v3);
+
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5209_optimize_phy(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_phy_register(pcr, 0x00, 0xB966);
+}
+
+static int rts5209_turn_on_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x00);
+}
+
+static int rts5209_turn_off_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x01);
+}
+
+static int rts5209_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0xFF, 0x0D);
+}
+
+static int rts5209_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0x08, 0x00);
+}
+
+static int rts5209_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+       int err;
+       u8 pwr_mask, partial_pwr_on, pwr_on;
+
+       pwr_mask = SD_POWER_MASK;
+       partial_pwr_on = SD_PARTIAL_POWER_ON;
+       pwr_on = SD_POWER_ON;
+
+       if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) {
+               pwr_mask = MS_POWER_MASK;
+               partial_pwr_on = MS_PARTIAL_POWER_ON;
+               pwr_on = MS_POWER_ON;
+       }
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       pwr_mask, partial_pwr_on);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x04);
+       err = rtsx_pci_send_cmd(pcr, 100);
+       if (err < 0)
+               return err;
+
+       /* Delay briefly to avoid an excessive inrush current */
+       udelay(150);
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, pwr_mask, pwr_on);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x00);
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+       u8 pwr_mask, pwr_off;
+
+       pwr_mask = SD_POWER_MASK;
+       pwr_off = SD_POWER_OFF;
+
+       if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) {
+               pwr_mask = MS_POWER_MASK;
+               pwr_off = MS_POWER_OFF;
+       }
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       pwr_mask | PMOS_STRG_MASK, pwr_off | PMOS_STRG_400mA);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x06);
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       int err;
+
+       if (voltage == OUTPUT_3V3) {
+               err = rtsx_pci_write_register(pcr,
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
+               if (err < 0)
+                       return err;
+               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+               if (err < 0)
+                       return err;
+       } else if (voltage == OUTPUT_1V8) {
+               err = rtsx_pci_write_register(pcr,
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
+               if (err < 0)
+                       return err;
+               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
+               if (err < 0)
+                       return err;
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static const struct pcr_ops rts5209_pcr_ops = {
+       .fetch_vendor_settings = rts5209_fetch_vendor_settings,
+       .extra_init_hw = rts5209_extra_init_hw,
+       .optimize_phy = rts5209_optimize_phy,
+       .turn_on_led = rts5209_turn_on_led,
+       .turn_off_led = rts5209_turn_off_led,
+       .enable_auto_blink = rts5209_enable_auto_blink,
+       .disable_auto_blink = rts5209_disable_auto_blink,
+       .card_power_on = rts5209_card_power_on,
+       .card_power_off = rts5209_card_power_off,
+       .switch_output_voltage = rts5209_switch_output_voltage,
+       .cd_deglitch = NULL,
+       .conv_clk_and_div_n = NULL,
+       .force_power_down = rts5209_force_power_down,
+};
+
+/* SD Pull Control Enable:
+ *     SD_DAT[3:0] ==> pull up
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull up
+ *     SD_CMD      ==> pull up
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5209_sd_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+       0,
+};
+
+/* SD Pull Control Disable:
+ *     SD_DAT[3:0] ==> pull down
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull down
+ *     SD_CMD      ==> pull down
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5209_sd_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+       0,
+};
+
+/* MS Pull Control Enable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rts5209_ms_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+       0,
+};
+
+/* MS Pull Control Disable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rts5209_ms_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+       0,
+};
+
+void rts5209_init_params(struct rtsx_pcr *pcr)
+{
+       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 |
+               EXTRA_CAPS_SD_SDR104 | EXTRA_CAPS_MMC_8BIT;
+       pcr->num_slots = 2;
+       pcr->ops = &rts5209_pcr_ops;
+
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTS5209_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+       pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+       pcr->aspm_en = ASPM_L1_EN;
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16);
+       pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+       pcr->ic_version = rts5209_get_ic_version(pcr);
+       pcr->sd_pull_ctl_enable_tbl = rts5209_sd_pull_ctl_enable_tbl;
+       pcr->sd_pull_ctl_disable_tbl = rts5209_sd_pull_ctl_disable_tbl;
+       pcr->ms_pull_ctl_enable_tbl = rts5209_ms_pull_ctl_enable_tbl;
+       pcr->ms_pull_ctl_disable_tbl = rts5209_ms_pull_ctl_disable_tbl;
+}
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
new file mode 100644 (file)
index 0000000..024dcba
--- /dev/null
@@ -0,0 +1,374 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Wei WANG <wei_wang@realsil.com.cn>
+ *   Roger Tseng <rogerable@realtek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rtsx_pcr.h"
+
+static u8 rts5227_get_ic_version(struct rtsx_pcr *pcr)
+{
+       u8 val;
+
+       rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+       return val & 0x0F;
+}
+
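+/* Program the SD30 CLK/CMD/DAT pad driving strength for the selected output voltage */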
+static void rts5227_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+       u8 driving_3v3[4][3] = {
+               {0x13, 0x13, 0x13},
+               {0x96, 0x96, 0x96},
+               {0x7F, 0x7F, 0x7F},
+               {0x96, 0x96, 0x96},
+       };
+       u8 driving_1v8[4][3] = {
+               {0x99, 0x99, 0x99},
+               {0xAA, 0xAA, 0xAA},
+               {0xFE, 0xFE, 0xFE},
+               {0xB3, 0xB3, 0xB3},
+       };
+       u8 (*driving)[3], drive_sel;
+
+       if (voltage == OUTPUT_3V3) {
+               driving = driving_3v3;
+               drive_sel = pcr->sd30_drive_sel_3v3;
+       } else {
+               driving = driving_1v8;
+               drive_sel = pcr->sd30_drive_sel_1v8;
+       }
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+                       0xFF, driving[drive_sel][0]);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+                       0xFF, driving[drive_sel][1]);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+                       0xFF, driving[drive_sel][2]);
+}
+
+static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+       if (!rtsx_vendor_setting_valid(reg))
+               return;
+
+       pcr->aspm_en = rtsx_reg_to_aspm(reg);
+       pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+       pcr->card_drive_sel &= 0x3F;
+       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+       pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+       if (rtsx_reg_check_reverse_socket(reg))
+               pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rts5227_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       /* Set relink_time to 0 */
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, 0xFF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
+
+       if (pm_state == HOST_ENTER_S3)
+               rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x10);
+
+       rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
+}
+
+static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
+{
+       u16 cap;
+
+       rtsx_pci_init_cmd(pcr);
+
+       /* Configure GPIO as output */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+       /* Reset ASPM state to default value */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
+       /* Switch LDO3318 source from DV33 to card_3v3 */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
+       /* Disable LED shine and set the initial shine cycle period */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
+       /* Configure LTR */
+       pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cap);
+       if (cap & PCI_EXP_DEVCTL2_LTR_EN)
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LTR_CTL, 0xFF, 0xA3);
+       /* Configure OBFF */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG, 0x03, 0x03);
+       /* Configure driving */
+       rts5227_fill_driving(pcr, OUTPUT_3V3);
+       /* Configure force_clock_req */
+       if (pcr->flags & PCR_REVERSE_SOCKET)
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB8, 0xB8);
+       else
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB8, 0x88);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, pcr->reg_pm_ctrl3, 0x10, 0x00);
+
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5227_optimize_phy(struct rtsx_pcr *pcr)
+{
+       int err;
+
+       err = rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00);
+       if (err < 0)
+               return err;
+
+       /* Optimize RX sensitivity */
+       return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42);
+}
+
+static int rts5227_turn_on_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
+}
+
+static int rts5227_turn_off_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
+}
+
+static int rts5227_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
+}
+
+static int rts5227_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
+}
+
+static int rts5227_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+       int err;
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       SD_POWER_MASK, SD_PARTIAL_POWER_ON);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x02);
+       err = rtsx_pci_send_cmd(pcr, 100);
+       if (err < 0)
+               return err;
+
+       /* Delay briefly to avoid an excessive inrush current */
+       udelay(150);
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       SD_POWER_MASK, SD_POWER_ON);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x06);
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       SD_POWER_MASK | PMOS_STRG_MASK,
+                       SD_POWER_OFF | PMOS_STRG_400mA);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x00);
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       int err;
+
+       if (voltage == OUTPUT_3V3) {
+               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+               if (err < 0)
+                       return err;
+       } else if (voltage == OUTPUT_1V8) {
+               err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
+               if (err < 0)
+                       return err;
+               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C80 | 0x24);
+               if (err < 0)
+                       return err;
+       } else {
+               return -EINVAL;
+       }
+
+       /* set pad drive */
+       rtsx_pci_init_cmd(pcr);
+       rts5227_fill_driving(pcr, voltage);
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static const struct pcr_ops rts5227_pcr_ops = {
+       .fetch_vendor_settings = rts5227_fetch_vendor_settings,
+       .extra_init_hw = rts5227_extra_init_hw,
+       .optimize_phy = rts5227_optimize_phy,
+       .turn_on_led = rts5227_turn_on_led,
+       .turn_off_led = rts5227_turn_off_led,
+       .enable_auto_blink = rts5227_enable_auto_blink,
+       .disable_auto_blink = rts5227_disable_auto_blink,
+       .card_power_on = rts5227_card_power_on,
+       .card_power_off = rts5227_card_power_off,
+       .switch_output_voltage = rts5227_switch_output_voltage,
+       .cd_deglitch = NULL,
+       .conv_clk_and_div_n = NULL,
+       .force_power_down = rts5227_force_power_down,
+};
+
+/* SD Pull Control Enable:
+ *     SD_DAT[3:0] ==> pull up
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull up
+ *     SD_CMD      ==> pull up
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5227_sd_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+       0,
+};
+
+/* SD Pull Control Disable:
+ *     SD_DAT[3:0] ==> pull down
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull down
+ *     SD_CMD      ==> pull down
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5227_sd_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+       0,
+};
+
+/* MS Pull Control Enable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rts5227_ms_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+       0,
+};
+
+/* MS Pull Control Disable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rts5227_ms_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+       0,
+};
+
+void rts5227_init_params(struct rtsx_pcr *pcr)
+{
+       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+       pcr->num_slots = 2;
+       pcr->ops = &rts5227_pcr_ops;
+
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+       pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+       pcr->aspm_en = ASPM_L1_EN;
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
+       pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7);
+
+       pcr->ic_version = rts5227_get_ic_version(pcr);
+       pcr->sd_pull_ctl_enable_tbl = rts5227_sd_pull_ctl_enable_tbl;
+       pcr->sd_pull_ctl_disable_tbl = rts5227_sd_pull_ctl_disable_tbl;
+       pcr->ms_pull_ctl_enable_tbl = rts5227_ms_pull_ctl_enable_tbl;
+       pcr->ms_pull_ctl_disable_tbl = rts5227_ms_pull_ctl_disable_tbl;
+
+       pcr->reg_pm_ctrl3 = PM_CTRL3;
+}
+
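+/* RTS522A-specific PHY tuning; IC version A parts need additional PHY register initialization */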
+static int rts522a_optimize_phy(struct rtsx_pcr *pcr)
+{
+       int err;
+
+       err = rtsx_pci_write_register(pcr, RTS522A_PM_CTRL3, D3_DELINK_MODE_EN,
+               0x00);
+       if (err < 0)
+               return err;
+
+       if (is_version(pcr, 0x522A, IC_VER_A)) {
+               err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
+                       PHY_RCR2_INIT_27S);
+               if (err)
+                       return err;
+
+               rtsx_pci_write_phy_register(pcr, PHY_RCR1, PHY_RCR1_INIT_27S);
+               rtsx_pci_write_phy_register(pcr, PHY_FLD0, PHY_FLD0_INIT_27S);
+               rtsx_pci_write_phy_register(pcr, PHY_FLD3, PHY_FLD3_INIT_27S);
+               rtsx_pci_write_phy_register(pcr, PHY_FLD4, PHY_FLD4_INIT_27S);
+       }
+
+       return 0;
+}
+
+static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
+{
+       rts5227_extra_init_hw(pcr);
+
+       rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG,
+               FUNC_FORCE_UPME_XMT_DBG);
+       rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04);
+       rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
+       rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 0xFF, 0x11);
+
+       return 0;
+}
+
+/*
+ * rts522a operations are mainly derived from rts5227, except for the
+ * PHY/HW init settings.
+ */
+static const struct pcr_ops rts522a_pcr_ops = {
+       .fetch_vendor_settings = rts5227_fetch_vendor_settings,
+       .extra_init_hw = rts522a_extra_init_hw,
+       .optimize_phy = rts522a_optimize_phy,
+       .turn_on_led = rts5227_turn_on_led,
+       .turn_off_led = rts5227_turn_off_led,
+       .enable_auto_blink = rts5227_enable_auto_blink,
+       .disable_auto_blink = rts5227_disable_auto_blink,
+       .card_power_on = rts5227_card_power_on,
+       .card_power_off = rts5227_card_power_off,
+       .switch_output_voltage = rts5227_switch_output_voltage,
+       .cd_deglitch = NULL,
+       .conv_clk_and_div_n = NULL,
+       .force_power_down = rts5227_force_power_down,
+};
+
+void rts522a_init_params(struct rtsx_pcr *pcr)
+{
+       rts5227_init_params(pcr);
+
+       pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
+}
diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c
new file mode 100644 (file)
index 0000000..9119261
--- /dev/null
@@ -0,0 +1,273 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rtsx_pcr.h"
+
+static u8 rts5229_get_ic_version(struct rtsx_pcr *pcr)
+{
+       u8 val;
+
+       rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+       return val & 0x0F;
+}
+
+static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+       if (!rtsx_vendor_setting_valid(reg))
+               return;
+
+       pcr->aspm_en = rtsx_reg_to_aspm(reg);
+       pcr->sd30_drive_sel_1v8 =
+               map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg));
+       pcr->card_drive_sel &= 0x3F;
+       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+       pcr->sd30_drive_sel_3v3 =
+               map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg));
+}
+
+static void rts5229_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
+}
+
+static int rts5229_extra_init_hw(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_init_cmd(pcr);
+
+       /* Configure GPIO as output */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+       /* Reset ASPM state to default value */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
+       /* Force CLKREQ# PIN to drive 0 to request clock */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
+       /* Switch LDO3318 source from DV33 to card_3v3 */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
+       /* Disable LED shine and set the initial shine cycle period */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
+       /* Configure driving */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+                       0xFF, pcr->sd30_drive_sel_3v3);
+
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5229_optimize_phy(struct rtsx_pcr *pcr)
+{
+       /* Optimize RX sensitivity */
+       return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42);
+}
+
+static int rts5229_turn_on_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
+}
+
+static int rts5229_turn_off_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
+}
+
+static int rts5229_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
+}
+
+static int rts5229_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
+}
+
+static int rts5229_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+       int err;
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       SD_POWER_MASK, SD_PARTIAL_POWER_ON);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x02);
+       err = rtsx_pci_send_cmd(pcr, 100);
+       if (err < 0)
+               return err;
+
+       /* Delay briefly to avoid an excessive inrush current */
+       udelay(150);
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       SD_POWER_MASK, SD_POWER_ON);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x06);
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       SD_POWER_MASK | PMOS_STRG_MASK,
+                       SD_POWER_OFF | PMOS_STRG_400mA);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x00);
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       int err;
+
+       if (voltage == OUTPUT_3V3) {
+               err = rtsx_pci_write_register(pcr,
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
+               if (err < 0)
+                       return err;
+               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+               if (err < 0)
+                       return err;
+       } else if (voltage == OUTPUT_1V8) {
+               err = rtsx_pci_write_register(pcr,
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
+               if (err < 0)
+                       return err;
+               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
+               if (err < 0)
+                       return err;
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static const struct pcr_ops rts5229_pcr_ops = {
+       .fetch_vendor_settings = rts5229_fetch_vendor_settings,
+       .extra_init_hw = rts5229_extra_init_hw,
+       .optimize_phy = rts5229_optimize_phy,
+       .turn_on_led = rts5229_turn_on_led,
+       .turn_off_led = rts5229_turn_off_led,
+       .enable_auto_blink = rts5229_enable_auto_blink,
+       .disable_auto_blink = rts5229_disable_auto_blink,
+       .card_power_on = rts5229_card_power_on,
+       .card_power_off = rts5229_card_power_off,
+       .switch_output_voltage = rts5229_switch_output_voltage,
+       .cd_deglitch = NULL,
+       .conv_clk_and_div_n = NULL,
+       .force_power_down = rts5229_force_power_down,
+};
+
+/* SD Pull Control Enable:
+ *     SD_DAT[3:0] ==> pull up
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull up
+ *     SD_CMD      ==> pull up
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5229_sd_pull_ctl_enable_tbl1[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+       0,
+};
+
+/* For RTS5229 version C */
+static const u32 rts5229_sd_pull_ctl_enable_tbl2[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD9),
+       0,
+};
+
+/* SD Pull Control Disable:
+ *     SD_DAT[3:0] ==> pull down
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull down
+ *     SD_CMD      ==> pull down
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5229_sd_pull_ctl_disable_tbl1[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+       0,
+};
+
+/* For RTS5229 version C */
+static const u32 rts5229_sd_pull_ctl_disable_tbl2[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE5),
+       0,
+};
+
+/* MS Pull Control Enable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rts5229_ms_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+       0,
+};
+
+/* MS Pull Control Disable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rts5229_ms_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+       0,
+};
+
+void rts5229_init_params(struct rtsx_pcr *pcr)
+{
+       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+       pcr->num_slots = 2;
+       pcr->ops = &rts5229_pcr_ops;
+
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+       pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+       pcr->aspm_en = ASPM_L1_EN;
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
+       pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6);
+
+       pcr->ic_version = rts5229_get_ic_version(pcr);
+       if (pcr->ic_version == IC_VER_C) {
+               pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl2;
+               pcr->sd_pull_ctl_disable_tbl = rts5229_sd_pull_ctl_disable_tbl2;
+       } else {
+               pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl1;
+               pcr->sd_pull_ctl_disable_tbl = rts5229_sd_pull_ctl_disable_tbl1;
+       }
+       pcr->ms_pull_ctl_enable_tbl = rts5229_ms_pull_ctl_enable_tbl;
+       pcr->ms_pull_ctl_disable_tbl = rts5229_ms_pull_ctl_disable_tbl;
+}
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
new file mode 100644 (file)
index 0000000..dbe013a
--- /dev/null
@@ -0,0 +1,740 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rtsx_pcr.h"
+
+static u8 rts5249_get_ic_version(struct rtsx_pcr *pcr)
+{
+       u8 val;
+
+       rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+       return val & 0x0F;
+}
+
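+/* Program the SD30 CLK/CMD/DAT pad driving strength for the selected output voltage */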
+static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+       u8 driving_3v3[4][3] = {
+               {0x11, 0x11, 0x18},
+               {0x55, 0x55, 0x5C},
+               {0xFF, 0xFF, 0xFF},
+               {0x96, 0x96, 0x96},
+       };
+       u8 driving_1v8[4][3] = {
+               {0xC4, 0xC4, 0xC4},
+               {0x3C, 0x3C, 0x3C},
+               {0xFE, 0xFE, 0xFE},
+               {0xB3, 0xB3, 0xB3},
+       };
+       u8 (*driving)[3], drive_sel;
+
+       if (voltage == OUTPUT_3V3) {
+               driving = driving_3v3;
+               drive_sel = pcr->sd30_drive_sel_3v3;
+       } else {
+               driving = driving_1v8;
+               drive_sel = pcr->sd30_drive_sel_1v8;
+       }
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+                       0xFF, driving[drive_sel][0]);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+                       0xFF, driving[drive_sel][1]);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+                       0xFF, driving[drive_sel][2]);
+}
+
+static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+       if (!rtsx_vendor_setting_valid(reg)) {
+               pcr_dbg(pcr, "skip fetch vendor setting\n");
+               return;
+       }
+
+       pcr->aspm_en = rtsx_reg_to_aspm(reg);
+       pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+       pcr->card_drive_sel &= 0x3F;
+       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+       pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+       if (rtsx_reg_check_reverse_socket(reg))
+               pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       /* Set relink_time to 0 */
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, 0xFF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
+
+       if (pm_state == HOST_ENTER_S3)
+               rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+                       D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+       rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
+}
+
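+/* Read ASPM L1 sub-state settings from the vendor configuration registers and check whether LTR is enabled */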
+static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &(pcr->option);
+       u32 lval;
+
+       if (CHK_PCI_PID(pcr, PID_524A))
+               rtsx_pci_read_config_dword(pcr,
+                       PCR_ASPM_SETTING_REG1, &lval);
+       else
+               rtsx_pci_read_config_dword(pcr,
+                       PCR_ASPM_SETTING_REG2, &lval);
+
+       if (lval & ASPM_L1_1_EN_MASK)
+               rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+
+       if (lval & ASPM_L1_2_EN_MASK)
+               rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+
+       if (lval & PM_L1_1_EN_MASK)
+               rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+
+       if (lval & PM_L1_2_EN_MASK)
+               rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+
+       if (option->ltr_en) {
+               u16 val;
+
+               pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+               if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+                       option->ltr_enabled = true;
+                       option->ltr_active = true;
+                       rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+               } else {
+                       option->ltr_enabled = false;
+               }
+       }
+}
+
+static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &(pcr->option);
+
+       if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+                               | PM_L1_1_EN | PM_L1_2_EN))
+               option->force_clkreq_0 = false;
+       else
+               option->force_clkreq_0 = true;
+
+       return 0;
+}
+
+static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &(pcr->option);
+
+       rts5249_init_from_cfg(pcr);
+       rts5249_init_from_hw(pcr);
+
+       rtsx_pci_init_cmd(pcr);
+
+       /* Reset L1SUB Config */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
+       /* Configure GPIO as output */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+       /* Reset ASPM state to default value */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
+       /* Switch LDO3318 source from DV33 to card_3v3 */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
+       /* Disable LED shine and set the initial shine cycle period */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
+       /* Configure driving */
+       rts5249_fill_driving(pcr, OUTPUT_3V3);
+       if (pcr->flags & PCR_REVERSE_SOCKET)
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
+       else
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
+
+       /*
+        * If force_clkreq_0 is enabled, the CLKREQ# pin is forced to
+        * drive low so that the clock is forcibly requested.
+        */
+       if (option->force_clkreq_0)
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+                       FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+       else
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+                       FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+       return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+}
+
+static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
+{
+       int err;
+
+       err = rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00);
+       if (err < 0)
+               return err;
+
+       err = rtsx_pci_write_phy_register(pcr, PHY_REV,
+                       PHY_REV_RESV | PHY_REV_RXIDLE_LATCHED |
+                       PHY_REV_P1_EN | PHY_REV_RXIDLE_EN |
+                       PHY_REV_CLKREQ_TX_EN | PHY_REV_RX_PWST |
+                       PHY_REV_CLKREQ_DT_1_0 | PHY_REV_STOP_CLKRD |
+                       PHY_REV_STOP_CLKWR);
+       if (err < 0)
+               return err;
+
+       msleep(1);
+
+       err = rtsx_pci_write_phy_register(pcr, PHY_BPCR,
+                       PHY_BPCR_IBRXSEL | PHY_BPCR_IBTXSEL |
+                       PHY_BPCR_IB_FILTER | PHY_BPCR_CMIRROR_EN);
+       if (err < 0)
+               return err;
+
+       err = rtsx_pci_write_phy_register(pcr, PHY_PCR,
+                       PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
+                       PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 |
+                       PHY_PCR_RSSI_EN | PHY_PCR_RX10K);
+       if (err < 0)
+               return err;
+
+       err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
+                       PHY_RCR2_EMPHASE_EN | PHY_RCR2_NADJR |
+                       PHY_RCR2_CDR_SR_2 | PHY_RCR2_FREQSEL_12 |
+                       PHY_RCR2_CDR_SC_12P | PHY_RCR2_CALIB_LATE);
+       if (err < 0)
+               return err;
+
+       err = rtsx_pci_write_phy_register(pcr, PHY_FLD4,
+                       PHY_FLD4_FLDEN_SEL | PHY_FLD4_REQ_REF |
+                       PHY_FLD4_RXAMP_OFF | PHY_FLD4_REQ_ADDA |
+                       PHY_FLD4_BER_COUNT | PHY_FLD4_BER_TIMER |
+                       PHY_FLD4_BER_CHK_EN);
+       if (err < 0)
+               return err;
+       err = rtsx_pci_write_phy_register(pcr, PHY_RDR,
+                       PHY_RDR_RXDSEL_1_9 | PHY_SSC_AUTO_PWD);
+       if (err < 0)
+               return err;
+       err = rtsx_pci_write_phy_register(pcr, PHY_RCR1,
+                       PHY_RCR1_ADP_TIME_4 | PHY_RCR1_VCO_COARSE);
+       if (err < 0)
+               return err;
+       err = rtsx_pci_write_phy_register(pcr, PHY_FLD3,
+                       PHY_FLD3_TIMER_4 | PHY_FLD3_TIMER_6 |
+                       PHY_FLD3_RXDELINK);
+       if (err < 0)
+               return err;
+
+       return rtsx_pci_write_phy_register(pcr, PHY_TUNE,
+                       PHY_TUNE_TUNEREF_1_0 | PHY_TUNE_VBGSEL_1252 |
+                       PHY_TUNE_SDBUS_33 | PHY_TUNE_TUNED18 |
+                       PHY_TUNE_TUNED12 | PHY_TUNE_TUNEA12);
+}
+
+static int rtsx_base_turn_on_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
+}
+
+static int rtsx_base_turn_off_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
+}
+
+static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
+}
+
+static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
+}
+
+static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+       int err;
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       SD_POWER_MASK, SD_VCC_PARTIAL_POWER_ON);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x02);
+       err = rtsx_pci_send_cmd(pcr, 100);
+       if (err < 0)
+               return err;
+
+       msleep(5);
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       SD_POWER_MASK, SD_VCC_POWER_ON);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x06);
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rtsx_base_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+                       SD_POWER_MASK, SD_POWER_OFF);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                       LDO3318_PWR_MASK, 0x00);
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       int err;
+       u16 append;
+
+       switch (voltage) {
+       case OUTPUT_3V3:
+               err = rtsx_pci_update_phy(pcr, PHY_TUNE, PHY_TUNE_VOLTAGE_MASK,
+                       PHY_TUNE_VOLTAGE_3V3);
+               if (err < 0)
+                       return err;
+               break;
+       case OUTPUT_1V8:
+               append = PHY_TUNE_D18_1V8;
+               if (CHK_PCI_PID(pcr, 0x5249)) {
+                       err = rtsx_pci_update_phy(pcr, PHY_BACR,
+                               PHY_BACR_BASIC_MASK, 0);
+                       if (err < 0)
+                               return err;
+                       append = PHY_TUNE_D18_1V7;
+               }
+
+               err = rtsx_pci_update_phy(pcr, PHY_TUNE, PHY_TUNE_VOLTAGE_MASK,
+                       append);
+               if (err < 0)
+                       return err;
+               break;
+       default:
+               pcr_dbg(pcr, "unknown output voltage %d\n", voltage);
+               return -EINVAL;
+       }
+
+       /* set pad drive */
+       rtsx_pci_init_cmd(pcr);
+       rts5249_fill_driving(pcr, voltage);
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static void rts5249_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+       u8 val = 0;
+
+       if (pcr->aspm_enabled == enable)
+               return;
+
+       if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+               if (enable)
+                       val = pcr->aspm_en;
+               rtsx_pci_update_cfg_byte(pcr,
+                       pcr->pcie_cap + PCI_EXP_LNKCTL,
+                       ASPM_MASK_NEG, val);
+       } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+               u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+               if (!enable)
+                       val = FORCE_ASPM_CTL0;
+               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+       }
+
+       pcr->aspm_enabled = enable;
+}
+
+static const struct pcr_ops rts5249_pcr_ops = {
+       .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
+       .extra_init_hw = rts5249_extra_init_hw,
+       .optimize_phy = rts5249_optimize_phy,
+       .turn_on_led = rtsx_base_turn_on_led,
+       .turn_off_led = rtsx_base_turn_off_led,
+       .enable_auto_blink = rtsx_base_enable_auto_blink,
+       .disable_auto_blink = rtsx_base_disable_auto_blink,
+       .card_power_on = rtsx_base_card_power_on,
+       .card_power_off = rtsx_base_card_power_off,
+       .switch_output_voltage = rtsx_base_switch_output_voltage,
+       .force_power_down = rtsx_base_force_power_down,
+       .set_aspm = rts5249_set_aspm,
+};
+
+/* SD Pull Control Enable:
+ *     SD_DAT[3:0] ==> pull up
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull up
+ *     SD_CMD      ==> pull up
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5249_sd_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA),
+       0,
+};
+
+/* SD Pull Control Disable:
+ *     SD_DAT[3:0] ==> pull down
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull down
+ *     SD_CMD      ==> pull down
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5249_sd_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+       0,
+};
+
+/* MS Pull Control Enable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rts5249_ms_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+       0,
+};
+
+/* MS Pull Control Disable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rts5249_ms_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+       0,
+};
+
+void rts5249_init_params(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &(pcr->option);
+
+       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+       pcr->num_slots = 2;
+       pcr->ops = &rts5249_pcr_ops;
+
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+       pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+       pcr->aspm_en = ASPM_L1_EN;
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+       pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+       pcr->ic_version = rts5249_get_ic_version(pcr);
+       pcr->sd_pull_ctl_enable_tbl = rts5249_sd_pull_ctl_enable_tbl;
+       pcr->sd_pull_ctl_disable_tbl = rts5249_sd_pull_ctl_disable_tbl;
+       pcr->ms_pull_ctl_enable_tbl = rts5249_ms_pull_ctl_enable_tbl;
+       pcr->ms_pull_ctl_disable_tbl = rts5249_ms_pull_ctl_disable_tbl;
+
+       pcr->reg_pm_ctrl3 = PM_CTRL3;
+
+       option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+                               | LTR_L1SS_PWR_GATE_EN);
+       option->ltr_en = true;
+
+       /* Init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+       option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+       option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+       option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+       option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+       option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+       option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF;
+       option->ltr_l1off_snooze_sspwrgate =
+               LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF;
+}
+
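+/* On RTS524A, PHY addresses with bit 7 set are remapped into the 0x40-0x7F range */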
+static int rts524a_write_phy(struct rtsx_pcr *pcr, u8 addr, u16 val)
+{
+       addr = addr & 0x80 ? (addr & 0x7F) | 0x40 : addr;
+
+       return __rtsx_pci_write_phy_register(pcr, addr, val);
+}
+
+static int rts524a_read_phy(struct rtsx_pcr *pcr, u8 addr, u16 *val)
+{
+       addr = addr & 0x80 ? (addr & 0x7F) | 0x40 : addr;
+
+       return __rtsx_pci_read_phy_register(pcr, addr, val);
+}
+
+static int rts524a_optimize_phy(struct rtsx_pcr *pcr)
+{
+       int err;
+
+       err = rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3,
+               D3_DELINK_MODE_EN, 0x00);
+       if (err < 0)
+               return err;
+
+       rtsx_pci_write_phy_register(pcr, PHY_PCR,
+               PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
+               PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 | PHY_PCR_RSSI_EN);
+       rtsx_pci_write_phy_register(pcr, PHY_SSCCR3,
+               PHY_SSCCR3_STEP_IN | PHY_SSCCR3_CHECK_DELAY);
+
+       if (is_version(pcr, 0x524A, IC_VER_A)) {
+               rtsx_pci_write_phy_register(pcr, PHY_SSCCR3,
+                       PHY_SSCCR3_STEP_IN | PHY_SSCCR3_CHECK_DELAY);
+               rtsx_pci_write_phy_register(pcr, PHY_SSCCR2,
+                       PHY_SSCCR2_PLL_NCODE | PHY_SSCCR2_TIME0 |
+                       PHY_SSCCR2_TIME2_WIDTH);
+               rtsx_pci_write_phy_register(pcr, PHY_ANA1A,
+                       PHY_ANA1A_TXR_LOOPBACK | PHY_ANA1A_RXT_BIST |
+                       PHY_ANA1A_TXR_BIST | PHY_ANA1A_REV);
+               rtsx_pci_write_phy_register(pcr, PHY_ANA1D,
+                       PHY_ANA1D_DEBUG_ADDR);
+               rtsx_pci_write_phy_register(pcr, PHY_DIG1E,
+                       PHY_DIG1E_REV | PHY_DIG1E_D0_X_D1 |
+                       PHY_DIG1E_RX_ON_HOST | PHY_DIG1E_RCLK_REF_HOST |
+                       PHY_DIG1E_RCLK_TX_EN_KEEP |
+                       PHY_DIG1E_RCLK_TX_TERM_KEEP |
+                       PHY_DIG1E_RCLK_RX_EIDLE_ON | PHY_DIG1E_TX_TERM_KEEP |
+                       PHY_DIG1E_RX_TERM_KEEP | PHY_DIG1E_TX_EN_KEEP |
+                       PHY_DIG1E_RX_EN_KEEP);
+       }
+
+       rtsx_pci_write_phy_register(pcr, PHY_ANA08,
+               PHY_ANA08_RX_EQ_DCGAIN | PHY_ANA08_SEL_RX_EN |
+               PHY_ANA08_RX_EQ_VAL | PHY_ANA08_SCP | PHY_ANA08_SEL_IPI);
+
+       return 0;
+}
+
+static int rts524a_extra_init_hw(struct rtsx_pcr *pcr)
+{
+       rts5249_extra_init_hw(pcr);
+
+       rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
+               FORCE_ASPM_L1_EN, FORCE_ASPM_L1_EN);
+       rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
+       rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_VCC_LMT_EN,
+               LDO_VCC_LMT_EN);
+       rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
+       if (is_version(pcr, 0x524A, IC_VER_A)) {
+               rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+                       LDO_DV18_SR_MASK, LDO_DV18_SR_DF);
+               rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
+                       LDO_VCC_REF_TUNE_MASK, LDO_VCC_REF_1V2);
+               rtsx_pci_write_register(pcr, LDO_VIO_CFG,
+                       LDO_VIO_REF_TUNE_MASK, LDO_VIO_REF_1V2);
+               rtsx_pci_write_register(pcr, LDO_VIO_CFG,
+                       LDO_VIO_SR_MASK, LDO_VIO_SR_DF);
+               rtsx_pci_write_register(pcr, LDO_DV12S_CFG,
+                       LDO_REF12_TUNE_MASK, LDO_REF12_TUNE_DF);
+               rtsx_pci_write_register(pcr, SD40_LDO_CTL1,
+                       SD40_VIO_TUNE_MASK, SD40_VIO_TUNE_1V7);
+       }
+
+       return 0;
+}
+
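+/* Select the L1OFF sub-state power-gating value based on ASPM L1.1/L1.2 support and card presence */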
+static void rts5250_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+       struct rtsx_cr_option *option = &(pcr->option);
+
+       u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+       int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
+       int aspm_L1_1, aspm_L1_2;
+       u8 val = 0;
+
+       aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+       aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+       if (active) {
+               /* Run, latency: 60us */
+               if (aspm_L1_1)
+                       val = option->ltr_l1off_snooze_sspwrgate;
+       } else {
+               /* L1off, latency: 300us */
+               if (aspm_L1_2)
+                       val = option->ltr_l1off_sspwrgate;
+       }
+
+       if (aspm_L1_1 || aspm_L1_2) {
+               if (rtsx_check_dev_flag(pcr,
+                                       LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+                       if (card_exist)
+                               val &= ~L1OFF_MBIAS2_EN_5250;
+                       else
+                               val |= L1OFF_MBIAS2_EN_5250;
+               }
+       }
+       rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts524a_pcr_ops = {
+       .write_phy = rts524a_write_phy,
+       .read_phy = rts524a_read_phy,
+       .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
+       .extra_init_hw = rts524a_extra_init_hw,
+       .optimize_phy = rts524a_optimize_phy,
+       .turn_on_led = rtsx_base_turn_on_led,
+       .turn_off_led = rtsx_base_turn_off_led,
+       .enable_auto_blink = rtsx_base_enable_auto_blink,
+       .disable_auto_blink = rtsx_base_disable_auto_blink,
+       .card_power_on = rtsx_base_card_power_on,
+       .card_power_off = rtsx_base_card_power_off,
+       .switch_output_voltage = rtsx_base_switch_output_voltage,
+       .force_power_down = rtsx_base_force_power_down,
+       .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
+       .set_aspm = rts5249_set_aspm,
+};
+
+void rts524a_init_params(struct rtsx_pcr *pcr)
+{
+       rts5249_init_params(pcr);
+       pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+       pcr->option.ltr_l1off_snooze_sspwrgate =
+               LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
+
+       pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
+       pcr->ops = &rts524a_pcr_ops;
+}
+
+static int rts525a_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+       rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
+               LDO_VCC_TUNE_MASK, LDO_VCC_3V3);
+       return rtsx_base_card_power_on(pcr, card);
+}
+
+static int rts525a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       switch (voltage) {
+       case OUTPUT_3V3:
+               rtsx_pci_write_register(pcr, LDO_CONFIG2,
+                       LDO_D3318_MASK, LDO_D3318_33V);
+               rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0);
+               break;
+       case OUTPUT_1V8:
+               rtsx_pci_write_register(pcr, LDO_CONFIG2,
+                       LDO_D3318_MASK, LDO_D3318_18V);
+               rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8,
+                       SD_IO_USING_1V8);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       rtsx_pci_init_cmd(pcr);
+       rts5249_fill_driving(pcr, voltage);
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts525a_optimize_phy(struct rtsx_pcr *pcr)
+{
+       int err;
+
+       err = rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3,
+               D3_DELINK_MODE_EN, 0x00);
+       if (err < 0)
+               return err;
+
+       rtsx_pci_write_phy_register(pcr, _PHY_FLD0,
+               _PHY_FLD0_CLK_REQ_20C | _PHY_FLD0_RX_IDLE_EN |
+               _PHY_FLD0_BIT_ERR_RSTN | _PHY_FLD0_BER_COUNT |
+               _PHY_FLD0_BER_TIMER | _PHY_FLD0_CHECK_EN);
+
+       rtsx_pci_write_phy_register(pcr, _PHY_ANA03,
+               _PHY_ANA03_TIMER_MAX | _PHY_ANA03_OOBS_DEB_EN |
+               _PHY_CMU_DEBUG_EN);
+
+       if (is_version(pcr, 0x525A, IC_VER_A))
+               rtsx_pci_write_phy_register(pcr, _PHY_REV0,
+                       _PHY_REV0_FILTER_OUT | _PHY_REV0_CDR_BYPASS_PFD |
+                       _PHY_REV0_CDR_RX_IDLE_BYPASS);
+
+       return 0;
+}
+
+static int rts525a_extra_init_hw(struct rtsx_pcr *pcr)
+{
+       rts5249_extra_init_hw(pcr);
+
+       rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
+       if (is_version(pcr, 0x525A, IC_VER_A)) {
+               rtsx_pci_write_register(pcr, L1SUB_CONFIG2,
+                       L1SUB_AUTO_CFG, L1SUB_AUTO_CFG);
+               rtsx_pci_write_register(pcr, RREF_CFG,
+                       RREF_VBGSEL_MASK, RREF_VBGSEL_1V25);
+               rtsx_pci_write_register(pcr, LDO_VIO_CFG,
+                       LDO_VIO_TUNE_MASK, LDO_VIO_1V7);
+               rtsx_pci_write_register(pcr, LDO_DV12S_CFG,
+                       LDO_D12_TUNE_MASK, LDO_D12_TUNE_DF);
+               rtsx_pci_write_register(pcr, LDO_AV12S_CFG,
+                       LDO_AV12S_TUNE_MASK, LDO_AV12S_TUNE_DF);
+               rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
+                       LDO_VCC_LMTVTH_MASK, LDO_VCC_LMTVTH_2A);
+               rtsx_pci_write_register(pcr, OOBS_CONFIG,
+                       OOBS_AUTOK_DIS | OOBS_VAL_MASK, 0x89);
+       }
+
+       return 0;
+}
+
+static const struct pcr_ops rts525a_pcr_ops = {
+       .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
+       .extra_init_hw = rts525a_extra_init_hw,
+       .optimize_phy = rts525a_optimize_phy,
+       .turn_on_led = rtsx_base_turn_on_led,
+       .turn_off_led = rtsx_base_turn_off_led,
+       .enable_auto_blink = rtsx_base_enable_auto_blink,
+       .disable_auto_blink = rtsx_base_disable_auto_blink,
+       .card_power_on = rts525a_card_power_on,
+       .card_power_off = rtsx_base_card_power_off,
+       .switch_output_voltage = rts525a_switch_output_voltage,
+       .force_power_down = rtsx_base_force_power_down,
+       .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
+       .set_aspm = rts5249_set_aspm,
+};
+
+void rts525a_init_params(struct rtsx_pcr *pcr)
+{
+       rts5249_init_params(pcr);
+       pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+       pcr->option.ltr_l1off_snooze_sspwrgate =
+               LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
+
+       pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
+       pcr->ops = &rts525a_pcr_ops;
+}
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
new file mode 100644 (file)
index 0000000..07cb93a
--- /dev/null
@@ -0,0 +1,748 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2016-2017 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Steven FENG <steven_feng@realsil.com.cn>
+ *   Rui FENG <rui_feng@realsil.com.cn>
+ *   Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5260.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5260_get_ic_version(struct rtsx_pcr *pcr)
+{
+       u8 val;
+
+       rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+       return val & IC_VERSION_MASK;
+}
+
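+/*
+ * Program the SD30 CLK/CMD/DAT drive-strength registers from the 3.3V or
+ * 1.8V table, indexed by the drive selector taken from the vendor settings.
+ */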
+static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+       u8 driving_3v3[6][3] = {
+               {0x94, 0x94, 0x94},
+               {0x11, 0x11, 0x18},
+               {0x55, 0x55, 0x5C},
+               {0x94, 0x94, 0x94},
+               {0x94, 0x94, 0x94},
+               {0xFF, 0xFF, 0xFF},
+       };
+       u8 driving_1v8[6][3] = {
+               {0x9A, 0x89, 0x89},
+               {0xC4, 0xC4, 0xC4},
+               {0x3C, 0x3C, 0x3C},
+               {0x9B, 0x99, 0x99},
+               {0x9A, 0x89, 0x89},
+               {0xFE, 0xFE, 0xFE},
+       };
+       u8 (*driving)[3], drive_sel;
+
+       if (voltage == OUTPUT_3V3) {
+               driving = driving_3v3;
+               drive_sel = pcr->sd30_drive_sel_3v3;
+       } else {
+               driving = driving_1v8;
+               drive_sel = pcr->sd30_drive_sel_1v8;
+       }
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+                        0xFF, driving[drive_sel][0]);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+                        0xFF, driving[drive_sel][1]);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+                        0xFF, driving[drive_sel][2]);
+}
+
+static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+       if (!rtsx_vendor_setting_valid(reg)) {
+               pcr_dbg(pcr, "skip fetch vendor setting\n");
+               return;
+       }
+
+       pcr->aspm_en = rtsx_reg_to_aspm(reg);
+       pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+       pcr->card_drive_sel &= 0x3F;
+       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+       pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+       if (rtsx_reg_check_reverse_socket(reg))
+               pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       /* Set relink_time to 0 */
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+                               RELINK_TIME_MASK, 0);
+
+       if (pm_state == HOST_ENTER_S3)
+               rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+                                       D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+       rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
+}
+
+static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+               LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+               LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5260_turn_on_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+               RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_ON);
+}
+
+static int rts5260_turn_off_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+               RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_OFF);
+}
+
+/* SD Pull Control Enable:
+ *     SD_DAT[3:0] ==> pull up
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull up
+ *     SD_CMD      ==> pull up
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA),
+       0,
+};
+
+/* SD Pull Control Disable:
+ *     SD_DAT[3:0] ==> pull down
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull down
+ *     SD_CMD      ==> pull down
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+       0,
+};
+
+/* MS Pull Control Enable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+       0,
+};
+
+/* MS Pull Control Disable:
+ *     MS CD       ==> pull up
+ *     others      ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+       0,
+};
+
+static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+               | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+       rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+       rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+                               CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+       rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+       return 0;
+}
+
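+/*
+ * Card power-on sequence: enable over-current protection if configured,
+ * switch on the DV331812 and SDVDD1 LDOs, wait 20ms, then program the
+ * SD 2.0 defaults in SD_CFG1/SD_CFG3 and related sample/push settings.
+ */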
+static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+       int err = 0;
+       struct rtsx_cr_option *option = &pcr->option;
+
+       if (option->ocp_en)
+               rtsx_pci_enable_ocp(pcr);
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+                        DV331812_VDD1, DV331812_VDD1);
+       err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+       if (err < 0)
+               return err;
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG0,
+                        RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+                        LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_ON);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+                        DV331812_POWERON, DV331812_POWERON);
+       err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+       msleep(20);
+
+       if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+           pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+               sd_set_sample_push_timing_sd30(pcr);
+
+       /* Initialize SD_CFG1 register */
+       rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+                               SD_CLK_DIVIDE_128 | SD_20_MODE);
+
+       rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+                               0xFF, SD20_RX_POS_EDGE);
+       rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+       rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+                               SD_STOP | SD_CLR_ERR);
+
+       /* Reset SD_CFG3 register */
+       rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+       rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+                               SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+                               SD30_CLK_STOP_CFG0, 0);
+
+       rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
+
+       return err;
+}
+
+static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       switch (voltage) {
+       case OUTPUT_3V3:
+               rtsx_pci_write_register(pcr, LDO_CONFIG2,
+                                       DV331812_VDD1, DV331812_VDD1);
+               rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+                                       DV331812_MASK, DV331812_33);
+               rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0);
+               break;
+       case OUTPUT_1V8:
+               rtsx_pci_write_register(pcr, LDO_CONFIG2,
+                                       DV331812_VDD1, DV331812_VDD1);
+               rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+                                       DV331812_MASK, DV331812_17);
+               rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8,
+                                       SD_IO_USING_1V8);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* set pad drive */
+       rtsx_pci_init_cmd(pcr);
+       rts5260_fill_driving(pcr, voltage);
+       return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+}
+
+static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+       rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+       rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+                               RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+                               RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+       rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5260_card_before_power_off(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+
+       rts5260_stop_cmd(pcr);
+       rts5260_switch_output_voltage(pcr, OUTPUT_3V3);
+
+       if (option->ocp_en)
+               rtsx_pci_disable_ocp(pcr);
+}
+
+static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+       int err = 0;
+
+       rts5260_card_before_power_off(pcr);
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+                        LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+                        DV331812_POWERON, DV331812_POWEROFF);
+       err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+       return err;
+}
+
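+/*
+ * When OCP is enabled, turn on the DVCC/DVIO over-current comparators,
+ * set their current thresholds and the glitch filter, and enable OCP;
+ * otherwise keep the comparators disabled.
+ */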
+static void rts5260_init_ocp(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+
+       if (option->ocp_en) {
+               u8 mask, val;
+
+               rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+                                       RTS5260_DVCC_OCP_EN |
+                                       RTS5260_DVCC_OCP_CL_EN,
+                                       RTS5260_DVCC_OCP_EN |
+                                       RTS5260_DVCC_OCP_CL_EN);
+               rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+                                       RTS5260_DVIO_OCP_EN |
+                                       RTS5260_DVIO_OCP_CL_EN,
+                                       RTS5260_DVIO_OCP_EN |
+                                       RTS5260_DVIO_OCP_CL_EN);
+
+               rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+                                       RTS5260_DVCC_OCP_THD_MASK,
+                                       option->sd_400mA_ocp_thd);
+
+               rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+                                       RTS5260_DVIO_OCP_THD_MASK,
+                                       RTS5260_DVIO_OCP_THD_350);
+
+               rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG,
+                                       RTS5260_DV331812_OCP_THD_MASK,
+                                       RTS5260_DV331812_OCP_THD_210);
+
+               mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK;
+               val = pcr->hw_param.ocp_glitch;
+               rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+               rtsx_pci_enable_ocp(pcr);
+       } else {
+               rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+                                       RTS5260_DVCC_OCP_EN |
+                                       RTS5260_DVCC_OCP_CL_EN, 0);
+               rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+                                       RTS5260_DVIO_OCP_EN |
+                                       RTS5260_DVIO_OCP_CL_EN, 0);
+       }
+}
+
+static void rts5260_enable_ocp(struct rtsx_pcr *pcr)
+{
+       u8 val = 0;
+
+       rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+
+       val = SD_OCP_INT_EN | SD_DETECT_EN;
+       val |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+       rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+       rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+                               DV3318_DETECT_EN | DV3318_OCP_INT_EN,
+                               DV3318_DETECT_EN | DV3318_OCP_INT_EN);
+}
+
+static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
+{
+       u8 mask = 0;
+
+       mask = SD_OCP_INT_EN | SD_DETECT_EN;
+       mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+       rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+       rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+                               DV3318_DETECT_EN | DV3318_OCP_INT_EN, 0);
+
+       rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+                               OC_POWER_DOWN);
+}
+
+int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+       return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+int rts5260_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val)
+{
+       return rtsx_pci_read_register(pcr, REG_DV3318_OCPSTAT, val);
+}
+
+void rts5260_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+       u8 mask = 0;
+       u8 val = 0;
+
+       mask = SD_OCP_INT_CLR | SD_OC_CLR;
+       mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+       val = SD_OCP_INT_CLR | SD_OC_CLR;
+       val |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+
+       rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+       rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+                               DV3318_OCP_INT_CLR | DV3318_OCP_CLR,
+                               DV3318_OCP_INT_CLR | DV3318_OCP_CLR);
+       udelay(10);
+       rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+       rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+                               DV3318_OCP_INT_CLR | DV3318_OCP_CLR, 0);
+}
+
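+/*
+ * Over-current handler: latch the OCP status registers, power off the
+ * affected card, clear the status once no card remains, and disable the
+ * card output enable while an over-current condition is still flagged.
+ */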
+void rts5260_process_ocp(struct rtsx_pcr *pcr)
+{
+       if (!pcr->option.ocp_en)
+               return;
+
+       rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+       rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2);
+       if (pcr->card_exist & SD_EXIST)
+               rtsx_sd_power_off_card3v3(pcr);
+       else if (pcr->card_exist & MS_EXIST)
+               rtsx_ms_power_off_card3v3(pcr);
+
+       if (!(pcr->card_exist & MS_EXIST) && !(pcr->card_exist & SD_EXIST)) {
+               if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+                       SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+                       (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER)))
+                       rtsx_pci_clear_ocpstat(pcr);
+               pcr->ocp_stat = 0;
+               pcr->ocp_stat2 = 0;
+       }
+
+       if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+                       SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+                       (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
+               if (pcr->card_exist & SD_EXIST)
+                       rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+               else if (pcr->card_exist & MS_EXIST)
+                       rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+       }
+}
+
+int rts5260_init_hw(struct rtsx_pcr *pcr)
+{
+       int err;
+
+       rtsx_pci_init_ocp(pcr);
+
+       rtsx_pci_init_cmd(pcr);
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1,
+                        AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+       /* Reset L1SUB Config */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CLK_FORCE_CTL,
+                        CLK_PM_EN, CLK_PM_EN);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWD_SUSPEND_EN, 0xFF, 0xFF);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+                        PWR_GATE_EN, PWR_GATE_EN);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, REG_VREF,
+                        PWD_SUSPND_EN, PWD_SUSPND_EN);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RBCTL,
+                        U_AUTO_DMA_EN_MASK, U_AUTO_DMA_DISABLE);
+
+       if (pcr->flags & PCR_REVERSE_SOCKET)
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
+       else
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG,
+                        OBFF_EN_MASK, OBFF_DISABLE);
+
+       err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
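+/*
+ * Select the PCIe power-saving profile (L1.2, L1.1 or plain L1) from the
+ * enabled sub-state flags, then load the L1.0 retention values, APHY-off
+ * settings, CDR and FPWM defaults.
+ */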
+static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
+{
+       int lss_l1_1, lss_l1_2;
+
+       lss_l1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN)
+                       | rtsx_check_dev_flag(pcr, PM_L1_1_EN);
+       lss_l1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN)
+                       | rtsx_check_dev_flag(pcr, PM_L1_2_EN);
+
+       if (lss_l1_2) {
+               pcr_dbg(pcr, "Set parameters for L1.2.");
+               rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+                                       0xFF, PCIE_L1_2_EN);
+               rtsx_pci_write_register(pcr, PWR_FE_CTL,
+                                       0xFF, PCIE_L1_2_PD_FE_EN);
+       } else if (lss_l1_1) {
+               pcr_dbg(pcr, "Set parameters for L1.1.");
+               rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+                                       0xFF, PCIE_L1_1_EN);
+               rtsx_pci_write_register(pcr, PWR_FE_CTL,
+                                       0xFF, PCIE_L1_1_PD_FE_EN);
+       } else {
+               pcr_dbg(pcr, "Set parameters for L1.");
+               rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+                                       0xFF, PCIE_L1_0_EN);
+               rtsx_pci_write_register(pcr, PWR_FE_CTL,
+                                       0xFF, PCIE_L1_0_PD_FE_EN);
+       }
+
+       rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_DPHY_RET_VALUE,
+                               0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+       rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_MAC_RET_VALUE,
+                               0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+       rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD30_RET_VALUE,
+                               0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+       rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD40_RET_VALUE,
+                               0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+       rtsx_pci_write_register(pcr, CFG_L1_0_SYS_RET_VALUE,
+                               0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+       /* Option cut APHY */
+       rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_0,
+                               0xFF, CFG_PCIE_APHY_OFF_0_DEFAULT);
+       rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_1,
+                               0xFF, CFG_PCIE_APHY_OFF_1_DEFAULT);
+       rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_2,
+                               0xFF, CFG_PCIE_APHY_OFF_2_DEFAULT);
+       rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_3,
+                               0xFF, CFG_PCIE_APHY_OFF_3_DEFAULT);
+       /* CDR DEC */
+       rtsx_pci_write_register(pcr, PWC_CDR, 0xFF, PWC_CDR_DEFAULT);
+       /* PWMPFM */
+       rtsx_pci_write_register(pcr, CFG_LP_FPWM_VALUE,
+                               0xFF, CFG_LP_FPWM_VALUE_DEFAULT);
+       /* No Power Saving WA */
+       rtsx_pci_write_register(pcr, CFG_L1_0_CRC_MISC_RET_VALUE,
+                               0xFF, CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT);
+}
+
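+/*
+ * Cache the ASPM/PM L1 sub-state enables from PCI config space as device
+ * flags, apply the power-saving profile, set up LTR if the host enabled
+ * it, and decide whether CLKREQ# must be forced low.
+ */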
+static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+       u32 lval;
+
+       rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_5260, &lval);
+
+       if (lval & ASPM_L1_1_EN_MASK)
+               rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+
+       if (lval & ASPM_L1_2_EN_MASK)
+               rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+
+       if (lval & PM_L1_1_EN_MASK)
+               rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+
+       if (lval & PM_L1_2_EN_MASK)
+               rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+
+       rts5260_pwr_saving_setting(pcr);
+
+       if (option->ltr_en) {
+               u16 val;
+
+               pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+               if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+                       option->ltr_enabled = true;
+                       option->ltr_active = true;
+                       rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+               } else {
+                       option->ltr_enabled = false;
+               }
+       }
+
+       if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+                               | PM_L1_1_EN | PM_L1_2_EN))
+               option->force_clkreq_0 = false;
+       else
+               option->force_clkreq_0 = true;
+}
+
+static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+
+       /* Set mcu_cnt to 7 to ensure data can be sampled properly */
+       rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
+       rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D);
+
+       rts5260_init_from_cfg(pcr);
+
+       /* force no MDIO */
+       rtsx_pci_write_register(pcr, RTS5260_AUTOLOAD_CFG4,
+                               0xFF, RTS5260_MIMO_DISABLE);
+       /* Modify SDVCC Tune Default Parameters! */
+       rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
+                               RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+
+       rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+       rts5260_init_hw(pcr);
+
+       /*
+        * If force_clkreq_0 is enabled, the CLKREQ# pin is forced low so
+        * that the clock is requested unconditionally.
+        */
+       if (option->force_clkreq_0)
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+                                FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+       else
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+                                FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+       return 0;
+}
+
+void rts5260_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+       u8 val = 0;
+
+       if (pcr->aspm_enabled == enable)
+               return;
+
+       if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+               if (enable)
+                       val = pcr->aspm_en;
+               rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+                                        ASPM_MASK_NEG, val);
+       } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+               u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+               if (!enable)
+                       val = FORCE_ASPM_CTL0;
+               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+       }
+
+       pcr->aspm_enabled = enable;
+}
+
+static void rts5260_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+       u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+       int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
+       int aspm_L1_1, aspm_L1_2;
+       u8 val = 0;
+
+       aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+       aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+       if (active) {
+               /* run, latency: 60us */
+               if (aspm_L1_1)
+                       val = option->ltr_l1off_snooze_sspwrgate;
+       } else {
+               /* l1off, latency: 300us */
+               if (aspm_L1_2)
+                       val = option->ltr_l1off_sspwrgate;
+       }
+
+       if (aspm_L1_1 || aspm_L1_2) {
+               if (rtsx_check_dev_flag(pcr,
+                                       LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+                       if (card_exist)
+                               val &= ~L1OFF_MBIAS2_EN_5250;
+                       else
+                               val |= L1OFF_MBIAS2_EN_5250;
+               }
+       }
+       rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5260_pcr_ops = {
+       .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
+       .turn_on_led = rts5260_turn_on_led,
+       .turn_off_led = rts5260_turn_off_led,
+       .extra_init_hw = rts5260_extra_init_hw,
+       .enable_auto_blink = rtsx_base_enable_auto_blink,
+       .disable_auto_blink = rtsx_base_disable_auto_blink,
+       .card_power_on = rts5260_card_power_on,
+       .card_power_off = rts5260_card_power_off,
+       .switch_output_voltage = rts5260_switch_output_voltage,
+       .force_power_down = rtsx_base_force_power_down,
+       .stop_cmd = rts5260_stop_cmd,
+       .set_aspm = rts5260_set_aspm,
+       .set_l1off_cfg_sub_d0 = rts5260_set_l1off_cfg_sub_d0,
+       .enable_ocp = rts5260_enable_ocp,
+       .disable_ocp = rts5260_disable_ocp,
+       .init_ocp = rts5260_init_ocp,
+       .process_ocp = rts5260_process_ocp,
+       .get_ocpstat = rts5260_get_ocpstat,
+       .clear_ocpstat = rts5260_clear_ocpstat,
+};
+
+void rts5260_init_params(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+       struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+       pcr->num_slots = 2;
+
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+       pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+       pcr->aspm_en = ASPM_L1_EN;
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+       pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+       pcr->ic_version = rts5260_get_ic_version(pcr);
+       pcr->sd_pull_ctl_enable_tbl = rts5260_sd_pull_ctl_enable_tbl;
+       pcr->sd_pull_ctl_disable_tbl = rts5260_sd_pull_ctl_disable_tbl;
+       pcr->ms_pull_ctl_enable_tbl = rts5260_ms_pull_ctl_enable_tbl;
+       pcr->ms_pull_ctl_disable_tbl = rts5260_ms_pull_ctl_disable_tbl;
+
+       pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
+
+       pcr->ops = &rts5260_pcr_ops;
+
+       option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+                               | LTR_L1SS_PWR_GATE_EN);
+       option->ltr_en = true;
+
+       /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+       option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+       option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+       option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+       option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+       option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+       option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+       option->ltr_l1off_snooze_sspwrgate =
+               LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
+
+       option->ocp_en = 1;
+       if (option->ocp_en)
+               hw_param->interrupt_en |= SD_OC_INT_EN;
+       hw_param->ocp_glitch = SD_OCP_GLITCH_10M | SDVIO_OCP_GLITCH_800U;
+       option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550;
+       option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970;
+}
diff --git a/drivers/misc/cardreader/rts5260.h b/drivers/misc/cardreader/rts5260.h
new file mode 100644 (file)
index 0000000..53a1411
--- /dev/null
@@ -0,0 +1,45 @@
+#ifndef __RTS5260_H__
+#define __RTS5260_H__
+
+#define RTS5260_DVCC_CTRL              0xFF73
+#define RTS5260_DVCC_OCP_EN            (0x01 << 7)
+#define RTS5260_DVCC_OCP_THD_MASK      (0x07 << 4)
+#define RTS5260_DVCC_POWERON           (0x01 << 3)
+#define RTS5260_DVCC_OCP_CL_EN         (0x01 << 2)
+
+#define RTS5260_DVIO_CTRL              0xFF75
+#define RTS5260_DVIO_OCP_EN            (0x01 << 7)
+#define RTS5260_DVIO_OCP_THD_MASK      (0x07 << 4)
+#define RTS5260_DVIO_POWERON           (0x01 << 3)
+#define RTS5260_DVIO_OCP_CL_EN         (0x01 << 2)
+
+#define RTS5260_DV331812_CFG           0xFF71
+#define RTS5260_DV331812_OCP_EN                (0x01 << 7)
+#define RTS5260_DV331812_OCP_THD_MASK  (0x07 << 4)
+#define RTS5260_DV331812_POWERON       (0x01 << 3)
+#define RTS5260_DV331812_SEL           (0x01 << 2)
+#define RTS5260_DV331812_VDD1          (0x01 << 2)
+#define RTS5260_DV331812_VDD2          (0x00 << 2)
+
+#define RTS5260_DV331812_OCP_THD_120   (0x00 << 4)
+#define RTS5260_DV331812_OCP_THD_140   (0x01 << 4)
+#define RTS5260_DV331812_OCP_THD_160   (0x02 << 4)
+#define RTS5260_DV331812_OCP_THD_180   (0x03 << 4)
+#define RTS5260_DV331812_OCP_THD_210   (0x04 << 4)
+#define RTS5260_DV331812_OCP_THD_240   (0x05 << 4)
+#define RTS5260_DV331812_OCP_THD_270   (0x06 << 4)
+#define RTS5260_DV331812_OCP_THD_300   (0x07 << 4)
+
+#define RTS5260_DVIO_OCP_THD_250       (0x00 << 4)
+#define RTS5260_DVIO_OCP_THD_300       (0x01 << 4)
+#define RTS5260_DVIO_OCP_THD_350       (0x02 << 4)
+#define RTS5260_DVIO_OCP_THD_400       (0x03 << 4)
+#define RTS5260_DVIO_OCP_THD_450       (0x04 << 4)
+#define RTS5260_DVIO_OCP_THD_500       (0x05 << 4)
+#define RTS5260_DVIO_OCP_THD_550       (0x06 << 4)
+#define RTS5260_DVIO_OCP_THD_600       (0x07 << 4)
+
+#define RTS5260_DVCC_OCP_THD_550       (0x00 << 4)
+#define RTS5260_DVCC_OCP_THD_970       (0x05 << 4)
+
+#endif
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
new file mode 100644 (file)
index 0000000..fd09b09
--- /dev/null
@@ -0,0 +1,1693 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/rtsx_pci.h>
+#include <linux/mmc/card.h>
+#include <asm/unaligned.h>
+
+#include "rtsx_pcr.h"
+
+static bool msi_en = true;
+module_param(msi_en, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(msi_en, "Enable MSI");
+
+static DEFINE_IDR(rtsx_pci_idr);
+static DEFINE_SPINLOCK(rtsx_pci_lock);
+
+static struct mfd_cell rtsx_pcr_cells[] = {
+       [RTSX_SD_CARD] = {
+               .name = DRV_NAME_RTSX_PCI_SDMMC,
+       },
+       [RTSX_MS_CARD] = {
+               .name = DRV_NAME_RTSX_PCI_MS,
+       },
+};
+
+static const struct pci_device_id rtsx_pci_ids[] = {
+       { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
+
+static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+               0xFC, pcr->aspm_en);
+}
+
+static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+               0xFC, 0);
+}
+
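+/*
+ * Write the 32-bit LTR latency value byte by byte into MSGTXDATA0..3 and
+ * switch the LTR control to software mode with transmission enabled.
+ */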
+int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
+{
+       rtsx_pci_write_register(pcr, MSGTXDATA0,
+                               MASK_8_BIT_DEF, (u8) (latency & 0xFF));
+       rtsx_pci_write_register(pcr, MSGTXDATA1,
+                               MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
+       rtsx_pci_write_register(pcr, MSGTXDATA2,
+                               MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
+       rtsx_pci_write_register(pcr, MSGTXDATA3,
+                               MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
+       rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
+               LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
+
+       return 0;
+}
+
+int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
+{
+       if (pcr->ops->set_ltr_latency)
+               return pcr->ops->set_ltr_latency(pcr, latency);
+       else
+               return rtsx_comm_set_ltr_latency(pcr, latency);
+}
+
+static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+
+       if (pcr->aspm_enabled == enable)
+               return;
+
+       if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+               if (enable)
+                       rtsx_pci_enable_aspm(pcr);
+               else
+                       rtsx_pci_disable_aspm(pcr);
+       } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+               u8 mask = FORCE_ASPM_VAL_MASK;
+               u8 val = 0;
+
+               if (enable)
+                       val = pcr->aspm_en;
+               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+       }
+
+       pcr->aspm_enabled = enable;
+}
+
+static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
+{
+       if (pcr->ops->set_aspm)
+               pcr->ops->set_aspm(pcr, false);
+       else
+               rtsx_comm_set_aspm(pcr, false);
+}
+
+int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
+{
+       rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
+
+       return 0;
+}
+
+void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
+{
+       if (pcr->ops->set_l1off_cfg_sub_d0)
+               pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
+}
+
+static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+
+       rtsx_disable_aspm(pcr);
+
+       if (option->ltr_enabled)
+               rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+
+       if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
+               rtsx_set_l1off_sub_cfg_d0(pcr, 1);
+}
+
+void rtsx_pm_full_on(struct rtsx_pcr *pcr)
+{
+       if (pcr->ops->full_on)
+               pcr->ops->full_on(pcr);
+       else
+               rtsx_comm_pm_full_on(pcr);
+}
+
+void rtsx_pci_start_run(struct rtsx_pcr *pcr)
+{
+       /* If the PCI device has been removed, don't queue idle work any more */
+       if (pcr->remove_pci)
+               return;
+
+       if (pcr->state != PDEV_STAT_RUN) {
+               pcr->state = PDEV_STAT_RUN;
+               if (pcr->ops->enable_auto_blink)
+                       pcr->ops->enable_auto_blink(pcr);
+               rtsx_pm_full_on(pcr);
+       }
+
+       mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
+
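+/*
+ * Internal register access goes through the HAIMR window: pack the 14-bit
+ * address, mask and data into one 32-bit write, then poll until the
+ * TRANS_END bit clears and verify the value read back.
+ */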
+int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
+{
+       int i;
+       u32 val = HAIMR_WRITE_START;
+
+       val |= (u32)(addr & 0x3FFF) << 16;
+       val |= (u32)mask << 8;
+       val |= (u32)data;
+
+       rtsx_pci_writel(pcr, RTSX_HAIMR, val);
+
+       for (i = 0; i < MAX_RW_REG_CNT; i++) {
+               val = rtsx_pci_readl(pcr, RTSX_HAIMR);
+               if ((val & HAIMR_TRANS_END) == 0) {
+                       if (data != (u8)val)
+                               return -EIO;
+                       return 0;
+               }
+       }
+
+       return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
+
+int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
+{
+       u32 val = HAIMR_READ_START;
+       int i;
+
+       val |= (u32)(addr & 0x3FFF) << 16;
+       rtsx_pci_writel(pcr, RTSX_HAIMR, val);
+
+       for (i = 0; i < MAX_RW_REG_CNT; i++) {
+               val = rtsx_pci_readl(pcr, RTSX_HAIMR);
+               if ((val & HAIMR_TRANS_END) == 0)
+                       break;
+       }
+
+       if (i >= MAX_RW_REG_CNT)
+               return -ETIMEDOUT;
+
+       if (data)
+               *data = (u8)(val & 0xFF);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
+
+int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
+{
+       int err, i, finished = 0;
+       u8 tmp;
+
+       rtsx_pci_init_cmd(pcr);
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);
+
+       err = rtsx_pci_send_cmd(pcr, 100);
+       if (err < 0)
+               return err;
+
+       for (i = 0; i < 100000; i++) {
+               err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
+               if (err < 0)
+                       return err;
+
+               if (!(tmp & 0x80)) {
+                       finished = 1;
+                       break;
+               }
+       }
+
+       if (!finished)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
+{
+       if (pcr->ops->write_phy)
+               return pcr->ops->write_phy(pcr, addr, val);
+
+       return __rtsx_pci_write_phy_register(pcr, addr, val);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
+
+int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
+{
+       int err, i, finished = 0;
+       u16 data;
+       u8 *ptr, tmp;
+
+       rtsx_pci_init_cmd(pcr);
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);
+
+       err = rtsx_pci_send_cmd(pcr, 100);
+       if (err < 0)
+               return err;
+
+       for (i = 0; i < 100000; i++) {
+               err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
+               if (err < 0)
+                       return err;
+
+               if (!(tmp & 0x80)) {
+                       finished = 1;
+                       break;
+               }
+       }
+
+       if (!finished)
+               return -ETIMEDOUT;
+
+       rtsx_pci_init_cmd(pcr);
+
+       rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
+       rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);
+
+       err = rtsx_pci_send_cmd(pcr, 100);
+       if (err < 0)
+               return err;
+
+       ptr = rtsx_pci_get_cmd_data(pcr);
+       data = ((u16)ptr[1] << 8) | ptr[0];
+
+       if (val)
+               *val = data;
+
+       return 0;
+}
+
+int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
+{
+       if (pcr->ops->read_phy)
+               return pcr->ops->read_phy(pcr, addr, val);
+
+       return __rtsx_pci_read_phy_register(pcr, addr, val);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
+
+void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
+{
+       if (pcr->ops->stop_cmd)
+               return pcr->ops->stop_cmd(pcr);
+
+       rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+       rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+
+       rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
+       rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
+
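+/*
+ * Queue one command in the host command buffer: each entry is a 32-bit
+ * little-endian word packing the command type, register address, mask and
+ * data, appended under the pcr lock until the buffer is full.
+ */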
+void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
+               u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
+{
+       unsigned long flags;
+       u32 val = 0;
+       u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
+
+       val |= (u32)(cmd_type & 0x03) << 30;
+       val |= (u32)(reg_addr & 0x3FFF) << 16;
+       val |= (u32)mask << 8;
+       val |= (u32)data;
+
+       spin_lock_irqsave(&pcr->lock, flags);
+       ptr += pcr->ci;
+       if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
+               put_unaligned_le32(val, ptr);
+               ptr++;
+               pcr->ci++;
+       }
+       spin_unlock_irqrestore(&pcr->lock, flags);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
+
+void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
+{
+       u32 val = 1 << 31;
+
+       rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
+
+       val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
+       /* Hardware Auto Response */
+       val |= 0x40000000;
+       rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
+
+int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
+{
+       struct completion trans_done;
+       u32 val = 1 << 31;
+       long timeleft;
+       unsigned long flags;
+       int err = 0;
+
+       spin_lock_irqsave(&pcr->lock, flags);
+
+       /* set up data structures for the wakeup system */
+       pcr->done = &trans_done;
+       pcr->trans_result = TRANS_NOT_READY;
+       init_completion(&trans_done);
+
+       rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
+
+       val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
+       /* Hardware Auto Response */
+       val |= 0x40000000;
+       rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
+
+       spin_unlock_irqrestore(&pcr->lock, flags);
+
+       /* Wait for TRANS_OK_INT */
+       timeleft = wait_for_completion_interruptible_timeout(
+                       &trans_done, msecs_to_jiffies(timeout));
+       if (timeleft <= 0) {
+               pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
+               err = -ETIMEDOUT;
+               goto finish_send_cmd;
+       }
+
+       spin_lock_irqsave(&pcr->lock, flags);
+       if (pcr->trans_result == TRANS_RESULT_FAIL)
+               err = -EINVAL;
+       else if (pcr->trans_result == TRANS_RESULT_OK)
+               err = 0;
+       else if (pcr->trans_result == TRANS_NO_DEVICE)
+               err = -ENODEV;
+       spin_unlock_irqrestore(&pcr->lock, flags);
+
+finish_send_cmd:
+       spin_lock_irqsave(&pcr->lock, flags);
+       pcr->done = NULL;
+       spin_unlock_irqrestore(&pcr->lock, flags);
+
+       if ((err < 0) && (err != -ENODEV))
+               rtsx_pci_stop_cmd(pcr);
+
+       if (pcr->finish_me)
+               complete(pcr->finish_me);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
+
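+/*
+ * Append one scatter-gather descriptor: a 64-bit entry with the DMA
+ * address in the upper 32 bits, the length shifted into bits 12+, and the
+ * VALID/TRANS_DATA/END option flags in the low bits.
+ */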
+static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
+               dma_addr_t addr, unsigned int len, int end)
+{
+       u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
+       u64 val;
+       u8 option = SG_VALID | SG_TRANS_DATA;
+
+       pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
+
+       if (end)
+               option |= SG_END;
+       val = ((u64)addr << 32) | ((u64)len << 12) | option;
+
+       put_unaligned_le64(val, ptr);
+       pcr->sgi++;
+}
+
+int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+               int num_sg, bool read, int timeout)
+{
+       int err = 0, count;
+
+       pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
+       count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
+       if (count < 1)
+               return -EINVAL;
+       pcr_dbg(pcr, "DMA mapping count: %d\n", count);
+
+       err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
+
+       rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
+
+int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+               int num_sg, bool read)
+{
+       enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+       if (pcr->remove_pci)
+               return -EINVAL;
+
+       if ((sglist == NULL) || (num_sg <= 0))
+               return -EINVAL;
+
+       return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
+
+void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+               int num_sg, bool read)
+{
+       enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+       dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
+
+int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+               int count, bool read, int timeout)
+{
+       struct completion trans_done;
+       struct scatterlist *sg;
+       dma_addr_t addr;
+       long timeleft;
+       unsigned long flags;
+       unsigned int len;
+       int i, err = 0;
+       u32 val;
+       u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
+
+       if (pcr->remove_pci)
+               return -ENODEV;
+
+       if ((sglist == NULL) || (count < 1))
+               return -EINVAL;
+
+       val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
+       pcr->sgi = 0;
+       for_each_sg(sglist, sg, count, i) {
+               addr = sg_dma_address(sg);
+               len = sg_dma_len(sg);
+               rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
+       }
+
+       spin_lock_irqsave(&pcr->lock, flags);
+
+       pcr->done = &trans_done;
+       pcr->trans_result = TRANS_NOT_READY;
+       init_completion(&trans_done);
+       rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
+       rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
+
+       spin_unlock_irqrestore(&pcr->lock, flags);
+
+       timeleft = wait_for_completion_interruptible_timeout(
+                       &trans_done, msecs_to_jiffies(timeout));
+       if (timeleft <= 0) {
+               pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
+               err = -ETIMEDOUT;
+               goto out;
+       }
+
+       spin_lock_irqsave(&pcr->lock, flags);
+       if (pcr->trans_result == TRANS_RESULT_FAIL) {
+               err = -EILSEQ;
+               if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
+                       pcr->dma_error_count++;
+       } else if (pcr->trans_result == TRANS_NO_DEVICE)
+               err = -ENODEV;
+       spin_unlock_irqrestore(&pcr->lock, flags);
+
+out:
+       spin_lock_irqsave(&pcr->lock, flags);
+       pcr->done = NULL;
+       spin_unlock_irqrestore(&pcr->lock, flags);
+
+       if ((err < 0) && (err != -ENODEV))
+               rtsx_pci_stop_cmd(pcr);
+
+       if (pcr->finish_me)
+               complete(pcr->finish_me);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
+
+int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
+{
+       int err;
+       int i, j;
+       u16 reg;
+       u8 *ptr;
+
+       if (buf_len > 512)
+               buf_len = 512;
+
+       ptr = buf;
+       reg = PPBUF_BASE2;
+       for (i = 0; i < buf_len / 256; i++) {
+               rtsx_pci_init_cmd(pcr);
+
+               for (j = 0; j < 256; j++)
+                       rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
+
+               err = rtsx_pci_send_cmd(pcr, 250);
+               if (err < 0)
+                       return err;
+
+               memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
+               ptr += 256;
+       }
+
+       if (buf_len % 256) {
+               rtsx_pci_init_cmd(pcr);
+
+               for (j = 0; j < buf_len % 256; j++)
+                       rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
+
+               err = rtsx_pci_send_cmd(pcr, 250);
+               if (err < 0)
+                       return err;
+       }
+
+       memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
+
+int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
+{
+       int err;
+       int i, j;
+       u16 reg;
+       u8 *ptr;
+
+       if (buf_len > 512)
+               buf_len = 512;
+
+       ptr = buf;
+       reg = PPBUF_BASE2;
+       for (i = 0; i < buf_len / 256; i++) {
+               rtsx_pci_init_cmd(pcr);
+
+               for (j = 0; j < 256; j++) {
+                       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+                                       reg++, 0xFF, *ptr);
+                       ptr++;
+               }
+
+               err = rtsx_pci_send_cmd(pcr, 250);
+               if (err < 0)
+                       return err;
+       }
+
+       if (buf_len % 256) {
+               rtsx_pci_init_cmd(pcr);
+
+               for (j = 0; j < buf_len % 256; j++) {
+                       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+                                       reg++, 0xFF, *ptr);
+                       ptr++;
+               }
+
+               err = rtsx_pci_send_cmd(pcr, 250);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
+
+static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
+{
+       rtsx_pci_init_cmd(pcr);
+
+       while (*tbl & 0xFFFF0000) {
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+                               (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
+               tbl++;
+       }
+
+       return rtsx_pci_send_cmd(pcr, 100);
+}
+
+int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
+{
+       const u32 *tbl;
+
+       if (card == RTSX_SD_CARD)
+               tbl = pcr->sd_pull_ctl_enable_tbl;
+       else if (card == RTSX_MS_CARD)
+               tbl = pcr->ms_pull_ctl_enable_tbl;
+       else
+               return -EINVAL;
+
+       return rtsx_pci_set_pull_ctl(pcr, tbl);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
+
+int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
+{
+       const u32 *tbl;
+
+       if (card == RTSX_SD_CARD)
+               tbl = pcr->sd_pull_ctl_disable_tbl;
+       else if (card == RTSX_MS_CARD)
+               tbl = pcr->ms_pull_ctl_disable_tbl;
+       else
+               return -EINVAL;
+
+       return rtsx_pci_set_pull_ctl(pcr, tbl);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
+
+static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
+{
+       pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN;
+
+       if (pcr->num_slots > 1)
+               pcr->bier |= MS_INT_EN;
+
+       /* Enable Bus Interrupt */
+       rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
+
+       pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
+}
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+       return ((depth > 1) ? (depth - 1) : depth);
+}
+
+static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
+{
+       if (div > CLK_DIV_1) {
+               if (ssc_depth > (div - 1))
+                       ssc_depth -= (div - 1);
+               else
+                       ssc_depth = SSC_DEPTH_4M;
+       }
+
+       return ssc_depth;
+}
+
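+/*
+ * Switch the card clock: derive the SSC divider N from the requested
+ * frequency (doubling N and the post-divider until N >= MIN_DIV_N_PCR),
+ * adjust the SSC depth accordingly, program the clock registers and wait
+ * for the SSC clock to stabilize.
+ */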
+int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+               u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+       int err, clk;
+       u8 n, clk_divider, mcu_cnt, div;
+       static const u8 depth[] = {
+               [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
+               [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
+               [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
+               [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
+               [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
+       };
+
+       if (initial_mode) {
+               /* Use a card clock of roughly 250kHz in the initial stage */
+               clk_divider = SD_CLK_DIVIDE_128;
+               card_clock = 30000000;
+       } else {
+               clk_divider = SD_CLK_DIVIDE_0;
+       }
+       err = rtsx_pci_write_register(pcr, SD_CFG1,
+                       SD_CLK_DIVIDE_MASK, clk_divider);
+       if (err < 0)
+               return err;
+
+       /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
+       if (card_clock == UHS_SDR104_MAX_DTR &&
+           pcr->dma_error_count &&
+           PCI_PID(pcr) == RTS5227_DEVICE_ID)
+               card_clock = UHS_SDR104_MAX_DTR -
+                       (pcr->dma_error_count * 20000000);
+
+       card_clock /= 1000000;
+       pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
+
+       clk = card_clock;
+       if (!initial_mode && double_clk)
+               clk = card_clock * 2;
+       pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+               clk, pcr->cur_clock);
+
+       if (clk == pcr->cur_clock)
+               return 0;
+
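+       /* Derive the SSC divider N from the target clock: by default the
+        * internal clock is (N + 2) MHz, but a chip-specific hook may
+        * provide its own conversion.
+        */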
+       if (pcr->ops->conv_clk_and_div_n)
+               n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+       else
+               n = (u8)(clk - 2);
+       if ((clk <= 2) || (n > MAX_DIV_N_PCR))
+               return -EINVAL;
+
+       mcu_cnt = (u8)(125/clk + 3);
+       if (mcu_cnt > 15)
+               mcu_cnt = 15;
+
+       /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
+       div = CLK_DIV_1;
+       while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
+               if (pcr->ops->conv_clk_and_div_n) {
+                       int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
+                                       DIV_N_TO_CLK) * 2;
+                       n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+                                       CLK_TO_DIV_N);
+               } else {
+                       n = (n + 2) * 2 - 2;
+               }
+               div++;
+       }
+       pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
+
+       ssc_depth = depth[ssc_depth];
+       if (double_clk)
+               ssc_depth = double_ssc_depth(ssc_depth);
+
+       ssc_depth = revise_ssc_depth(ssc_depth, div);
+       pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+                       CLK_LOW_FREQ, CLK_LOW_FREQ);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+                       0xFF, (div << 4) | mcu_cnt);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+                       SSC_DEPTH_MASK, ssc_depth);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+       if (vpclk) {
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+                               PHASE_NOT_RESET, 0);
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+                               PHASE_NOT_RESET, PHASE_NOT_RESET);
+       }
+
+       err = rtsx_pci_send_cmd(pcr, 2000);
+       if (err < 0)
+               return err;
+
+       /* Wait SSC clock stable */
+       udelay(SSC_CLOCK_STABLE_WAIT);
+       err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+       if (err < 0)
+               return err;
+
+       pcr->cur_clock = clk;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
+
+int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+       if (pcr->ops->card_power_on)
+               return pcr->ops->card_power_on(pcr, card);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
+
+int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+       if (pcr->ops->card_power_off)
+               return pcr->ops->card_power_off(pcr, card);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
+
+int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
+{
+       static const unsigned int cd_mask[] = {
+               [RTSX_SD_CARD] = SD_EXIST,
+               [RTSX_MS_CARD] = MS_EXIST
+       };
+
+       if (!(pcr->flags & PCR_MS_PMOS)) {
+               /* With a single PMOS, card access is not permitted if the
+                * card currently present is not the designated one.
+                */
+               if (pcr->card_exist & (~cd_mask[card]))
+                       return -EIO;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
+
+int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       if (pcr->ops->switch_output_voltage)
+               return pcr->ops->switch_output_voltage(pcr, voltage);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
+
+unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
+{
+       unsigned int val;
+
+       val = rtsx_pci_readl(pcr, RTSX_BIPR);
+       if (pcr->ops->cd_deglitch)
+               val = pcr->ops->cd_deglitch(pcr);
+
+       return val;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
+
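+/* Abort any unfinished transfer: wake up a pending waiter, issue a stop
+ * command unless the device is being removed, and give the hardware a
+ * short window to report completion.
+ */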
+void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
+{
+       struct completion finish;
+
+       pcr->finish_me = &finish;
+       init_completion(&finish);
+
+       if (pcr->done)
+               complete(pcr->done);
+
+       if (!pcr->remove_pci)
+               rtsx_pci_stop_cmd(pcr);
+
+       wait_for_completion_interruptible_timeout(&finish,
+                       msecs_to_jiffies(2));
+       pcr->finish_me = NULL;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
+
+static void rtsx_pci_card_detect(struct work_struct *work)
+{
+       struct delayed_work *dwork;
+       struct rtsx_pcr *pcr;
+       unsigned long flags;
+       unsigned int card_detect = 0, card_inserted, card_removed;
+       u32 irq_status;
+
+       dwork = to_delayed_work(work);
+       pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
+
+       pcr_dbg(pcr, "--> %s\n", __func__);
+
+       mutex_lock(&pcr->pcr_mutex);
+       spin_lock_irqsave(&pcr->lock, flags);
+
+       irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
+       pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
+
+       irq_status &= CARD_EXIST;
+       card_inserted = pcr->card_inserted & irq_status;
+       card_removed = pcr->card_removed;
+       pcr->card_inserted = 0;
+       pcr->card_removed = 0;
+
+       spin_unlock_irqrestore(&pcr->lock, flags);
+
+       if (card_inserted || card_removed) {
+               pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
+                       card_inserted, card_removed);
+
+               if (pcr->ops->cd_deglitch)
+                       card_inserted = pcr->ops->cd_deglitch(pcr);
+
+               card_detect = card_inserted | card_removed;
+
+               pcr->card_exist |= card_inserted;
+               pcr->card_exist &= ~card_removed;
+       }
+
+       mutex_unlock(&pcr->pcr_mutex);
+
+       if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
+               pcr->slots[RTSX_SD_CARD].card_event(
+                               pcr->slots[RTSX_SD_CARD].p_dev);
+       if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
+               pcr->slots[RTSX_MS_CARD].card_event(
+                               pcr->slots[RTSX_MS_CARD].p_dev);
+}
+
+void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
+{
+       if (pcr->ops->process_ocp)
+               pcr->ops->process_ocp(pcr);
+}
+
+int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
+{
+       if (pcr->option.ocp_en)
+               rtsx_pci_process_ocp(pcr);
+
+       return 0;
+}
+
+static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
+{
+       struct rtsx_pcr *pcr = dev_id;
+       u32 int_reg;
+
+       if (!pcr)
+               return IRQ_NONE;
+
+       spin_lock(&pcr->lock);
+
+       int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
+       /* Clear interrupt flag */
+       rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
+       if ((int_reg & pcr->bier) == 0) {
+               spin_unlock(&pcr->lock);
+               return IRQ_NONE;
+       }
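+       /* An all-ones readback typically means the device has dropped off
+        * the bus (e.g. surprise removal); nothing more can be done here.
+        */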
+       if (int_reg == 0xFFFFFFFF) {
+               spin_unlock(&pcr->lock);
+               return IRQ_HANDLED;
+       }
+
+       int_reg &= (pcr->bier | 0x7FFFFF);
+
+       if (int_reg & SD_OC_INT)
+               rtsx_pci_process_ocp_interrupt(pcr);
+
+       if (int_reg & SD_INT) {
+               if (int_reg & SD_EXIST) {
+                       pcr->card_inserted |= SD_EXIST;
+               } else {
+                       pcr->card_removed |= SD_EXIST;
+                       pcr->card_inserted &= ~SD_EXIST;
+               }
+               pcr->dma_error_count = 0;
+       }
+
+       if (int_reg & MS_INT) {
+               if (int_reg & MS_EXIST) {
+                       pcr->card_inserted |= MS_EXIST;
+               } else {
+                       pcr->card_removed |= MS_EXIST;
+                       pcr->card_inserted &= ~MS_EXIST;
+               }
+       }
+
+       if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
+               if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
+                       pcr->trans_result = TRANS_RESULT_FAIL;
+                       if (pcr->done)
+                               complete(pcr->done);
+               } else if (int_reg & TRANS_OK_INT) {
+                       pcr->trans_result = TRANS_RESULT_OK;
+                       if (pcr->done)
+                               complete(pcr->done);
+               }
+       }
+
+       if (pcr->card_inserted || pcr->card_removed)
+               schedule_delayed_work(&pcr->carddet_work,
+                               msecs_to_jiffies(200));
+
+       spin_unlock(&pcr->lock);
+       return IRQ_HANDLED;
+}
+
+static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
+{
+       pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
+                       __func__, pcr->msi_en, pcr->pci->irq);
+
+       if (request_irq(pcr->pci->irq, rtsx_pci_isr,
+                       pcr->msi_en ? 0 : IRQF_SHARED,
+                       DRV_NAME_RTSX_PCI, pcr)) {
+               dev_err(&(pcr->pci->dev),
+                       "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
+                       pcr->pci->irq);
+               return -1;
+       }
+
+       pcr->irq = pcr->pci->irq;
+       pci_intx(pcr->pci, !pcr->msi_en);
+
+       return 0;
+}
+
+static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
+{
+       if (pcr->ops->set_aspm)
+               pcr->ops->set_aspm(pcr, true);
+       else
+               rtsx_comm_set_aspm(pcr, true);
+}
+
+static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+
+       if (option->ltr_enabled) {
+               u32 latency = option->ltr_l1off_latency;
+
+               if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
+                       mdelay(option->l1_snooze_delay);
+
+               rtsx_set_ltr_latency(pcr, latency);
+       }
+
+       if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
+               rtsx_set_l1off_sub_cfg_d0(pcr, 0);
+
+       rtsx_enable_aspm(pcr);
+}
+
+void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
+{
+       if (pcr->ops->power_saving)
+               pcr->ops->power_saving(pcr);
+       else
+               rtsx_comm_pm_power_saving(pcr);
+}
+
+static void rtsx_pci_idle_work(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
+
+       pcr_dbg(pcr, "--> %s\n", __func__);
+
+       mutex_lock(&pcr->pcr_mutex);
+
+       pcr->state = PDEV_STAT_IDLE;
+
+       if (pcr->ops->disable_auto_blink)
+               pcr->ops->disable_auto_blink(pcr);
+       if (pcr->ops->turn_off_led)
+               pcr->ops->turn_off_led(pcr);
+
+       rtsx_pm_power_saving(pcr);
+
+       mutex_unlock(&pcr->pcr_mutex);
+}
+
+#ifdef CONFIG_PM
+static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       if (pcr->ops->turn_off_led)
+               pcr->ops->turn_off_led(pcr);
+
+       rtsx_pci_writel(pcr, RTSX_BIER, 0);
+       pcr->bier = 0;
+
+       rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
+       rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
+
+       if (pcr->ops->force_power_down)
+               pcr->ops->force_power_down(pcr, pm_state);
+}
+#endif
+
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
+{
+       u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
+
+       if (pcr->ops->enable_ocp)
+               pcr->ops->enable_ocp(pcr);
+       else
+               rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+}
+
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
+{
+       u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
+
+       if (pcr->ops->disable_ocp)
+               pcr->ops->disable_ocp(pcr);
+       else
+               rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+}
+
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
+{
+       if (pcr->ops->init_ocp) {
+               pcr->ops->init_ocp(pcr);
+       } else {
+               struct rtsx_cr_option *option = &(pcr->option);
+
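+               /* Default OCP setup: program the over-current threshold and
+                * glitch filter from the per-chip parameters when OCP is
+                * enabled, otherwise power the OCP block down.
+                */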
+               if (option->ocp_en) {
+                       u8 val = option->sd_400mA_ocp_thd;
+
+                       rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+                       rtsx_pci_write_register(pcr, REG_OCPPARA1,
+                               SD_OCP_TIME_MASK, SD_OCP_TIME_800);
+                       rtsx_pci_write_register(pcr, REG_OCPPARA2,
+                               SD_OCP_THD_MASK, val);
+                       rtsx_pci_write_register(pcr, REG_OCPGLITCH,
+                               SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
+                       rtsx_pci_enable_ocp(pcr);
+               } else {
+                       /* OC power down */
+                       rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+                               OC_POWER_DOWN);
+               }
+       }
+}
+
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+       if (pcr->ops->get_ocpstat)
+               return pcr->ops->get_ocpstat(pcr, val);
+       else
+               return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+       if (pcr->ops->clear_ocpstat) {
+               pcr->ops->clear_ocpstat(pcr);
+       } else {
+               u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
+               u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+               rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+               rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+       }
+}
+
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+               MS_CLK_EN | SD40_CLK_EN, 0);
+       rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+
+       rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+
+       msleep(50);
+
+       rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
+
+       return 0;
+}
+
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+               MS_CLK_EN | SD40_CLK_EN, 0);
+
+       rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
+
+       rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+       rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
+
+       return 0;
+}
+
+static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
+{
+       int err;
+
+       pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
+       rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
+
+       rtsx_pci_enable_bus_int(pcr);
+
+       /* Power on SSC */
+       err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
+       if (err < 0)
+               return err;
+
+       /* Wait SSC power stable */
+       udelay(200);
+
+       rtsx_pci_disable_aspm(pcr);
+       if (pcr->ops->optimize_phy) {
+               err = pcr->ops->optimize_phy(pcr);
+               if (err < 0)
+                       return err;
+       }
+
+       rtsx_pci_init_cmd(pcr);
+
+       /* Set mcu_cnt to 7 to ensure data can be sampled properly */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
+       /* Disable card clock */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
+       /* Reset delink mode */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
+       /* Card driving select */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
+                       0xFF, pcr->card_drive_sel);
+       /* Enable SSC Clock */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
+                       0xFF, SSC_8X_EN | SSC_SEL_4M);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
+       /* Disable cd_pwr_save */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
+       /* Clear Link Ready Interrupt */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
+                       LINK_RDY_INT, LINK_RDY_INT);
+       /* Enlarge the estimation window of the PERST# glitch
+        * to reduce the chance of spurious card interrupts
+        */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
+       /* Update RC oscillator to 400k
+        * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
+        *                1: 2M  0: 400k
+        */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
+       /* Set interrupt write clear
+        * bit 1: U_elbi_if_rd_clr_en
+        *      1: ELBI interrupt flags [31:22] & [7:0] are cleared on read
+        *      0: ELBI interrupt flags [31:22] & [7:0] can only be cleared by write
+        */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
+
+       err = rtsx_pci_send_cmd(pcr, 100);
+       if (err < 0)
+               return err;
+
+       switch (PCI_PID(pcr)) {
+       case PID_5250:
+       case PID_524A:
+       case PID_525A:
+       case PID_5260:
+               rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
+               break;
+       default:
+               break;
+       }
+
+       /* Enable clk_request_n to enable clock power management */
+       rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
+       /* Enter L1 when host tx idle */
+       rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
+
+       if (pcr->ops->extra_init_hw) {
+               err = pcr->ops->extra_init_hw(pcr);
+               if (err < 0)
+                       return err;
+       }
+
+       /* No card-detect interrupt is raised when the driver is probed with
+        * a card already inserted, so pcr->card_exist must be initialized here.
+        */
+       if (pcr->ops->cd_deglitch)
+               pcr->card_exist = pcr->ops->cd_deglitch(pcr);
+       else
+               pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
+
+       return 0;
+}
+
+static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
+{
+       int err;
+
+       spin_lock_init(&pcr->lock);
+       mutex_init(&pcr->pcr_mutex);
+
+       switch (PCI_PID(pcr)) {
+       default:
+       case 0x5209:
+               rts5209_init_params(pcr);
+               break;
+
+       case 0x5229:
+               rts5229_init_params(pcr);
+               break;
+
+       case 0x5289:
+               rtl8411_init_params(pcr);
+               break;
+
+       case 0x5227:
+               rts5227_init_params(pcr);
+               break;
+
+       case 0x522A:
+               rts522a_init_params(pcr);
+               break;
+
+       case 0x5249:
+               rts5249_init_params(pcr);
+               break;
+
+       case 0x524A:
+               rts524a_init_params(pcr);
+               break;
+
+       case 0x525A:
+               rts525a_init_params(pcr);
+               break;
+
+       case 0x5287:
+               rtl8411b_init_params(pcr);
+               break;
+
+       case 0x5286:
+               rtl8402_init_params(pcr);
+               break;
+       case 0x5260:
+               rts5260_init_params(pcr);
+               break;
+       }
+
+       pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
+                       PCI_PID(pcr), pcr->ic_version);
+
+       pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
+                       GFP_KERNEL);
+       if (!pcr->slots)
+               return -ENOMEM;
+
+       if (pcr->ops->fetch_vendor_settings)
+               pcr->ops->fetch_vendor_settings(pcr);
+
+       pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
+       pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
+                       pcr->sd30_drive_sel_1v8);
+       pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
+                       pcr->sd30_drive_sel_3v3);
+       pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
+                       pcr->card_drive_sel);
+       pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
+
+       pcr->state = PDEV_STAT_IDLE;
+       err = rtsx_pci_init_hw(pcr);
+       if (err < 0) {
+               kfree(pcr->slots);
+               return err;
+       }
+
+       return 0;
+}
+
+static int rtsx_pci_probe(struct pci_dev *pcidev,
+                         const struct pci_device_id *id)
+{
+       struct rtsx_pcr *pcr;
+       struct pcr_handle *handle;
+       u32 base, len;
+       int ret, i, bar = 0;
+
+       dev_dbg(&(pcidev->dev),
+               ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
+               pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
+               (int)pcidev->revision);
+
+       ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+       if (ret < 0)
+               return ret;
+
+       ret = pci_enable_device(pcidev);
+       if (ret)
+               return ret;
+
+       ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
+       if (ret)
+               goto disable;
+
+       pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
+       if (!pcr) {
+               ret = -ENOMEM;
+               goto release_pci;
+       }
+
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       if (!handle) {
+               ret = -ENOMEM;
+               goto free_pcr;
+       }
+       handle->pcr = pcr;
+
+       idr_preload(GFP_KERNEL);
+       spin_lock(&rtsx_pci_lock);
+       ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
+       if (ret >= 0)
+               pcr->id = ret;
+       spin_unlock(&rtsx_pci_lock);
+       idr_preload_end();
+       if (ret < 0)
+               goto free_handle;
+
+       pcr->pci = pcidev;
+       dev_set_drvdata(&pcidev->dev, handle);
+
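+       /* The RTS525A exposes its registers through BAR 1; all other
+        * supported chips use BAR 0.
+        */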
+       if (CHK_PCI_PID(pcr, 0x525A))
+               bar = 1;
+       len = pci_resource_len(pcidev, bar);
+       base = pci_resource_start(pcidev, bar);
+       pcr->remap_addr = ioremap_nocache(base, len);
+       if (!pcr->remap_addr) {
+               ret = -ENOMEM;
+               goto free_handle;
+       }
+
+       pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
+                       RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
+                       GFP_KERNEL);
+       if (pcr->rtsx_resv_buf == NULL) {
+               ret = -ENXIO;
+               goto unmap;
+       }
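+       /* The single coherent buffer is split into the host command queue
+        * followed by the scatter/gather descriptor table.
+        */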
+       pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
+       pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
+       pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
+       pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
+
+       pcr->card_inserted = 0;
+       pcr->card_removed = 0;
+       INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
+       INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
+
+       pcr->msi_en = msi_en;
+       if (pcr->msi_en) {
+               ret = pci_enable_msi(pcidev);
+               if (ret)
+                       pcr->msi_en = false;
+       }
+
+       ret = rtsx_pci_acquire_irq(pcr);
+       if (ret < 0)
+               goto disable_msi;
+
+       pci_set_master(pcidev);
+       synchronize_irq(pcr->irq);
+
+       ret = rtsx_pci_init_chip(pcr);
+       if (ret < 0)
+               goto disable_irq;
+
+       for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
+               rtsx_pcr_cells[i].platform_data = handle;
+               rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
+       }
+       ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
+                       ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
+       if (ret < 0)
+               goto disable_irq;
+
+       schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
+
+       return 0;
+
+disable_irq:
+       free_irq(pcr->irq, (void *)pcr);
+disable_msi:
+       if (pcr->msi_en)
+               pci_disable_msi(pcr->pci);
+       dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
+                       pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
+unmap:
+       iounmap(pcr->remap_addr);
+free_handle:
+       kfree(handle);
+free_pcr:
+       kfree(pcr);
+release_pci:
+       pci_release_regions(pcidev);
+disable:
+       pci_disable_device(pcidev);
+
+       return ret;
+}
+
+static void rtsx_pci_remove(struct pci_dev *pcidev)
+{
+       struct pcr_handle *handle = pci_get_drvdata(pcidev);
+       struct rtsx_pcr *pcr = handle->pcr;
+
+       pcr->remove_pci = true;
+
+       /* Disable interrupts at the pcr level */
+       spin_lock_irq(&pcr->lock);
+       rtsx_pci_writel(pcr, RTSX_BIER, 0);
+       pcr->bier = 0;
+       spin_unlock_irq(&pcr->lock);
+
+       cancel_delayed_work_sync(&pcr->carddet_work);
+       cancel_delayed_work_sync(&pcr->idle_work);
+
+       mfd_remove_devices(&pcidev->dev);
+
+       dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
+                       pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
+       free_irq(pcr->irq, (void *)pcr);
+       if (pcr->msi_en)
+               pci_disable_msi(pcr->pci);
+       iounmap(pcr->remap_addr);
+
+       pci_release_regions(pcidev);
+       pci_disable_device(pcidev);
+
+       spin_lock(&rtsx_pci_lock);
+       idr_remove(&rtsx_pci_idr, pcr->id);
+       spin_unlock(&rtsx_pci_lock);
+
+       kfree(pcr->slots);
+       kfree(pcr);
+       kfree(handle);
+
+       dev_dbg(&(pcidev->dev),
+               ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
+               pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
+}
+
+#ifdef CONFIG_PM
+
+static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
+{
+       struct pcr_handle *handle;
+       struct rtsx_pcr *pcr;
+
+       dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
+
+       handle = pci_get_drvdata(pcidev);
+       pcr = handle->pcr;
+
+       cancel_delayed_work(&pcr->carddet_work);
+       cancel_delayed_work(&pcr->idle_work);
+
+       mutex_lock(&pcr->pcr_mutex);
+
+       rtsx_pci_power_off(pcr, HOST_ENTER_S3);
+
+       pci_save_state(pcidev);
+       pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
+       pci_disable_device(pcidev);
+       pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
+
+       mutex_unlock(&pcr->pcr_mutex);
+       return 0;
+}
+
+static int rtsx_pci_resume(struct pci_dev *pcidev)
+{
+       struct pcr_handle *handle;
+       struct rtsx_pcr *pcr;
+       int ret = 0;
+
+       dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
+
+       handle = pci_get_drvdata(pcidev);
+       pcr = handle->pcr;
+
+       mutex_lock(&pcr->pcr_mutex);
+
+       pci_set_power_state(pcidev, PCI_D0);
+       pci_restore_state(pcidev);
+       ret = pci_enable_device(pcidev);
+       if (ret)
+               goto out;
+       pci_set_master(pcidev);
+
+       ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
+       if (ret)
+               goto out;
+
+       ret = rtsx_pci_init_hw(pcr);
+       if (ret)
+               goto out;
+
+       schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
+
+out:
+       mutex_unlock(&pcr->pcr_mutex);
+       return ret;
+}
+
+static void rtsx_pci_shutdown(struct pci_dev *pcidev)
+{
+       struct pcr_handle *handle;
+       struct rtsx_pcr *pcr;
+
+       dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
+
+       handle = pci_get_drvdata(pcidev);
+       pcr = handle->pcr;
+       rtsx_pci_power_off(pcr, HOST_ENTER_S1);
+
+       pci_disable_device(pcidev);
+       free_irq(pcr->irq, (void *)pcr);
+       if (pcr->msi_en)
+               pci_disable_msi(pcr->pci);
+}
+
+#else /* CONFIG_PM */
+
+#define rtsx_pci_suspend NULL
+#define rtsx_pci_resume NULL
+#define rtsx_pci_shutdown NULL
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver rtsx_pci_driver = {
+       .name = DRV_NAME_RTSX_PCI,
+       .id_table = rtsx_pci_ids,
+       .probe = rtsx_pci_probe,
+       .remove = rtsx_pci_remove,
+       .suspend = rtsx_pci_suspend,
+       .resume = rtsx_pci_resume,
+       .shutdown = rtsx_pci_shutdown,
+};
+module_pci_driver(rtsx_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
+MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
new file mode 100644 (file)
index 0000000..6ea1655
--- /dev/null
@@ -0,0 +1,113 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#ifndef __RTSX_PCR_H
+#define __RTSX_PCR_H
+
+#include <linux/rtsx_pci.h>
+
+#define MIN_DIV_N_PCR          80
+#define MAX_DIV_N_PCR          208
+
+#define RTS522A_PM_CTRL3               0xFF7E
+
+#define RTS524A_PME_FORCE_CTL          0xFF78
+#define RTS524A_PM_CTRL3               0xFF7E
+
+#define LTR_ACTIVE_LATENCY_DEF         0x883C
+#define LTR_IDLE_LATENCY_DEF           0x892C
+#define LTR_L1OFF_LATENCY_DEF          0x9003
+#define L1_SNOOZE_DELAY_DEF            1
+#define LTR_L1OFF_SSPWRGATE_5249_DEF           0xAF
+#define LTR_L1OFF_SSPWRGATE_5250_DEF           0xFF
+#define LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF    0xAC
+#define LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF    0xF8
+#define CMD_TIMEOUT_DEF                100
+#define ASPM_MASK_NEG          0xFC
+#define MASK_8_BIT_DEF         0xFF
+
+#define SSC_CLOCK_STABLE_WAIT  130
+
+int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
+int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
+
+void rts5209_init_params(struct rtsx_pcr *pcr);
+void rts5229_init_params(struct rtsx_pcr *pcr);
+void rtl8411_init_params(struct rtsx_pcr *pcr);
+void rtl8402_init_params(struct rtsx_pcr *pcr);
+void rts5227_init_params(struct rtsx_pcr *pcr);
+void rts522a_init_params(struct rtsx_pcr *pcr);
+void rts5249_init_params(struct rtsx_pcr *pcr);
+void rts524a_init_params(struct rtsx_pcr *pcr);
+void rts525a_init_params(struct rtsx_pcr *pcr);
+void rtl8411b_init_params(struct rtsx_pcr *pcr);
+void rts5260_init_params(struct rtsx_pcr *pcr);
+
+static inline u8 map_sd_drive(int idx)
+{
+       u8 sd_drive[4] = {
+               0x01,   /* Type D */
+               0x02,   /* Type C */
+               0x05,   /* Type A */
+               0x03    /* Type B */
+       };
+
+       return sd_drive[idx];
+}
+
+#define rtsx_vendor_setting_valid(reg)         (!((reg) & 0x1000000))
+#define rts5209_vendor_setting1_valid(reg)     (!((reg) & 0x80))
+#define rts5209_vendor_setting2_valid(reg)     ((reg) & 0x80)
+
+#define rtsx_reg_to_aspm(reg)                  (((reg) >> 28) & 0x03)
+#define rtsx_reg_to_sd30_drive_sel_1v8(reg)    (((reg) >> 26) & 0x03)
+#define rtsx_reg_to_sd30_drive_sel_3v3(reg)    (((reg) >> 5) & 0x03)
+#define rtsx_reg_to_card_drive_sel(reg)                ((((reg) >> 25) & 0x01) << 6)
+#define rtsx_reg_check_reverse_socket(reg)     ((reg) & 0x4000)
+#define rts5209_reg_to_aspm(reg)               (((reg) >> 5) & 0x03)
+#define rts5209_reg_check_ms_pmos(reg)         (!((reg) & 0x08))
+#define rts5209_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 3) & 0x07)
+#define rts5209_reg_to_sd30_drive_sel_3v3(reg) ((reg) & 0x07)
+#define rts5209_reg_to_card_drive_sel(reg)     ((reg) >> 8)
+#define rtl8411_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x07)
+#define rtl8411b_reg_to_sd30_drive_sel_3v3(reg)        ((reg) & 0x03)
+
+#define set_pull_ctrl_tables(pcr, __device)                            \
+do {                                                                   \
+       pcr->sd_pull_ctl_enable_tbl  = __device##_sd_pull_ctl_enable_tbl;  \
+       pcr->sd_pull_ctl_disable_tbl = __device##_sd_pull_ctl_disable_tbl; \
+       pcr->ms_pull_ctl_enable_tbl  = __device##_ms_pull_ctl_enable_tbl;  \
+       pcr->ms_pull_ctl_disable_tbl = __device##_ms_pull_ctl_disable_tbl; \
+} while (0)
+
+/* generic operations */
+int rtsx_gops_pm_reset(struct rtsx_pcr *pcr);
+int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency);
+int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val);
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr);
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val);
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr);
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr);
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr);
+
+#endif
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
new file mode 100644 (file)
index 0000000..b97903f
--- /dev/null
@@ -0,0 +1,791 @@
+/* Driver for Realtek USB card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Roger Tseng <rogerable@realtek.com>
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/rtsx_usb.h>
+
+static int polling_pipe = 1;
+module_param(polling_pipe, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(polling_pipe, "polling pipe (0: ctl, 1: bulk)");
+
+static const struct mfd_cell rtsx_usb_cells[] = {
+       [RTSX_USB_SD_CARD] = {
+               .name = "rtsx_usb_sdmmc",
+               .pdata_size = 0,
+       },
+       [RTSX_USB_MS_CARD] = {
+               .name = "rtsx_usb_ms",
+               .pdata_size = 0,
+       },
+};
+
+static void rtsx_usb_sg_timed_out(struct timer_list *t)
+{
+       struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer);
+
+       dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
+       usb_sg_cancel(&ucr->current_sg);
+}
+
+static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr,
+               unsigned int pipe, struct scatterlist *sg, int num_sg,
+               unsigned int length, unsigned int *act_len, int timeout)
+{
+       int ret;
+
+       dev_dbg(&ucr->pusb_intf->dev, "%s: xfer %u bytes, %d entries\n",
+                       __func__, length, num_sg);
+       ret = usb_sg_init(&ucr->current_sg, ucr->pusb_dev, pipe, 0,
+                       sg, num_sg, length, GFP_NOIO);
+       if (ret)
+               return ret;
+
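+       /* Arm a timer that cancels the SG request on timeout; if it has
+        * already fired by the time the transfer returns, report -ETIMEDOUT,
+        * otherwise use the status of the SG request itself.
+        */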
+       ucr->sg_timer.expires = jiffies + msecs_to_jiffies(timeout);
+       add_timer(&ucr->sg_timer);
+       usb_sg_wait(&ucr->current_sg);
+       if (!del_timer_sync(&ucr->sg_timer))
+               ret = -ETIMEDOUT;
+       else
+               ret = ucr->current_sg.status;
+
+       if (act_len)
+               *act_len = ucr->current_sg.bytes;
+
+       return ret;
+}
+
+int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe,
+                             void *buf, unsigned int len, int num_sg,
+                             unsigned int *act_len, int timeout)
+{
+       if (timeout < 600)
+               timeout = 600;
+
+       if (num_sg)
+               return rtsx_usb_bulk_transfer_sglist(ucr, pipe,
+                               (struct scatterlist *)buf, num_sg, len, act_len,
+                               timeout);
+       else
+               return usb_bulk_msg(ucr->pusb_dev, pipe, buf, len, act_len,
+                               timeout);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_transfer_data);
+
+static inline void rtsx_usb_seq_cmd_hdr(struct rtsx_ucr *ucr,
+               u16 addr, u16 len, u8 seq_type)
+{
+       rtsx_usb_cmd_hdr_tag(ucr);
+
+       ucr->cmd_buf[PACKET_TYPE] = seq_type;
+       ucr->cmd_buf[5] = (u8)(len >> 8);
+       ucr->cmd_buf[6] = (u8)len;
+       ucr->cmd_buf[8] = (u8)(addr >> 8);
+       ucr->cmd_buf[9] = (u8)addr;
+
+       if (seq_type == SEQ_WRITE)
+               ucr->cmd_buf[STAGE_FLAG] = 0;
+       else
+               ucr->cmd_buf[STAGE_FLAG] = STAGE_R;
+}
+
+static int rtsx_usb_seq_write_register(struct rtsx_ucr *ucr,
+               u16 addr, u16 len, u8 *data)
+{
+       u16 cmd_len = ALIGN(SEQ_WRITE_DATA_OFFSET + len, 4);
+
+       if (!data)
+               return -EINVAL;
+
+       if (cmd_len > IOBUF_SIZE)
+               return -EINVAL;
+
+       rtsx_usb_seq_cmd_hdr(ucr, addr, len, SEQ_WRITE);
+       memcpy(ucr->cmd_buf + SEQ_WRITE_DATA_OFFSET, data, len);
+
+       return rtsx_usb_transfer_data(ucr,
+                       usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
+                       ucr->cmd_buf, cmd_len, 0, NULL, 100);
+}
+
+static int rtsx_usb_seq_read_register(struct rtsx_ucr *ucr,
+               u16 addr, u16 len, u8 *data)
+{
+       int i, ret;
+       u16 rsp_len = round_down(len, 4);
+       u16 res_len = len - rsp_len;
+
+       if (!data)
+               return -EINVAL;
+
+       /* 4-byte aligned part */
+       if (rsp_len) {
+               rtsx_usb_seq_cmd_hdr(ucr, addr, len, SEQ_READ);
+               ret = rtsx_usb_transfer_data(ucr,
+                               usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
+                               ucr->cmd_buf, 12, 0, NULL, 100);
+               if (ret)
+                       return ret;
+
+               ret = rtsx_usb_transfer_data(ucr,
+                               usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN),
+                               data, rsp_len, 0, NULL, 100);
+               if (ret)
+                       return ret;
+       }
+
+       /* unaligned part */
+       for (i = 0; i < res_len; i++) {
+               ret = rtsx_usb_read_register(ucr, addr + rsp_len + i,
+                               data + rsp_len + i);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len)
+{
+       return rtsx_usb_seq_read_register(ucr, PPBUF_BASE2, (u16)buf_len, buf);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_read_ppbuf);
+
+int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len)
+{
+       return rtsx_usb_seq_write_register(ucr, PPBUF_BASE2, (u16)buf_len, buf);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_write_ppbuf);
+
+int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr,
+               u8 mask, u8 data)
+{
+       u16 value, index;
+
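+       /* The register address, with the EP0 opcode folded into its top
+        * bits, is sent byte-swapped in wValue; mask and data are packed
+        * into wIndex.
+        */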
+       addr |= EP0_WRITE_REG_CMD << EP0_OP_SHIFT;
+       value = swab16(addr);
+       index = mask | data << 8;
+
+       return usb_control_msg(ucr->pusb_dev,
+                       usb_sndctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
+                       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                       value, index, NULL, 0, 100);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
+
+int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
+{
+       u16 value;
+       u8 *buf;
+       int ret;
+
+       if (!data)
+               return -EINVAL;
+
+       buf = kzalloc(sizeof(u8), GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
+       value = swab16(addr);
+
+       ret = usb_control_msg(ucr->pusb_dev,
+                       usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
+                       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                       value, 0, buf, 1, 100);
+       *data = *buf;
+
+       kfree(buf);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
+
+void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type, u16 reg_addr,
+               u8 mask, u8 data)
+{
+       int i;
+
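+       /* Each queued command takes 4 bytes: command type plus the high
+        * address bits, the low address byte, the mask and the data byte.
+        * Commands that would overflow the IO buffer are silently dropped.
+        */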
+       if (ucr->cmd_idx < (IOBUF_SIZE - CMD_OFFSET) / 4) {
+               i = CMD_OFFSET + ucr->cmd_idx * 4;
+
+               ucr->cmd_buf[i++] = ((cmd_type & 0x03) << 6) |
+                       (u8)((reg_addr >> 8) & 0x3F);
+               ucr->cmd_buf[i++] = (u8)reg_addr;
+               ucr->cmd_buf[i++] = mask;
+               ucr->cmd_buf[i++] = data;
+
+               ucr->cmd_idx++;
+       }
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_add_cmd);
+
+int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout)
+{
+       int ret;
+
+       ucr->cmd_buf[CNT_H] = (u8)(ucr->cmd_idx >> 8);
+       ucr->cmd_buf[CNT_L] = (u8)(ucr->cmd_idx);
+       ucr->cmd_buf[STAGE_FLAG] = flag;
+
+       ret = rtsx_usb_transfer_data(ucr,
+                       usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
+                       ucr->cmd_buf, ucr->cmd_idx * 4 + CMD_OFFSET,
+                       0, NULL, timeout);
+       if (ret) {
+               rtsx_usb_clear_fsm_err(ucr);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_send_cmd);
+
+int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout)
+{
+       if (rsp_len <= 0)
+               return -EINVAL;
+
+       rsp_len = ALIGN(rsp_len, 4);
+
+       return rtsx_usb_transfer_data(ucr,
+                       usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN),
+                       ucr->rsp_buf, rsp_len, 0, NULL, timeout);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_get_rsp);
+
+static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
+{
+       int ret;
+
+       rtsx_usb_init_cmd(ucr);
+       rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_EXIST, 0x00, 0x00);
+       rtsx_usb_add_cmd(ucr, READ_REG_CMD, OCPSTAT, 0x00, 0x00);
+       ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
+       if (ret)
+               return ret;
+
+       ret = rtsx_usb_get_rsp(ucr, 2, 100);
+       if (ret)
+               return ret;
+
+       *status = ((ucr->rsp_buf[0] >> 2) & 0x0f) |
+                 ((ucr->rsp_buf[1] & 0x03) << 4);
+
+       return 0;
+}
+
+int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
+{
+       int ret;
+       u16 *buf;
+
+       if (!status)
+               return -EINVAL;
+
+       if (polling_pipe == 0) {
+               buf = kzalloc(sizeof(u16), GFP_KERNEL);
+               if (!buf)
+                       return -ENOMEM;
+
+               ret = usb_control_msg(ucr->pusb_dev,
+                               usb_rcvctrlpipe(ucr->pusb_dev, 0),
+                               RTSX_USB_REQ_POLL,
+                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                               0, 0, buf, 2, 100);
+               *status = *buf;
+
+               kfree(buf);
+       } else {
+               ret = rtsx_usb_get_status_with_bulk(ucr, status);
+       }
+
+       /* usb_control_msg() returns the number of bytes transferred on success */
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_get_card_status);
+
+static int rtsx_usb_write_phy_register(struct rtsx_ucr *ucr, u8 addr, u8 val)
+{
+       dev_dbg(&ucr->pusb_intf->dev, "Write 0x%x to phy register 0x%x\n",
+                       val, addr);
+
+       rtsx_usb_init_cmd(ucr);
+
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VSTAIN, 0xFF, val);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VCONTROL, 0xFF, addr & 0x0F);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VCONTROL,
+                       0xFF, (addr >> 4) & 0x0F);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
+
+       return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data)
+{
+       rtsx_usb_init_cmd(ucr);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, addr, mask, data);
+       return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_write_register);
+
+int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
+{
+       int ret;
+
+       if (data != NULL)
+               *data = 0;
+
+       rtsx_usb_init_cmd(ucr);
+       rtsx_usb_add_cmd(ucr, READ_REG_CMD, addr, 0, 0);
+       ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
+       if (ret)
+               return ret;
+
+       ret = rtsx_usb_get_rsp(ucr, 1, 100);
+       if (ret)
+               return ret;
+
+       if (data != NULL)
+               *data = ucr->rsp_buf[0];
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_read_register);
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+       return (depth > 1) ? (depth - 1) : depth;
+}
+
+static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
+{
+       if (div > CLK_DIV_1) {
+               if (ssc_depth > div - 1)
+                       ssc_depth -= (div - 1);
+               else
+                       ssc_depth = SSC_DEPTH_2M;
+       }
+
+       return ssc_depth;
+}
+
+int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock,
+               u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+       int ret;
+       u8 n, clk_divider, mcu_cnt, div;
+
+       if (!card_clock) {
+               ucr->cur_clk = 0;
+               return 0;
+       }
+
+       if (initial_mode) {
+               /* Use a card clock of roughly 250kHz in the initial stage */
+               clk_divider = SD_CLK_DIVIDE_128;
+               card_clock = 30000000;
+       } else {
+               clk_divider = SD_CLK_DIVIDE_0;
+       }
+
+       ret = rtsx_usb_write_register(ucr, SD_CFG1,
+                       SD_CLK_DIVIDE_MASK, clk_divider);
+       if (ret < 0)
+               return ret;
+
+       card_clock /= 1000000;
+       dev_dbg(&ucr->pusb_intf->dev,
+                       "Switch card clock to %dMHz\n", card_clock);
+
+       if (!initial_mode && double_clk)
+               card_clock *= 2;
+       dev_dbg(&ucr->pusb_intf->dev,
+                       "Internal SSC clock: %dMHz (cur_clk = %d)\n",
+                       card_clock, ucr->cur_clk);
+
+       if (card_clock == ucr->cur_clk)
+               return 0;
+
+       /* Converting clock value into internal settings: n and div */
+       n = card_clock - 2;
+       if ((card_clock <= 2) || (n > MAX_DIV_N))
+               return -EINVAL;
+
+       mcu_cnt = 60/card_clock + 3;
+       if (mcu_cnt > 15)
+               mcu_cnt = 15;
+
+       /* Make sure that the SSC clock div_n is not less than MIN_DIV_N */
+       div = CLK_DIV_1;
+       while (n < MIN_DIV_N && div < CLK_DIV_4) {
+               n = (n + 2) * 2 - 2;
+               div++;
+       }
+       dev_dbg(&ucr->pusb_intf->dev, "n = %d, div = %d\n", n, div);
+
+       if (double_clk)
+               ssc_depth = double_ssc_depth(ssc_depth);
+
+       ssc_depth = revise_ssc_depth(ssc_depth, div);
+       dev_dbg(&ucr->pusb_intf->dev, "ssc_depth = %d\n", ssc_depth);
+
+       rtsx_usb_init_cmd(ucr);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, CLK_CHANGE);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV,
+                       0x3F, (div << 4) | mcu_cnt);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL2,
+                       SSC_DEPTH_MASK, ssc_depth);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+       if (vpclk) {
+               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+                               PHASE_NOT_RESET, 0);
+               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+                               PHASE_NOT_RESET, PHASE_NOT_RESET);
+       }
+
+       ret = rtsx_usb_send_cmd(ucr, MODE_C, 2000);
+       if (ret < 0)
+               return ret;
+
+       ret = rtsx_usb_write_register(ucr, SSC_CTL1, 0xff,
+                       SSC_RSTB | SSC_8X_EN | SSC_SEL_4M);
+       if (ret < 0)
+               return ret;
+
+       /* Wait SSC clock stable */
+       usleep_range(100, 1000);
+
+       ret = rtsx_usb_write_register(ucr, CLK_DIV, CLK_CHANGE, 0);
+       if (ret < 0)
+               return ret;
+
+       ucr->cur_clk = card_clock;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_switch_clock);
+
+int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card)
+{
+       int ret;
+       u16 val;
+       u16 cd_mask[] = {
+               [RTSX_USB_SD_CARD] = (CD_MASK & ~SD_CD),
+               [RTSX_USB_MS_CARD] = (CD_MASK & ~MS_CD)
+       };
+
+       ret = rtsx_usb_get_card_status(ucr, &val);
+       /*
+        * If reading the status fails, report success for the exclusive
+        * check and let the operation fail further down the line.
+        */
+       if (ret)
+               return 0;
+
+       if (val & cd_mask[card])
+               return -EIO;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_usb_card_exclusive_check);
+
+static int rtsx_usb_reset_chip(struct rtsx_ucr *ucr)
+{
+       int ret;
+       u8 val;
+
+       rtsx_usb_init_cmd(ucr);
+
+       if (CHECK_PKG(ucr, LQFP48)) {
+               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
+                               LDO3318_PWR_MASK, LDO_SUSPEND);
+               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
+                               FORCE_LDO_POWERB, FORCE_LDO_POWERB);
+               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1,
+                               0x30, 0x10);
+               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5,
+                               0x03, 0x01);
+               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6,
+                               0x0C, 0x04);
+       }
+
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SYS_DUMMY0, NYET_MSAK, NYET_EN);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CD_DEGLITCH_WIDTH, 0xFF, 0x08);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+                       CD_DEGLITCH_EN, XD_CD_DEGLITCH_EN, 0x0);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+                       SD30_DRIVE_MASK, DRIVER_TYPE_D);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+                       CARD_DRIVE_SEL, SD20_DRIVE_MASK, 0x0);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG, 0xE0, 0x0);
+
+       if (ucr->is_rts5179)
+               rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+                               CARD_PULL_CTL5, 0x03, 0x01);
+
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DMA1_CTL,
+                      EXTEND_DMA1_ASYNC_SIGNAL, EXTEND_DMA1_ASYNC_SIGNAL);
+       rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_INT_PEND,
+                       XD_INT | MS_INT | SD_INT,
+                       XD_INT | MS_INT | SD_INT);
+
+       ret = rtsx_usb_send_cmd(ucr, MODE_C, 100);
+       if (ret)
+               return ret;
+
+       /* config non-crystal mode */
+       rtsx_usb_read_register(ucr, CFG_MODE, &val);
+       if ((val & XTAL_FREE) || ((val & CLK_MODE_MASK) == CLK_MODE_NON_XTAL)) {
+               ret = rtsx_usb_write_phy_register(ucr, 0xC2, 0x7C);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int rtsx_usb_init_chip(struct rtsx_ucr *ucr)
+{
+       int ret;
+       u8 val;
+
+       rtsx_usb_clear_fsm_err(ucr);
+
+       /* power on SSC */
+       ret = rtsx_usb_write_register(ucr,
+                       FPDCTL, SSC_POWER_MASK, SSC_POWER_ON);
+       if (ret)
+               return ret;
+
+       usleep_range(100, 1000);
+       ret = rtsx_usb_write_register(ucr, CLK_DIV, CLK_CHANGE, 0x00);
+       if (ret)
+               return ret;
+
+       /* determine IC version */
+       ret = rtsx_usb_read_register(ucr, HW_VERSION, &val);
+       if (ret)
+               return ret;
+
+       ucr->ic_version = val & HW_VER_MASK;
+
+       /* determine package */
+       ret = rtsx_usb_read_register(ucr, CARD_SHARE_MODE, &val);
+       if (ret)
+               return ret;
+
+       if (val & CARD_SHARE_LQFP_SEL) {
+               ucr->package = LQFP48;
+               dev_dbg(&ucr->pusb_intf->dev, "Package: LQFP48\n");
+       } else {
+               ucr->package = QFN24;
+               dev_dbg(&ucr->pusb_intf->dev, "Package: QFN24\n");
+       }
+
+       /* determine IC variations */
+       rtsx_usb_read_register(ucr, CFG_MODE_1, &val);
+       if (val & RTS5179) {
+               ucr->is_rts5179 = true;
+               dev_dbg(&ucr->pusb_intf->dev, "Device is rts5179\n");
+       } else {
+               ucr->is_rts5179 = false;
+       }
+
+       return rtsx_usb_reset_chip(ucr);
+}
+
+static int rtsx_usb_probe(struct usb_interface *intf,
+                        const struct usb_device_id *id)
+{
+       struct usb_device *usb_dev = interface_to_usbdev(intf);
+       struct rtsx_ucr *ucr;
+       int ret;
+
+       dev_dbg(&intf->dev,
+               ": Realtek USB Card Reader found at bus %03d address %03d\n",
+                usb_dev->bus->busnum, usb_dev->devnum);
+
+       ucr = devm_kzalloc(&intf->dev, sizeof(*ucr), GFP_KERNEL);
+       if (!ucr)
+               return -ENOMEM;
+
+       ucr->pusb_dev = usb_dev;
+
+       ucr->iobuf = usb_alloc_coherent(ucr->pusb_dev, IOBUF_SIZE,
+                       GFP_KERNEL, &ucr->iobuf_dma);
+       if (!ucr->iobuf)
+               return -ENOMEM;
+
+       usb_set_intfdata(intf, ucr);
+
+       ucr->vendor_id = id->idVendor;
+       ucr->product_id = id->idProduct;
+       ucr->cmd_buf = ucr->rsp_buf = ucr->iobuf;
+
+       mutex_init(&ucr->dev_mutex);
+
+       ucr->pusb_intf = intf;
+
+       /* initialize the chip */
+       ret = rtsx_usb_init_chip(ucr);
+       if (ret)
+               goto out_init_fail;
+
+       /* initialize USB SG transfer timer */
+       timer_setup(&ucr->sg_timer, rtsx_usb_sg_timed_out, 0);
+
+       ret = mfd_add_hotplug_devices(&intf->dev, rtsx_usb_cells,
+                                     ARRAY_SIZE(rtsx_usb_cells));
+       if (ret)
+               goto out_init_fail;
+
+#ifdef CONFIG_PM
+       intf->needs_remote_wakeup = 1;
+       usb_enable_autosuspend(usb_dev);
+#endif
+
+       return 0;
+
+out_init_fail:
+       usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
+                       ucr->iobuf_dma);
+       return ret;
+}
+
+static void rtsx_usb_disconnect(struct usb_interface *intf)
+{
+       struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
+
+       dev_dbg(&intf->dev, "%s called\n", __func__);
+
+       mfd_remove_devices(&intf->dev);
+
+       usb_set_intfdata(ucr->pusb_intf, NULL);
+       usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
+                       ucr->iobuf_dma);
+}
+
+#ifdef CONFIG_PM
+static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct rtsx_ucr *ucr =
+               (struct rtsx_ucr *)usb_get_intfdata(intf);
+       u16 val = 0;
+
+       dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
+                       __func__, message.event);
+
+       if (PMSG_IS_AUTO(message)) {
+               if (mutex_trylock(&ucr->dev_mutex)) {
+                       rtsx_usb_get_card_status(ucr, &val);
+                       mutex_unlock(&ucr->dev_mutex);
+
+                       /* Defer the autosuspend if card exists */
+                       if (val & (SD_CD | MS_CD))
+                               return -EAGAIN;
+               } else {
+                       /* There is an ongoing operation */
+                       return -EAGAIN;
+               }
+       }
+
+       return 0;
+}
+
+static int rtsx_usb_resume(struct usb_interface *intf)
+{
+       return 0;
+}
+
+static int rtsx_usb_reset_resume(struct usb_interface *intf)
+{
+       struct rtsx_ucr *ucr =
+               (struct rtsx_ucr *)usb_get_intfdata(intf);
+
+       rtsx_usb_reset_chip(ucr);
+       return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define rtsx_usb_suspend NULL
+#define rtsx_usb_resume NULL
+#define rtsx_usb_reset_resume NULL
+
+#endif /* CONFIG_PM */
+
+
+static int rtsx_usb_pre_reset(struct usb_interface *intf)
+{
+       struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
+
+       mutex_lock(&ucr->dev_mutex);
+       return 0;
+}
+
+static int rtsx_usb_post_reset(struct usb_interface *intf)
+{
+       struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
+
+       mutex_unlock(&ucr->dev_mutex);
+       return 0;
+}
+
+static struct usb_device_id rtsx_usb_usb_ids[] = {
+       { USB_DEVICE(0x0BDA, 0x0129) },
+       { USB_DEVICE(0x0BDA, 0x0139) },
+       { USB_DEVICE(0x0BDA, 0x0140) },
+       { }
+};
+MODULE_DEVICE_TABLE(usb, rtsx_usb_usb_ids);
+
+static struct usb_driver rtsx_usb_driver = {
+       .name                   = "rtsx_usb",
+       .probe                  = rtsx_usb_probe,
+       .disconnect             = rtsx_usb_disconnect,
+       .suspend                = rtsx_usb_suspend,
+       .resume                 = rtsx_usb_resume,
+       .reset_resume           = rtsx_usb_reset_resume,
+       .pre_reset              = rtsx_usb_pre_reset,
+       .post_reset             = rtsx_usb_post_reset,
+       .id_table               = rtsx_usb_usb_ids,
+       .supports_autosuspend   = 1,
+       .soft_unbind            = 1,
+};
+
+module_usb_driver(rtsx_usb_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Roger Tseng <rogerable@realtek.com>");
+MODULE_DESCRIPTION("Realtek USB Card Reader Driver");
index bb7fd3f..19969ee 100644 (file)
@@ -2083,6 +2083,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
        /* There should only be one entry, but go through the list
         * anyway
         */
+       if (afu->phb == NULL)
+               return result;
+
        list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
                if (!afu_dev->driver)
                        continue;
@@ -2124,8 +2127,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
                         * Tell the AFU drivers; but we don't care what they
                         * say, we're going away.
                         */
-                       if (afu->phb != NULL)
-                               cxl_vphb_error_detected(afu, state);
+                       cxl_vphb_error_detected(afu, state);
                }
                return PCI_ERS_RESULT_DISCONNECT;
        }
@@ -2265,6 +2267,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
                if (cxl_afu_select_best_mode(afu))
                        goto err;
 
+               if (afu->phb == NULL)
+                       continue;
+
                list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
                        /* Reset the device context.
                         * TODO: make this less disruptive
@@ -2327,6 +2332,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
        for (i = 0; i < adapter->slices; i++) {
                afu = adapter->afu[i];
 
+               if (afu->phb == NULL)
+                       continue;
+
                list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
                        if (afu_dev->driver && afu_dev->driver->err_handler &&
                            afu_dev->driver->err_handler->resume)
index e0b4b36..4d63ac8 100644 (file)
@@ -425,7 +425,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
        memset(msg, 0, sizeof(msg));
        msg[0].addr = client->addr;
        msg[0].buf = addrbuf;
-       addrbuf[0] = 0x90 + offset;
+       /* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
+       addrbuf[0] = 0xa0 - at24->chip.byte_len + offset;
        msg[0].len = 1;
        msg[1].addr = client->addr;
        msg[1].flags = I2C_M_RD;
@@ -561,18 +562,19 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
 static int at24_read(void *priv, unsigned int off, void *val, size_t count)
 {
        struct at24_data *at24 = priv;
-       struct i2c_client *client;
+       struct device *dev = &at24->client[0]->dev;
        char *buf = val;
        int ret;
 
        if (unlikely(!count))
                return count;
 
-       client = at24_translate_offset(at24, &off);
+       if (off + count > at24->chip.byte_len)
+               return -EINVAL;
 
-       ret = pm_runtime_get_sync(&client->dev);
+       ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
-               pm_runtime_put_noidle(&client->dev);
+               pm_runtime_put_noidle(dev);
                return ret;
        }
 
@@ -588,7 +590,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
                status = at24->read_func(at24, buf, off, count);
                if (status < 0) {
                        mutex_unlock(&at24->lock);
-                       pm_runtime_put(&client->dev);
+                       pm_runtime_put(dev);
                        return status;
                }
                buf += status;
@@ -598,7 +600,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
 
        mutex_unlock(&at24->lock);
 
-       pm_runtime_put(&client->dev);
+       pm_runtime_put(dev);
 
        return 0;
 }
@@ -606,18 +608,19 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
 static int at24_write(void *priv, unsigned int off, void *val, size_t count)
 {
        struct at24_data *at24 = priv;
-       struct i2c_client *client;
+       struct device *dev = &at24->client[0]->dev;
        char *buf = val;
        int ret;
 
        if (unlikely(!count))
                return -EINVAL;
 
-       client = at24_translate_offset(at24, &off);
+       if (off + count > at24->chip.byte_len)
+               return -EINVAL;
 
-       ret = pm_runtime_get_sync(&client->dev);
+       ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
-               pm_runtime_put_noidle(&client->dev);
+               pm_runtime_put_noidle(dev);
                return ret;
        }
 
@@ -633,7 +636,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
                status = at24->write_func(at24, buf, off, count);
                if (status < 0) {
                        mutex_unlock(&at24->lock);
-                       pm_runtime_put(&client->dev);
+                       pm_runtime_put(dev);
                        return status;
                }
                buf += status;
@@ -643,7 +646,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
 
        mutex_unlock(&at24->lock);
 
-       pm_runtime_put(&client->dev);
+       pm_runtime_put(dev);
 
        return 0;
 }
@@ -730,6 +733,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
                dev_warn(&client->dev,
                        "page_size looks suspicious (no power of 2)!\n");
 
+       /*
+        * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while
+        * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4.
+        *
+        * Eventually we'll get rid of the magic values altogether in favor of
+        * real structs, but for now just manually set the right size.
+        */
+       if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4)
+               chip.byte_len = 6;
+
        /* Use I2C operations unless we're stuck with SMBus extensions. */
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
                if (chip.flags & AT24_FLAG_ADDR16)
@@ -863,7 +876,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
        at24->nvmem_config.reg_read = at24_read;
        at24->nvmem_config.reg_write = at24_write;
        at24->nvmem_config.priv = at24;
-       at24->nvmem_config.stride = 4;
+       at24->nvmem_config.stride = 1;
        at24->nvmem_config.word_size = 1;
        at24->nvmem_config.size = chip.byte_len;
 
index eda38cb..41f2a9f 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/pci.h>
 #include <linux/mutex.h>
 #include <linux/miscdevice.h>
-#include <linux/pti.h>
+#include <linux/intel-pti.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
index ea80ff4..ccfa98a 100644 (file)
@@ -122,6 +122,10 @@ struct mmc_blk_data {
        struct device_attribute force_ro;
        struct device_attribute power_ro_lock;
        int     area_type;
+
+       /* debugfs files (only in main mmc_blk_data) */
+       struct dentry *status_dentry;
+       struct dentry *ext_csd_dentry;
 };
 
 /* Device type for RPMB character devices */
@@ -233,9 +237,14 @@ static ssize_t power_ro_lock_store(struct device *dev,
 
        /* Dispatch locking to the block layer */
        req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               count = PTR_ERR(req);
+               goto out_put;
+       }
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
+       blk_put_request(req);
 
        if (!ret) {
                pr_info("%s: Locking boot partition ro until next power on\n",
@@ -248,7 +257,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
                                set_disk_ro(part_md->disk, 1);
                        }
        }
-
+out_put:
        mmc_blk_put(md);
        return count;
 }
@@ -624,6 +633,10 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
        req = blk_get_request(mq->queue,
                idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto cmd_done;
+       }
        idatas[0] = idata;
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
@@ -691,6 +704,10 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
        req = blk_get_request(mq->queue,
                idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto cmd_err;
+       }
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
        req_to_mmc_queue_req(req)->drv_op_data = idata;
@@ -2550,6 +2567,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
 
        /* Ask the block layer about the card status */
        req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
@@ -2557,6 +2576,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
                *val = ret;
                ret = 0;
        }
+       blk_put_request(req);
 
        return ret;
 }
@@ -2583,10 +2603,15 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
 
        /* Ask the block layer for the EXT CSD */
        req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto out_free;
+       }
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
        req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
        blk_execute_rq(mq->queue, NULL, req, 0);
        err = req_to_mmc_queue_req(req)->drv_op_result;
+       blk_put_request(req);
        if (err) {
                pr_err("FAILED %d\n", err);
                goto out_free;
@@ -2632,7 +2657,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
        .llseek         = default_llseek,
 };
 
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
 {
        struct dentry *root;
 
@@ -2642,28 +2667,53 @@ static int mmc_blk_add_debugfs(struct mmc_card *card)
        root = card->debugfs_root;
 
        if (mmc_card_mmc(card) || mmc_card_sd(card)) {
-               if (!debugfs_create_file("status", S_IRUSR, root, card,
-                                        &mmc_dbg_card_status_fops))
+               md->status_dentry =
+                       debugfs_create_file("status", S_IRUSR, root, card,
+                                           &mmc_dbg_card_status_fops);
+               if (!md->status_dentry)
                        return -EIO;
        }
 
        if (mmc_card_mmc(card)) {
-               if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
-                                        &mmc_dbg_ext_csd_fops))
+               md->ext_csd_dentry =
+                       debugfs_create_file("ext_csd", S_IRUSR, root, card,
+                                           &mmc_dbg_ext_csd_fops);
+               if (!md->ext_csd_dentry)
                        return -EIO;
        }
 
        return 0;
 }
 
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+                                  struct mmc_blk_data *md)
+{
+       if (!card->debugfs_root)
+               return;
+
+       if (!IS_ERR_OR_NULL(md->status_dentry)) {
+               debugfs_remove(md->status_dentry);
+               md->status_dentry = NULL;
+       }
+
+       if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
+               debugfs_remove(md->ext_csd_dentry);
+               md->ext_csd_dentry = NULL;
+       }
+}
 
 #else
 
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
 {
        return 0;
 }
 
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+                                  struct mmc_blk_data *md)
+{
+}
+
 #endif /* CONFIG_DEBUG_FS */
 
 static int mmc_blk_probe(struct mmc_card *card)
@@ -2703,7 +2753,7 @@ static int mmc_blk_probe(struct mmc_card *card)
        }
 
        /* Add two debugfs entries */
-       mmc_blk_add_debugfs(card);
+       mmc_blk_add_debugfs(card, md);
 
        pm_runtime_set_autosuspend_delay(&card->dev, 3000);
        pm_runtime_use_autosuspend(&card->dev);
@@ -2729,6 +2779,7 @@ static void mmc_blk_remove(struct mmc_card *card)
 {
        struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
 
+       mmc_blk_remove_debugfs(card, md);
        mmc_blk_remove_parts(card, md);
        pm_runtime_get_sync(&card->dev);
        mmc_claim_host(card->host);
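The recurring change in this file is a single pattern applied to every driver-op path: check the return value of blk_get_request() with IS_ERR() before touching the request, and release it with blk_put_request() once the result has been read. A condensed sketch of that pattern, reusing the helpers seen in the hunks above (the wrapper function itself is hypothetical):

/* Sketch only: assumes the includes and types of the mmc block driver above. */
static int example_issue_drv_op(struct mmc_queue *mq, enum mmc_drv_op op)
{
        struct request *req;
        int ret;

        req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
        if (IS_ERR(req))                /* allocation can fail; propagate it */
                return PTR_ERR(req);

        req_to_mmc_queue_req(req)->drv_op = op;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ret = req_to_mmc_queue_req(req)->drv_op_result;

        blk_put_request(req);           /* previously missing on these paths */
        return ret;
}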
index a4b49e2..7586ff2 100644 (file)
@@ -157,6 +157,9 @@ static int mmc_bus_suspend(struct device *dev)
                return ret;
 
        ret = host->bus_ops->suspend(host);
+       if (ret)
+               pm_generic_resume(dev);
+
        return ret;
 }
 
index f06cd91..79a5b98 100644 (file)
@@ -75,9 +75,11 @@ struct mmc_fixup {
 #define EXT_CSD_REV_ANY (-1u)
 
 #define CID_MANFID_SANDISK      0x2
+#define CID_MANFID_ATP          0x9
 #define CID_MANFID_TOSHIBA      0x11
 #define CID_MANFID_MICRON       0x13
 #define CID_MANFID_SAMSUNG      0x15
+#define CID_MANFID_APACER       0x27
 #define CID_MANFID_KINGSTON     0x70
 #define CID_MANFID_HYNIX       0x90
 
index 01e459a..0f4a7d7 100644 (file)
@@ -314,4 +314,5 @@ err:
 void mmc_remove_card_debugfs(struct mmc_card *card)
 {
        debugfs_remove_recursive(card->debugfs_root);
+       card->debugfs_root = NULL;
 }
index a552f61..208a762 100644 (file)
@@ -781,7 +781,7 @@ MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
 MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
-MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
 MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
        card->ext_csd.device_life_time_est_typ_a,
        card->ext_csd.device_life_time_est_typ_b);
@@ -791,7 +791,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
 
 static ssize_t mmc_fwrev_show(struct device *dev,
@@ -1290,7 +1290,7 @@ out_err:
 
 static void mmc_select_driver_type(struct mmc_card *card)
 {
-       int card_drv_type, drive_strength, drv_type;
+       int card_drv_type, drive_strength, drv_type = 0;
        int fixed_drv_type = card->host->fixed_drv_type;
 
        card_drv_type = card->ext_csd.raw_driver_strength |
index f664e9c..75d3176 100644 (file)
@@ -52,6 +52,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = {
        MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
 
+       /*
+        * Some SD cards lock up while using CMD23 multiblock transfers.
+        */
+       MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
+                 MMC_QUIRK_BLK_NO_CMD23),
+       MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd,
+                 MMC_QUIRK_BLK_NO_CMD23),
+
        /*
         * Some MMC cards need longer data read timeout than indicated in CSD.
         */
index 45bf78f..62b84dd 100644 (file)
@@ -675,7 +675,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 
 
 static ssize_t mmc_dsr_show(struct device *dev,
index 567028c..cec8152 100644 (file)
@@ -838,14 +838,14 @@ config MMC_USDHI6ROL0
 
 config MMC_REALTEK_PCI
        tristate "Realtek PCI-E SD/MMC Card Interface Driver"
-       depends on MFD_RTSX_PCI
+       depends on MISC_RTSX_PCI
        help
          Say Y here to include driver code to support SD/MMC card interface
          of Realtek PCI-E card reader
 
 config MMC_REALTEK_USB
        tristate "Realtek USB SD/MMC Card Interface Driver"
-       depends on MFD_RTSX_USB
+       depends on MISC_RTSX_USB
        help
          Say Y here to include driver code to support SD/MMC card interface
          of Realtek RTS5129/39 series card reader
index fcf7235..157e1d9 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/mmc/host.h>
@@ -667,3 +668,5 @@ int renesas_sdhi_remove(struct platform_device *pdev)
        return 0;
 }
 EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
+
+MODULE_LICENSE("GPL v2");
index 0848dc0..30bd808 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/mmc/sd.h>
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/card.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
 #include <asm/unaligned.h>
 
 struct realtek_pci_sdmmc {
index 76da168..7842207 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/scatterlist.h>
 #include <linux/pm_runtime.h>
 
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
 #include <asm/unaligned.h>
 
 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
index f7f157a..555c7f1 100644 (file)
@@ -1424,7 +1424,9 @@ static const struct file_operations s3cmci_fops_state = {
 struct s3cmci_reg {
        unsigned short  addr;
        unsigned char   *name;
-} debug_regs[] = {
+};
+
+static const struct s3cmci_reg debug_regs[] = {
        DBG_REG(CON),
        DBG_REG(PRE),
        DBG_REG(CMDARG),
@@ -1446,7 +1448,7 @@ struct s3cmci_reg {
 static int s3cmci_regs_show(struct seq_file *seq, void *v)
 {
        struct s3cmci_host *host = seq->private;
-       struct s3cmci_reg *rptr = debug_regs;
+       const struct s3cmci_reg *rptr = debug_regs;
 
        for (; rptr->name; rptr++)
                seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
index 85140c9..8b941f8 100644 (file)
@@ -687,6 +687,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
                return;
        }
 
+       /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */
+       if (is_imx53_esdhc(imx_data)) {
+               /*
+                * According to the i.MX53 reference manual, if DLLCTRL[10] can
+                * be set, then the controller is eSDHCv3, else it is eSDHCv2.
+                */
+               val = readl(host->ioaddr + ESDHC_DLL_CTRL);
+               writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
+               temp = readl(host->ioaddr + ESDHC_DLL_CTRL);
+               writel(val, host->ioaddr + ESDHC_DLL_CTRL);
+               if (temp & BIT(10))
+                       pre_div = 2;
+       }
+
        temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
        temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
                | ESDHC_CLOCK_MASK);
index 3fb7d2e..c283291 100644 (file)
@@ -29,6 +29,9 @@
 #define CORE_VERSION_MAJOR_MASK                (0xf << CORE_VERSION_MAJOR_SHIFT)
 #define CORE_VERSION_MINOR_MASK                0xff
 
+#define CORE_MCI_GENERICS              0x70
+#define SWITCHABLE_SIGNALING_VOLTAGE   BIT(29)
+
 #define CORE_HC_MODE           0x78
 #define HC_MODE_EN             0x1
 #define CORE_POWER             0x0
@@ -1028,11 +1031,22 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        bool done = false;
+       u32 val;
 
        pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
                        mmc_hostname(host->mmc), __func__, req_type,
                        msm_host->curr_pwr_state, msm_host->curr_io_level);
 
+       /*
+        * The power interrupt will not be generated for signal voltage
+        * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
+        */
+       val = readl(msm_host->core_mem + CORE_MCI_GENERICS);
+       if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
+           !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
+               return;
+       }
+
        /*
         * The IRQ for request type IO High/LOW will be generated when -
         * there is a state change in 1.8V enable bit (bit 3) of
index 2f14334..e9290a3 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
@@ -3650,23 +3651,30 @@ int sdhci_setup_host(struct sdhci_host *host)
 
        spin_lock_init(&host->lock);
 
+       /*
+        * Maximum number of sectors in one transfer. Limited by SDMA boundary
+        * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
+        * is less anyway.
+        */
+       mmc->max_req_size = 524288;
+
        /*
         * Maximum number of segments. Depends on if the hardware
         * can do scatter/gather or not.
         */
-       if (host->flags & SDHCI_USE_ADMA)
+       if (host->flags & SDHCI_USE_ADMA) {
                mmc->max_segs = SDHCI_MAX_SEGS;
-       else if (host->flags & SDHCI_USE_SDMA)
+       } else if (host->flags & SDHCI_USE_SDMA) {
                mmc->max_segs = 1;
-       else /* PIO */
+               if (swiotlb_max_segment()) {
+                       unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
+                                               IO_TLB_SEGSIZE;
+                       mmc->max_req_size = min(mmc->max_req_size,
+                                               max_req_size);
+               }
+       } else { /* PIO */
                mmc->max_segs = SDHCI_MAX_SEGS;
-
-       /*
-        * Maximum number of sectors in one transfer. Limited by SDMA boundary
-        * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
-        * is less anyway.
-        */
-       mmc->max_req_size = 524288;
+       }
 
        /*
         * Maximum segment size. Could be one segment with the maximum number
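For scale: assuming the swiotlb constants of this kernel generation (IO_TLB_SHIFT = 11 and IO_TLB_SEGSIZE = 128 in include/linux/swiotlb.h — stated from memory, not from this hunk), the clamp above works out to (1 << 11) * 128 = 262144 bytes, so an SDMA-only host sitting behind a bounce buffer is limited to 256 KiB per request rather than the 512 KiB default set a few lines earlier.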
index d7ab091..28553c8 100644 (file)
@@ -1146,7 +1146,7 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
        if (!ops->oobbuf)
                ops->ooblen = 0;
 
-       if (offs < 0 || offs + ops->len >= mtd->size)
+       if (offs < 0 || offs + ops->len > mtd->size)
                return -EINVAL;
 
        if (ops->ooblen) {
index e43fea8..d58a61c 100644 (file)
@@ -79,14 +79,14 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
        pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
              mtd->index, mtd->name);
 
-       ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+       ret = fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
        if (ret < 0) {
                deactivate_locked_super(sb);
                return ERR_PTR(ret);
        }
 
        /* go */
-       sb->s_flags |= MS_ACTIVE;
+       sb->s_flags |= SB_ACTIVE;
        return dget(sb->s_root);
 
        /* new mountpoint for an already mounted superblock */
@@ -202,7 +202,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
 not_an_MTD_device:
 #endif /* CONFIG_BLOCK */
 
-       if (!(flags & MS_SILENT))
+       if (!(flags & SB_SILENT))
                printk(KERN_NOTICE
                       "MTD: Attempt to mount non-MTD device \"%s\"\n",
                       dev_name);
index b81ddba..c28fd2b 100644 (file)
@@ -1762,7 +1762,7 @@ try_dmaread:
                        err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
                                                              addr);
                        /* erased page bitflips corrected */
-                       if (err > 0)
+                       if (err >= 0)
                                return err;
                }
 
index 484f7fb..a8bde66 100644 (file)
@@ -253,9 +253,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
                goto out_ce;
        }
 
-       gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
-       if (IS_ERR(gpiomtd->nwp)) {
-               ret = PTR_ERR(gpiomtd->nwp);
+       gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiomtd->ale)) {
+               ret = PTR_ERR(gpiomtd->ale);
                goto out_ce;
        }
 
index ab9a0a2..61fdd73 100644 (file)
@@ -1069,9 +1069,6 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
                return ret;
        }
 
-       /* handle the block mark swapping */
-       block_mark_swapping(this, payload_virt, auxiliary_virt);
-
        /* Loop over status bytes, accumulating ECC status. */
        status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
 
@@ -1160,6 +1157,9 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
                max_bitflips = max_t(unsigned int, max_bitflips, *status);
        }
 
+       /* handle the block mark swapping */
+       block_mark_swapping(this, buf, auxiliary_virt);
+
        if (oob_required) {
                /*
                 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
index 021374f..d1979c7 100644 (file)
@@ -961,6 +961,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
 
        switch (command) {
        case NAND_CMD_READ0:
+       case NAND_CMD_READOOB:
        case NAND_CMD_PAGEPROG:
                info->use_ecc = 1;
                break;
index 2260063..6e5cf9d 100644 (file)
@@ -413,6 +413,7 @@ static int of_dev_node_match(struct device *dev, const void *data)
        return dev->of_node == data;
 }
 
+/* Note this function returns a reference to the mux_chip dev. */
 static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
 {
        struct device *dev;
@@ -466,6 +467,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
            (!args.args_count && (mux_chip->controllers > 1))) {
                dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
                        np, args.np);
+               put_device(&mux_chip->dev);
                return ERR_PTR(-EINVAL);
        }
 
@@ -476,10 +478,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
        if (controller >= mux_chip->controllers) {
                dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
                        np, controller, args.np);
+               put_device(&mux_chip->dev);
                return ERR_PTR(-EINVAL);
        }
 
-       get_device(&mux_chip->dev);
        return &mux_chip->mux[controller];
 }
 EXPORT_SYMBOL_GPL(mux_control_get);
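The point of this hunk is reference balance: of_find_mux_chip_by_node() already returns with a reference held on the mux_chip device (hence the new comment above it), so the explicit get_device() on the success path was redundant, and the two error returns taken after a successful lookup now drop that reference with put_device() instead of leaking it.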
index a1b33aa..9697977 100644 (file)
@@ -423,7 +423,7 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
                        return -EINVAL;
 
                bond_opt_initval(&newval,
-                                nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
+                                nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
                err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
                if (err)
                        return err;
index a13a489..760d2c0 100644 (file)
  * Below is some version info we got:
  *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
  *                                Filter? connected?  Passive detection  ception in MB
- *   MX25  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX25  FlexCAN2  03.00.00.00     no        no        no       no        no
  *   MX28  FlexCAN2  03.00.04.00    yes       yes        no       no        no
- *   MX35  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX35  FlexCAN2  03.00.00.00     no        no        no       no        no
  *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
  *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
- *   VF610 FlexCAN3  ?               no       yes         ?      yes       yes?
+ *   VF610 FlexCAN3  ?               no       yes        no      yes       yes?
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
@@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
 
 static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
        .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
-               FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+               FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+               FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
 static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -525,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
                data = be32_to_cpup((__be32 *)&cf->data[0]);
                flexcan_write(data, &priv->tx_mb->data[0]);
        }
-       if (cf->can_dlc > 3) {
+       if (cf->can_dlc > 4) {
                data = be32_to_cpup((__be32 *)&cf->data[4]);
                flexcan_write(data, &priv->tx_mb->data[1]);
        }
index 85268be..5551341 100644 (file)
@@ -258,21 +258,18 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
        /* if this frame is an echo, */
        if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
            !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
-               int n;
                unsigned long flags;
 
                spin_lock_irqsave(&priv->echo_lock, flags);
-               n = can_get_echo_skb(priv->ndev, msg->client);
+               can_get_echo_skb(priv->ndev, msg->client);
                spin_unlock_irqrestore(&priv->echo_lock, flags);
 
                /* count bytes of the echo instead of skb */
                stats->tx_bytes += cf_len;
                stats->tx_packets++;
 
-               if (n) {
-                       /* restart tx queue only if a slot is free */
-                       netif_wake_queue(priv->ndev);
-               }
+               /* restart tx queue (a slot is free) */
+               netif_wake_queue(priv->ndev);
 
                return 0;
        }
index b4efd71..788c346 100644 (file)
@@ -825,7 +825,10 @@ err_release_regions:
 err_disable_pci:
        pci_disable_device(pdev);
 
-       return err;
+       /* pci_xxx_config_word() returns positive PCIBIOS_xxx error codes while
+        * the probe() function must return a negative errno in case of failure
+        * (err is unchanged if negative) */
+       return pcibios_err_to_errno(err);
 }
 
 /* free the board structure object, as well as its resources: */
index 131026f..5adc95c 100644 (file)
@@ -717,7 +717,10 @@ failure_release_regions:
 failure_disable_pci:
        pci_disable_device(pdev);
 
-       return err;
+       /* pci_xxx_config_word() returns positive PCIBIOS_xxx error codes while
+        * the probe() function must return a negative errno in case of failure
+        * (err is unchanged if negative) */
+       return pcibios_err_to_errno(err);
 }
 
 static void peak_pci_remove(struct pci_dev *pdev)
index 4d49414..db6ea93 100644 (file)
@@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
                mbx_mask = hecc_read(priv, HECC_CANMIM);
                mbx_mask |= HECC_TX_MBOX_MASK;
                hecc_write(priv, HECC_CANMIM, mbx_mask);
+       } else {
+               /* repoll is done only if the whole budget is used */
+               num_pkts = quota;
        }
 
        return num_pkts;
index b3d0275..12ff002 100644 (file)
@@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
 
        case -ECONNRESET: /* unlink */
        case -ENOENT:
+       case -EPIPE:
+       case -EPROTO:
        case -ESHUTDOWN:
                return;
 
@@ -393,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
 
                if (dev->can.state == CAN_STATE_ERROR_WARNING ||
                    dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+                       cf->can_id |= CAN_ERR_CRTL;
                        cf->data[1] = (txerr > rxerr) ?
                            CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
                }
index 9fdb0f0..c6dcf93 100644 (file)
@@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
                break;
 
        case -ENOENT:
+       case -EPIPE:
+       case -EPROTO:
        case -ESHUTDOWN:
                return;
 
index 68ac3e8..8bf80ad 100644 (file)
@@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
                dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
                        rc);
 
-       return rc;
+       return (rc > 0) ? 0 : rc;
 }
 
 static void gs_usb_xmit_callback(struct urb *urb)
index 9b18d96..63587b8 100644 (file)
@@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
                        }
 
                        if (pos + tmp->len > actual_len) {
-                               dev_err(dev->udev->dev.parent,
-                                       "Format error\n");
+                               dev_err_ratelimited(dev->udev->dev.parent,
+                                                   "Format error\n");
                                break;
                        }
 
@@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
        if (err) {
                netdev_err(netdev, "Error transmitting URB\n");
                usb_unanchor_urb(urb);
+               kfree(buf);
                usb_free_urb(urb);
                return err;
        }
@@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
        case 0:
                break;
        case -ENOENT:
+       case -EPIPE:
+       case -EPROTO:
        case -ESHUTDOWN:
                return;
        default:
@@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                goto resubmit_urb;
        }
 
-       while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+       while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
                msg = urb->transfer_buffer + pos;
 
                /* The Kvaser firmware can only read and write messages that
@@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                }
 
                if (pos + msg->len > urb->actual_length) {
-                       dev_err(dev->udev->dev.parent, "Format error\n");
+                       dev_err_ratelimited(dev->udev->dev.parent,
+                                           "Format error\n");
                        break;
                }
 
@@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 
                usb_unanchor_urb(urb);
+               kfree(buf);
 
                stats->tx_dropped++;
 
index 7f02725..8d8c208 100644 (file)
@@ -592,6 +592,8 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
                break;
 
        case -ENOENT:
+       case -EPIPE:
+       case -EPROTO:
        case -ESHUTDOWN:
                return;
 
@@ -862,7 +864,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
                goto cleanup_unregister_candev;
        }
 
-       dev_info(&intf->dev, "Microchip CAN BUS analizer connected\n");
+       dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n");
 
        return 0;
 
index 7ccdc3e..53d6bb0 100644 (file)
@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
        void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
        int err = 0;
        u8 *packet_ptr;
-       int i, n = 1, packet_len;
+       int packet_len;
        ptrdiff_t cmd_len;
 
        /* usb device unregistered? */
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
        }
 
        packet_ptr = cmd_head;
+       packet_len = cmd_len;
 
        /* firmware is not able to re-assemble 512 bytes buffer in full-speed */
-       if ((dev->udev->speed != USB_SPEED_HIGH) &&
-           (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
-               packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
-               n += cmd_len / packet_len;
-       } else {
-               packet_len = cmd_len;
-       }
+       if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
+               packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
 
-       for (i = 0; i < n; i++) {
+       do {
                err = usb_bulk_msg(dev->udev,
                                   usb_sndbulkpipe(dev->udev,
                                                   PCAN_USBPRO_EP_CMDOUT),
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
                }
 
                packet_ptr += packet_len;
-       }
+               cmd_len -= packet_len;
+
+               if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
+                       packet_len = cmd_len;
+
+       } while (packet_len > 0);
 
        return err;
 }
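The rewritten loop sends the command buffer in packet_len-sized pieces until cmd_len is exhausted: on a high-speed link the whole buffer goes out in one usb_bulk_msg() call, while on a full-speed link each iteration is capped at PCAN_UFD_LOSPD_PKT_SIZE. Assuming that constant is 64 bytes (its definition is not part of this hunk), a 512-byte command is sent as eight 64-byte transfers, and any trailing partial chunk is handled by the packet_len = cmd_len assignment at the bottom of the loop.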
index d000cb6..27861c4 100644 (file)
@@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)
                break;
 
        case -ENOENT:
+       case -EPIPE:
+       case -EPROTO:
        case -ESHUTDOWN:
                return;
 
index 8404e88..b4c4a2c 100644 (file)
@@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
                tbp = peer_tb;
        }
 
-       if (tbp[IFLA_IFNAME]) {
+       if (ifmp && tbp[IFLA_IFNAME]) {
                nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
                name_assign_type = NET_NAME_USER;
        } else {
index f5a8dd9..4498ab8 100644 (file)
@@ -1500,10 +1500,13 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
 {
        struct b53_device *dev = ds->priv;
 
-       /* Older models support a different tag format that we do not
-        * support in net/dsa/tag_brcm.c yet.
+       /* Older models (5325, 5365) support a different tag format that we do
+        * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
+        * mode to be turned on which means we need to specifically manage ARL
+        * misses on multicast addresses (TBD).
         */
-       if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port))
+       if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
+           !b53_can_enable_brcm_tags(ds, port))
                return DSA_TAG_PROTO_NONE;
 
        /* Broadcom BCM58xx chips have a flow accelerator on Port 8
index ea01f24..b62d472 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
-#include <linux/of.h>
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
 #include <linux/mii.h>
index b721a20..23b45da 100644 (file)
@@ -625,7 +625,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
        bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
                                slice_num, false);
        bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
-                               slice_num, true);
+                               SLICE_NUM_MASK, true);
 
        /* Insert into TCAM now because we need to insert a second rule */
        bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,7 +699,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
        /* Insert into Action and policer RAMs now, set chain ID to
         * the one we are chained to
         */
-       ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
+       ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
                                      queue_num, true);
        if (ret)
                goto out_err;
index 8171055..66d33e9 100644 (file)
@@ -339,7 +339,7 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
        u16 mask;
 
        mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
-       mask |= GENMASK(chip->g1_irq.nirqs, 0);
+       mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
        mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
 
        free_irq(chip->irq, chip);
@@ -395,7 +395,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
        return 0;
 
 out_disable:
-       mask |= GENMASK(chip->g1_irq.nirqs, 0);
+       mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
        mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
 
 out_mapping:
@@ -2177,6 +2177,19 @@ static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
        { },
 };
 
+static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
+{
+       struct mv88e6xxx_mdio_bus *mdio_bus;
+       struct mii_bus *bus;
+
+       list_for_each_entry(mdio_bus, &chip->mdios, list) {
+               bus = mdio_bus->bus;
+
+               mdiobus_unregister(bus);
+       }
+}
+
 static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
                                    struct device_node *np)
 {
@@ -2201,27 +2214,16 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
                match = of_match_node(mv88e6xxx_mdio_external_match, child);
                if (match) {
                        err = mv88e6xxx_mdio_register(chip, child, true);
-                       if (err)
+                       if (err) {
+                               mv88e6xxx_mdios_unregister(chip);
                                return err;
+                       }
                }
        }
 
        return 0;
 }
 
-static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
-
-{
-       struct mv88e6xxx_mdio_bus *mdio_bus;
-       struct mii_bus *bus;
-
-       list_for_each_entry(mdio_bus, &chip->mdios, list) {
-               bus = mdio_bus->bus;
-
-               mdiobus_unregister(bus);
-       }
-}
-
 static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
index a7801f6..6315774 100644 (file)
@@ -338,6 +338,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
                break;
        case PHY_INTERFACE_MODE_XGMII:
+       case PHY_INTERFACE_MODE_XAUI:
                cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
                break;
        case PHY_INTERFACE_MODE_RXAUI:
index f4e13a7..36c8950 100644 (file)
@@ -602,7 +602,7 @@ struct vortex_private {
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
        unsigned int cur_rx, cur_tx;            /* The next free ring entry */
-       unsigned int dirty_rx, dirty_tx;        /* The ring entries to be free()ed. */
+       unsigned int dirty_tx;  /* The ring entries to be free()ed. */
        struct vortex_extra_stats xstats;       /* NIC-specific extra stats */
        struct sk_buff *tx_skb;                         /* Packet being eaten by bus master ctrl.  */
        dma_addr_t tx_skb_dma;                          /* Allocated DMA address for bus master ctrl DMA.   */
@@ -618,7 +618,6 @@ struct vortex_private {
 
        /* The remainder are related to chip state, mostly media selection. */
        struct timer_list timer;                        /* Media selection timer. */
-       struct timer_list rx_oom_timer;         /* Rx skb allocation retry timer */
        int options;                                            /* User-settable misc. driver options. */
        unsigned int media_override:4,          /* Passed-in media type. */
                default_media:4,                                /* Read from the EEPROM/Wn3_Config. */
@@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
 static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
 static void vortex_timer(struct timer_list *t);
-static void rx_oom_timer(struct timer_list *t);
 static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
                                     struct net_device *dev);
 static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
 
        timer_setup(&vp->timer, vortex_timer, 0);
        mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
-       timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
 
        if (vortex_debug > 1)
                pr_debug("%s: Initial media type %s.\n",
@@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
        window_write16(vp, 0x0040, 4, Wn4_NetDiag);
 
        if (vp->full_bus_master_rx) { /* Boomerang bus master. */
-               vp->cur_rx = vp->dirty_rx = 0;
+               vp->cur_rx = 0;
                /* Initialize the RxEarly register as recommended. */
                iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
                iowrite32(0x0020, ioaddr + PktStatus);
@@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
        struct vortex_private *vp = netdev_priv(dev);
        int i;
        int retval;
+       dma_addr_t dma;
 
        /* Use the now-standard shared IRQ implementation. */
        if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
                                break;                  /* Bad news!  */
 
                        skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
-                       vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+                       dma = pci_map_single(VORTEX_PCI(vp), skb->data,
+                                            PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+                               break;
+                       vp->rx_ring[i].addr = cpu_to_le32(dma);
                }
                if (i != RX_RING_SIZE) {
                        pr_emerg("%s: no memory for rx ring\n", dev->name);
@@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
                int len = (skb->len + 3) & ~3;
                vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
                                                PCI_DMA_TODEVICE);
+               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+                       dev_kfree_skb_any(skb);
+                       dev->stats.tx_dropped++;
+                       return NETDEV_TX_OK;
+               }
+
                spin_lock_irq(&vp->window_lock);
                window_set(vp, 7);
                iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
        int entry = vp->cur_rx % RX_RING_SIZE;
        void __iomem *ioaddr = vp->ioaddr;
        int rx_status;
-       int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+       int rx_work_limit = RX_RING_SIZE;
 
        if (vortex_debug > 5)
                pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
                } else {
                        /* The packet length: up to 4.5K!. */
                        int pkt_len = rx_status & 0x1fff;
-                       struct sk_buff *skb;
+                       struct sk_buff *skb, *newskb;
+                       dma_addr_t newdma;
                        dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
 
                        if (vortex_debug > 4)
@@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
                                pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                                vp->rx_copy++;
                        } else {
+                               /* Pre-allocate the replacement skb.  If it or its
+                                * mapping fails, then recycle the buffer that's already
+                                * in place.
+                                */
+                               newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
+                               if (!newskb) {
+                                       dev->stats.rx_dropped++;
+                                       goto clear_complete;
+                               }
+                               newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
+                                                       PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+                                       dev->stats.rx_dropped++;
+                                       consume_skb(newskb);
+                                       goto clear_complete;
+                               }
+
                                /* Pass up the skbuff already on the Rx ring. */
                                skb = vp->rx_skbuff[entry];
-                               vp->rx_skbuff[entry] = NULL;
+                               vp->rx_skbuff[entry] = newskb;
+                               vp->rx_ring[entry].addr = cpu_to_le32(newdma);
                                skb_put(skb, pkt_len);
                                pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                                vp->rx_nocopy++;
@@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
                        netif_rx(skb);
                        dev->stats.rx_packets++;
                }
-               entry = (++vp->cur_rx) % RX_RING_SIZE;
-       }
-       /* Refill the Rx ring buffers. */
-       for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
-               struct sk_buff *skb;
-               entry = vp->dirty_rx % RX_RING_SIZE;
-               if (vp->rx_skbuff[entry] == NULL) {
-                       skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
-                       if (skb == NULL) {
-                               static unsigned long last_jif;
-                               if (time_after(jiffies, last_jif + 10 * HZ)) {
-                                       pr_warn("%s: memory shortage\n",
-                                               dev->name);
-                                       last_jif = jiffies;
-                               }
-                               if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
-                                       mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
-                               break;                  /* Bad news!  */
-                       }
 
-                       vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
-                       vp->rx_skbuff[entry] = skb;
-               }
+clear_complete:
                vp->rx_ring[entry].status = 0;  /* Clear complete bit. */
                iowrite16(UpUnstall, ioaddr + EL3_CMD);
+               entry = (++vp->cur_rx) % RX_RING_SIZE;
        }
        return 0;
 }
 
-/*
- * If we've hit a total OOM refilling the Rx ring we poll once a second
- * for some memory.  Otherwise there is no way to restart the rx process.
- */
-static void
-rx_oom_timer(struct timer_list *t)
-{
-       struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
-       struct net_device *dev = vp->mii.dev;
-
-       spin_lock_irq(&vp->lock);
-       if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)        /* This test is redundant, but makes me feel good */
-               boomerang_rx(dev);
-       if (vortex_debug > 1) {
-               pr_debug("%s: rx_oom_timer %s\n", dev->name,
-                       ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
-       }
-       spin_unlock_irq(&vp->lock);
-}
-
 static void
 vortex_down(struct net_device *dev, int final_down)
 {
@@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
        netdev_reset_queue(dev);
        netif_stop_queue(dev);
 
-       del_timer_sync(&vp->rx_oom_timer);
        del_timer_sync(&vp->timer);
 
        /* Turn off statistics ASAP.  We update dev->stats below. */
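The net effect of the 3c59x rework above: boomerang_rx() now allocates and DMA-maps a replacement skb before handing the filled one up to the stack, and simply recycles the buffer already on the ring when either allocation or mapping fails, so the receive ring can never drain empty. That is what makes dirty_rx, the deferred refill loop and the rx_oom_timer fallback removable.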
index 97c5a89..fbe21a8 100644 (file)
@@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
 
 static int ena_rss_init_default(struct ena_adapter *adapter);
+static void check_for_admin_com_state(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter);
+static int ena_restore_device(struct ena_adapter *adapter);
 
 static void ena_tx_timeout(struct net_device *dev)
 {
@@ -1565,7 +1568,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
 
 static int ena_up_complete(struct ena_adapter *adapter)
 {
-       int rc, i;
+       int rc;
 
        rc = ena_rss_configure(adapter);
        if (rc)
@@ -1584,17 +1587,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
 
        ena_napi_enable_all(adapter);
 
-       /* Enable completion queues interrupt */
-       for (i = 0; i < adapter->num_queues; i++)
-               ena_unmask_interrupt(&adapter->tx_ring[i],
-                                    &adapter->rx_ring[i]);
-
-       /* schedule napi in case we had pending packets
-        * from the last time we disable napi
-        */
-       for (i = 0; i < adapter->num_queues; i++)
-               napi_schedule(&adapter->ena_napi[i].napi);
-
        return 0;
 }
 
@@ -1731,7 +1723,7 @@ create_err:
 
 static int ena_up(struct ena_adapter *adapter)
 {
-       int rc;
+       int rc, i;
 
        netdev_dbg(adapter->netdev, "%s\n", __func__);
 
@@ -1774,6 +1766,17 @@ static int ena_up(struct ena_adapter *adapter)
 
        set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
 
+       /* Enable completion queues interrupt */
+       for (i = 0; i < adapter->num_queues; i++)
+               ena_unmask_interrupt(&adapter->tx_ring[i],
+                                    &adapter->rx_ring[i]);
+
+       /* schedule napi in case we had pending packets
+        * from the last time we disabled napi
+        */
+       for (i = 0; i < adapter->num_queues; i++)
+               napi_schedule(&adapter->ena_napi[i].napi);
+
        return rc;
 
 err_up:
@@ -1884,6 +1887,17 @@ static int ena_close(struct net_device *netdev)
        if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
                ena_down(adapter);
 
+       /* Check the device status and issue a reset if needed */
+       check_for_admin_com_state(adapter);
+       if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+               netif_err(adapter, ifdown, adapter->netdev,
+                         "Destroy failure, restarting device\n");
+               ena_dump_stats_to_dmesg(adapter);
+               /* rtnl lock already obtained in dev_ioctl() layer */
+               ena_destroy_device(adapter);
+               ena_restore_device(adapter);
+       }
+
        return 0;
 }
 
@@ -2544,11 +2558,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)
 
        ena_com_set_admin_running_state(ena_dev, false);
 
-       ena_close(netdev);
+       if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+               ena_down(adapter);
 
        /* Before releasing the ENA resources, a device reset is required.
         * (to prevent the device from accessing them).
-        * In case the reset flag is set and the device is up, ena_close
+        * In case the reset flag is set and the device is up, ena_down()
         * already performs the reset, so it can be skipped.
         */
        if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
index 57e7968..105fdb9 100644 (file)
@@ -50,7 +50,7 @@
 #define AQ_CFG_PCI_FUNC_MSIX_IRQS   9U
 #define AQ_CFG_PCI_FUNC_PORTS       2U
 
-#define AQ_CFG_SERVICE_TIMER_INTERVAL    (2 * HZ)
+#define AQ_CFG_SERVICE_TIMER_INTERVAL    (1 * HZ)
 #define AQ_CFG_POLLING_TIMER_INTERVAL   ((unsigned int)(2 * HZ))
 
 #define AQ_CFG_SKB_FRAGS_MAX   32U
@@ -80,6 +80,7 @@
 #define AQ_CFG_DRV_VERSION     __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
                                __stringify(NIC_MINOR_DRIVER_VERSION)"."\
                                __stringify(NIC_BUILD_DRIVER_VERSION)"."\
-                               __stringify(NIC_REVISION_DRIVER_VERSION)
+                               __stringify(NIC_REVISION_DRIVER_VERSION) \
+                               AQ_CFG_DRV_VERSION_SUFFIX
 
 #endif /* AQ_CFG_H */
index 70efb74..f2d8063 100644 (file)
@@ -66,14 +66,14 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
        "OutUCast",
        "OutMCast",
        "OutBCast",
-       "InUCastOctects",
-       "OutUCastOctects",
-       "InMCastOctects",
-       "OutMCastOctects",
-       "InBCastOctects",
-       "OutBCastOctects",
-       "InOctects",
-       "OutOctects",
+       "InUCastOctets",
+       "OutUCastOctets",
+       "InMCastOctets",
+       "OutMCastOctets",
+       "InBCastOctets",
+       "OutBCastOctets",
+       "InOctets",
+       "OutOctets",
        "InPacketsDma",
        "OutPacketsDma",
        "InOctetsDma",
index 0207927..b3825de 100644 (file)
@@ -46,6 +46,28 @@ struct aq_hw_link_status_s {
        unsigned int mbps;
 };
 
+struct aq_stats_s {
+       u64 uprc;
+       u64 mprc;
+       u64 bprc;
+       u64 erpt;
+       u64 uptc;
+       u64 mptc;
+       u64 bptc;
+       u64 erpr;
+       u64 mbtc;
+       u64 bbtc;
+       u64 mbrc;
+       u64 bbrc;
+       u64 ubrc;
+       u64 ubtc;
+       u64 dpc;
+       u64 dma_pkt_rc;
+       u64 dma_pkt_tc;
+       u64 dma_oct_rc;
+       u64 dma_oct_tc;
+};
+
 #define AQ_HW_IRQ_INVALID 0U
 #define AQ_HW_IRQ_LEGACY  1U
 #define AQ_HW_IRQ_MSI     2U
@@ -85,7 +107,9 @@ struct aq_hw_ops {
        void (*destroy)(struct aq_hw_s *self);
 
        int (*get_hw_caps)(struct aq_hw_s *self,
-                          struct aq_hw_caps_s *aq_hw_caps);
+                          struct aq_hw_caps_s *aq_hw_caps,
+                          unsigned short device,
+                          unsigned short subsystem_device);
 
        int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
                               unsigned int frags);
@@ -164,8 +188,7 @@ struct aq_hw_ops {
 
        int (*hw_update_stats)(struct aq_hw_s *self);
 
-       int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
-                              unsigned int *p_count);
+       struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
 
        int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
 
index 78dfb2a..75a894a 100644 (file)
@@ -37,6 +37,8 @@ static unsigned int aq_itr_rx;
 module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
 MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
 
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
+
 static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
 {
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -166,11 +168,8 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
 static void aq_nic_service_timer_cb(struct timer_list *t)
 {
        struct aq_nic_s *self = from_timer(self, t, service_timer);
-       struct net_device *ndev = aq_nic_get_ndev(self);
+       int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
        int err = 0;
-       unsigned int i = 0U;
-       struct aq_ring_stats_rx_s stats_rx;
-       struct aq_ring_stats_tx_s stats_tx;
 
        if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
                goto err_exit;
@@ -182,23 +181,14 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
        if (self->aq_hw_ops.hw_update_stats)
                self->aq_hw_ops.hw_update_stats(self->aq_hw);
 
-       memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
-       memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
-       for (i = AQ_DIMOF(self->aq_vec); i--;) {
-               if (self->aq_vec[i])
-                       aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
-       }
+       aq_nic_update_ndev_stats(self);
 
-       ndev->stats.rx_packets = stats_rx.packets;
-       ndev->stats.rx_bytes = stats_rx.bytes;
-       ndev->stats.rx_errors = stats_rx.errors;
-       ndev->stats.tx_packets = stats_tx.packets;
-       ndev->stats.tx_bytes = stats_tx.bytes;
-       ndev->stats.tx_errors = stats_tx.errors;
+       /* If there is no link, use a faster timer rate to detect link-up ASAP */
+       if (!netif_carrier_ok(self->ndev))
+               ctimer = max(ctimer / 2, 1);
 
 err_exit:
-       mod_timer(&self->service_timer,
-                 jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+       mod_timer(&self->service_timer, jiffies + ctimer);
 }
 
 static void aq_nic_polling_timer_cb(struct timer_list *t)
@@ -222,7 +212,7 @@ static struct net_device *aq_nic_ndev_alloc(void)
 
 struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
                                   const struct ethtool_ops *et_ops,
-                                  struct device *dev,
+                                  struct pci_dev *pdev,
                                   struct aq_pci_func_s *aq_pci_func,
                                   unsigned int port,
                                   const struct aq_hw_ops *aq_hw_ops)
@@ -242,7 +232,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
        ndev->netdev_ops = ndev_ops;
        ndev->ethtool_ops = et_ops;
 
-       SET_NETDEV_DEV(ndev, dev);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
 
        ndev->if_port = port;
        self->ndev = ndev;
@@ -254,7 +244,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
 
        self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
                                                &self->aq_hw_ops);
-       err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
+       err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
+                                         pdev->device, pdev->subsystem_device);
        if (err < 0)
                goto err_exit;
 
@@ -749,16 +740,40 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
 
 void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
 {
-       struct aq_vec_s *aq_vec = NULL;
        unsigned int i = 0U;
        unsigned int count = 0U;
-       int err = 0;
+       struct aq_vec_s *aq_vec = NULL;
+       struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
 
-       err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
-       if (err < 0)
+       if (!stats)
                goto err_exit;
 
-       data += count;
+       data[i] = stats->uprc + stats->mprc + stats->bprc;
+       data[++i] = stats->uprc;
+       data[++i] = stats->mprc;
+       data[++i] = stats->bprc;
+       data[++i] = stats->erpt;
+       data[++i] = stats->uptc + stats->mptc + stats->bptc;
+       data[++i] = stats->uptc;
+       data[++i] = stats->mptc;
+       data[++i] = stats->bptc;
+       data[++i] = stats->ubrc;
+       data[++i] = stats->ubtc;
+       data[++i] = stats->mbrc;
+       data[++i] = stats->mbtc;
+       data[++i] = stats->bbrc;
+       data[++i] = stats->bbtc;
+       data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+       data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+       data[++i] = stats->dma_pkt_rc;
+       data[++i] = stats->dma_pkt_tc;
+       data[++i] = stats->dma_oct_rc;
+       data[++i] = stats->dma_oct_tc;
+       data[++i] = stats->dpc;
+
+       i++;
+
+       data += i;
        count = 0U;
 
        for (i = 0U, aq_vec = self->aq_vec[0];
@@ -768,7 +783,20 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
        }
 
 err_exit:;
-       (void)err;
+}
+
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
+{
+       struct net_device *ndev = self->ndev;
+       struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
+
+       ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
+       ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
+       ndev->stats.rx_errors = stats->erpr;
+       ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
+       ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
+       ndev->stats.tx_errors = stats->erpt;
+       ndev->stats.multicast = stats->mprc;
 }
 
 void aq_nic_get_link_ksettings(struct aq_nic_s *self,
index 4309983..3c9f8db 100644 (file)
@@ -71,7 +71,7 @@ struct aq_nic_cfg_s {
 
 struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
                                   const struct ethtool_ops *et_ops,
-                                  struct device *dev,
+                                  struct pci_dev *pdev,
                                   struct aq_pci_func_s *aq_pci_func,
                                   unsigned int port,
                                   const struct aq_hw_ops *aq_hw_ops);
index cadaa64..58c29d0 100644 (file)
@@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
        pci_set_drvdata(pdev, self);
        self->pdev = pdev;
 
-       err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps);
+       err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device,
+                                    pdev->subsystem_device);
        if (err < 0)
                goto err_exit;
 
@@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
 
        for (port = 0; port < self->ports; ++port) {
                struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
-                                                           &pdev->dev, self,
+                                                           pdev, self,
                                                            port, aq_hw_ops);
 
                if (!aq_nic) {
index 07b3c49..f18dce1 100644 (file)
 #include "hw_atl_a0_internal.h"
 
 static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
-                                struct aq_hw_caps_s *aq_hw_caps)
+                                struct aq_hw_caps_s *aq_hw_caps,
+                                unsigned short device,
+                                unsigned short subsystem_device)
 {
        memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
+
+       if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
+
+       if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
+       }
+
        return 0;
 }
 
@@ -333,6 +344,10 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
        hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
 
+       /* Reset link status and read out initial hardware counters */
+       self->aq_link_status.mbps = 0;
+       hw_atl_utils_update_stats(self);
+
        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;
index ec68c20..e4a22ce 100644 (file)
 #include "hw_atl_utils.h"
 #include "hw_atl_llh.h"
 #include "hw_atl_b0_internal.h"
+#include "hw_atl_llh_internal.h"
 
 static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
-                                struct aq_hw_caps_s *aq_hw_caps)
+                                struct aq_hw_caps_s *aq_hw_caps,
+                                unsigned short device,
+                                unsigned short subsystem_device)
 {
        memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
+
+       if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
+
+       if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G;
+       }
+
        return 0;
 }
 
@@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
        };
 
        int err = 0;
+       u32 val;
 
        self->aq_nic_cfg = aq_nic_cfg;
 
@@ -374,6 +387,20 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
        hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
 
+       /* Force limit MRRS on RDM/TDM to 2K */
+       val = aq_hw_read_reg(self, pci_reg_control6_adr);
+       aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
+
+       /* TX DMA total request limit. B0 hardware is not capable of
+        * handling more than (8K-MRRS) of incoming DMA data.
+        * The value 24 is in 256-byte units.
+        */
+       aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
+
+       /* Reset link status and read out initial hardware counters */
+       self->aq_link_status.mbps = 0;
+       hw_atl_utils_update_stats(self);
+
        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;
index 5527fc0..93450ec 100644 (file)
 #define tx_dma_desc_base_addrmsw_adr(descriptor) \
                        (0x00007c04u + (descriptor) * 0x40)
 
+/* tx dma total request limit */
+#define tx_dma_total_req_limit_adr 0x00007b20u
+
 /* tx interrupt moderation control register definitions
  * Preprocessor definitions for TX Interrupt Moderation Control Register
  * Base Address: 0x00008980
 /* default value of bitfield reg_res_dsbl */
 #define pci_reg_res_dsbl_default 0x1
 
+/* PCI core control register */
+#define pci_reg_control6_adr 0x1014u
+
 /* global microprocessor scratch pad definitions */
 #define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
 
index 1fe016f..f2ce12e 100644 (file)
@@ -503,73 +503,43 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
        struct hw_atl_s *hw_self = PHAL_ATLANTIC;
        struct hw_aq_atl_utils_mbox mbox;
 
-       if (!self->aq_link_status.mbps)
-               return 0;
-
        hw_atl_utils_mpi_read_stats(self, &mbox);
 
 #define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
                        mbox.stats._N_ - hw_self->last_stats._N_)
-
-       AQ_SDELTA(uprc);
-       AQ_SDELTA(mprc);
-       AQ_SDELTA(bprc);
-       AQ_SDELTA(erpt);
-
-       AQ_SDELTA(uptc);
-       AQ_SDELTA(mptc);
-       AQ_SDELTA(bptc);
-       AQ_SDELTA(erpr);
-
-       AQ_SDELTA(ubrc);
-       AQ_SDELTA(ubtc);
-       AQ_SDELTA(mbrc);
-       AQ_SDELTA(mbtc);
-       AQ_SDELTA(bbrc);
-       AQ_SDELTA(bbtc);
-       AQ_SDELTA(dpc);
-
+       if (self->aq_link_status.mbps) {
+               AQ_SDELTA(uprc);
+               AQ_SDELTA(mprc);
+               AQ_SDELTA(bprc);
+               AQ_SDELTA(erpt);
+
+               AQ_SDELTA(uptc);
+               AQ_SDELTA(mptc);
+               AQ_SDELTA(bptc);
+               AQ_SDELTA(erpr);
+
+               AQ_SDELTA(ubrc);
+               AQ_SDELTA(ubtc);
+               AQ_SDELTA(mbrc);
+               AQ_SDELTA(mbtc);
+               AQ_SDELTA(bbrc);
+               AQ_SDELTA(bbtc);
+               AQ_SDELTA(dpc);
+       }
 #undef AQ_SDELTA
+       hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
+       hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
+       hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
+       hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
 
        memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
 
        return 0;
 }
 
-int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
-                             u64 *data, unsigned int *p_count)
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
 {
-       struct hw_atl_s *hw_self = PHAL_ATLANTIC;
-       struct hw_atl_stats_s *stats = &hw_self->curr_stats;
-       int i = 0;
-
-       data[i] = stats->uprc + stats->mprc + stats->bprc;
-       data[++i] = stats->uprc;
-       data[++i] = stats->mprc;
-       data[++i] = stats->bprc;
-       data[++i] = stats->erpt;
-       data[++i] = stats->uptc + stats->mptc + stats->bptc;
-       data[++i] = stats->uptc;
-       data[++i] = stats->mptc;
-       data[++i] = stats->bptc;
-       data[++i] = stats->ubrc;
-       data[++i] = stats->ubtc;
-       data[++i] = stats->mbrc;
-       data[++i] = stats->mbtc;
-       data[++i] = stats->bbrc;
-       data[++i] = stats->bbtc;
-       data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
-       data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
-       data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
-       data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
-       data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
-       data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
-       data[++i] = stats->dpc;
-
-       if (p_count)
-               *p_count = ++i;
-
-       return 0;
+       return &PHAL_ATLANTIC->curr_stats;
 }
 
 static const u32 hw_atl_utils_hw_mac_regs[] = {
index c99cc69..21aeca6 100644 (file)
@@ -129,7 +129,7 @@ struct __packed hw_aq_atl_utils_mbox {
 struct __packed hw_atl_s {
        struct aq_hw_s base;
        struct hw_atl_stats_s last_stats;
-       struct hw_atl_stats_s curr_stats;
+       struct aq_stats_s curr_stats;
        u64 speed;
        unsigned int chip_features;
        u32 fw_ver_actual;
@@ -207,8 +207,6 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
 
 int hw_atl_utils_update_stats(struct aq_hw_s *self);
 
-int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
-                             u64 *data,
-                             unsigned int *p_count);
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
 
 #endif /* HW_ATL_UTILS_H */
index 0de858d..9009f26 100644 (file)
 #define VER_H
 
 #define NIC_MAJOR_DRIVER_VERSION           1
-#define NIC_MINOR_DRIVER_VERSION           5
-#define NIC_BUILD_DRIVER_VERSION           345
+#define NIC_MINOR_DRIVER_VERSION           6
+#define NIC_BUILD_DRIVER_VERSION           13
 #define NIC_REVISION_DRIVER_VERSION        0
 
+#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
+
 #endif /* VER_H */
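/*
 * For reference only, not part of the patch: a minimal user-space sketch of
 * how the driver version string built in aq_cfg.h expands once the new
 * AQ_CFG_DRV_VERSION_SUFFIX is appended.  It assumes __stringify() is the
 * usual two-level stringification helper; the numeric values are the ones
 * set in this header.
 */
#include <stdio.h>

#define __stringify_1(x)  #x
#define __stringify(x)    __stringify_1(x)

#define NIC_MAJOR_DRIVER_VERSION           1
#define NIC_MINOR_DRIVER_VERSION           6
#define NIC_BUILD_DRIVER_VERSION           13
#define NIC_REVISION_DRIVER_VERSION        0
#define AQ_CFG_DRV_VERSION_SUFFIX          "-kern"

#define AQ_CFG_DRV_VERSION     __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
                               __stringify(NIC_MINOR_DRIVER_VERSION)"."\
                               __stringify(NIC_BUILD_DRIVER_VERSION)"."\
                               __stringify(NIC_REVISION_DRIVER_VERSION) \
                               AQ_CFG_DRV_VERSION_SUFFIX

int main(void)
{
        /* Adjacent string literals concatenate: prints "1.6.13.0-kern" */
        puts(AQ_CFG_DRV_VERSION);
        return 0;
}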
index 3c63b16..d9efbc8 100644 (file)
@@ -159,6 +159,8 @@ struct arc_emac_priv {
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
+
+       unsigned int rx_missed_errors;
 };
 
 /**
index 3241af1..bd277b0 100644 (file)
@@ -26,6 +26,8 @@
 
 #include "emac.h"
 
+static void arc_emac_restart(struct net_device *ndev);
+
 /**
  * arc_emac_tx_avail - Return the number of available slots in the tx ring.
  * @priv: Pointer to ARC EMAC private data structure.
@@ -210,39 +212,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
                        continue;
                }
 
-               pktlen = info & LEN_MASK;
-               stats->rx_packets++;
-               stats->rx_bytes += pktlen;
-               skb = rx_buff->skb;
-               skb_put(skb, pktlen);
-               skb->dev = ndev;
-               skb->protocol = eth_type_trans(skb, ndev);
-
-               dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
-                                dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
-               /* Prepare the BD for next cycle */
-               rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
-                                                        EMAC_BUFFER_SIZE);
-               if (unlikely(!rx_buff->skb)) {
+               /* Prepare the BD for the next cycle. Call netif_receive_skb()
+                * only if a new skb was allocated and mapped, to avoid holes
+                * in the RX FIFO.
+                */
+               skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+               if (unlikely(!skb)) {
+                       if (net_ratelimit())
+                               netdev_err(ndev, "cannot allocate skb\n");
+                       /* Return ownership to EMAC */
+                       rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
                        stats->rx_errors++;
-                       /* Because receive_skb is below, increment rx_dropped */
                        stats->rx_dropped++;
                        continue;
                }
 
-               /* receive_skb only if new skb was allocated to avoid holes */
-               netif_receive_skb(skb);
-
-               addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+               addr = dma_map_single(&ndev->dev, (void *)skb->data,
                                      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
                if (dma_mapping_error(&ndev->dev, addr)) {
                        if (net_ratelimit())
-                               netdev_err(ndev, "cannot dma map\n");
-                       dev_kfree_skb(rx_buff->skb);
+                               netdev_err(ndev, "cannot map dma buffer\n");
+                       dev_kfree_skb(skb);
+                       /* Return ownership to EMAC */
+                       rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
                        stats->rx_errors++;
+                       stats->rx_dropped++;
                        continue;
                }
+
+               /* unmap the previously mapped skb */
+               dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+                                dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+               pktlen = info & LEN_MASK;
+               stats->rx_packets++;
+               stats->rx_bytes += pktlen;
+               skb_put(rx_buff->skb, pktlen);
+               rx_buff->skb->dev = ndev;
+               rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+               netif_receive_skb(rx_buff->skb);
+
+               rx_buff->skb = skb;
                dma_unmap_addr_set(rx_buff, addr, addr);
                dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
 
@@ -258,6 +269,53 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
        return work_done;
 }
 
+/**
+ * arc_emac_rx_miss_handle - handle R_MISS register
+ * @ndev:      Pointer to the net_device structure.
+ */
+static void arc_emac_rx_miss_handle(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       unsigned int miss;
+
+       miss = arc_reg_get(priv, R_MISS);
+       if (miss) {
+               stats->rx_errors += miss;
+               stats->rx_missed_errors += miss;
+               priv->rx_missed_errors += miss;
+       }
+}
+
+/**
+ * arc_emac_rx_stall_check - check RX stall
+ * @ndev:      Pointer to the net_device structure.
+ * @budget:    How many BDs were requested to be processed in one call.
+ * @work_done: How many BDs were processed
+ *
+ * Under certain conditions the EMAC stops receiving incoming packets and
+ * continuously increments the R_MISS register instead of saving data into
+ * the provided buffer. This function detects that condition and restarts
+ * the EMAC.
+ */
+static void arc_emac_rx_stall_check(struct net_device *ndev,
+                                   int budget, unsigned int work_done)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct arc_emac_bd *rxbd;
+
+       if (work_done)
+               priv->rx_missed_errors = 0;
+
+       if (priv->rx_missed_errors && budget) {
+               rxbd = &priv->rxbd[priv->last_rx_bd];
+               if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
+                       arc_emac_restart(ndev);
+                       priv->rx_missed_errors = 0;
+               }
+       }
+}
+
 /**
  * arc_emac_poll - NAPI poll handler.
  * @napi:      Pointer to napi_struct structure.
@@ -272,6 +330,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
        unsigned int work_done;
 
        arc_emac_tx_clean(ndev);
+       arc_emac_rx_miss_handle(ndev);
 
        work_done = arc_emac_rx(ndev, budget);
        if (work_done < budget) {
@@ -279,6 +338,8 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
                arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
        }
 
+       arc_emac_rx_stall_check(ndev, budget, work_done);
+
        return work_done;
 }
 
@@ -320,6 +381,8 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
                if (status & MSER_MASK) {
                        stats->rx_missed_errors += 0x100;
                        stats->rx_errors += 0x100;
+                       priv->rx_missed_errors += 0x100;
+                       napi_schedule(&priv->napi);
                }
 
                if (status & RXCR_MASK) {
@@ -732,6 +795,63 @@ static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 }
 
 
+/**
+ * arc_emac_restart - Restart EMAC
+ * @ndev:      Pointer to net_device structure.
+ *
+ * This function does a hardware reset of the EMAC in order to restore
+ * network packet reception.
+ */
+static void arc_emac_restart(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       int i;
+
+       if (net_ratelimit())
+               netdev_warn(ndev, "restarting stalled EMAC\n");
+
+       netif_stop_queue(ndev);
+
+       /* Disable interrupts */
+       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+       /* Disable EMAC */
+       arc_reg_clr(priv, R_CTRL, EN_MASK);
+
+       /* Return the sk_buffs to the system */
+       arc_free_tx_queue(ndev);
+
+       /* Clean Tx BD's */
+       priv->txbd_curr = 0;
+       priv->txbd_dirty = 0;
+       memset(priv->txbd, 0, TX_RING_SZ);
+
+       for (i = 0; i < RX_BD_NUM; i++) {
+               struct arc_emac_bd *rxbd = &priv->rxbd[i];
+               unsigned int info = le32_to_cpu(rxbd->info);
+
+               if (!(info & FOR_EMAC)) {
+                       stats->rx_errors++;
+                       stats->rx_dropped++;
+               }
+               /* Return ownership to EMAC */
+               rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+       }
+       priv->last_rx_bd = 0;
+
+       /* Make sure info is visible to EMAC before enable */
+       wmb();
+
+       /* Enable interrupts */
+       arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+       /* Enable EMAC */
+       arc_reg_or(priv, R_CTRL, EN_MASK);
+
+       netif_start_queue(ndev);
+}
+
 static const struct net_device_ops arc_emac_netdev_ops = {
        .ndo_open               = arc_emac_open,
        .ndo_stop               = arc_emac_stop,
index e278e3d..16f9bee 100644 (file)
@@ -199,9 +199,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
 
        /* RMII interface needs always a rate of 50MHz */
        err = clk_set_rate(priv->refclk, 50000000);
-       if (err)
+       if (err) {
                dev_err(dev,
                        "failed to change reference clock rate (%d)\n", err);
+               goto out_regulator_disable;
+       }
 
        if (priv->soc_data->need_div_macclk) {
                priv->macclk = devm_clk_get(dev, "macclk");
@@ -220,19 +222,24 @@ static int emac_rockchip_probe(struct platform_device *pdev)
 
                /* RMII TX/RX needs always a rate of 25MHz */
                err = clk_set_rate(priv->macclk, 25000000);
-               if (err)
+               if (err) {
                        dev_err(dev,
                                "failed to change mac clock rate (%d)\n", err);
+                       goto out_clk_disable_macclk;
+               }
        }
 
        err = arc_emac_probe(ndev, interface);
        if (err) {
                dev_err(dev, "failed to probe arc emac (%d)\n", err);
-               goto out_regulator_disable;
+               goto out_clk_disable_macclk;
        }
 
        return 0;
 
+out_clk_disable_macclk:
+       if (priv->soc_data->need_div_macclk)
+               clk_disable_unprepare(priv->macclk);
 out_regulator_disable:
        if (priv->regulator)
                regulator_disable(priv->regulator);
index 4c739d5..8ae269e 100644 (file)
@@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 
        del_timer_sync(&bp->timer);
 
-       if (IS_PF(bp)) {
+       if (IS_PF(bp) && !BP_NOMCP(bp)) {
                /* Set ALWAYS_ALIVE bit in shmem */
                bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
                bnx2x_drv_pulse(bp);
@@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
        bp->cnic_loaded = false;
 
        /* Clear driver version indication in shmem */
-       if (IS_PF(bp))
+       if (IS_PF(bp) && !BP_NOMCP(bp))
                bnx2x_update_mng_version(bp);
 
        /* Check if there are pending parity attentions. If there are - set
index 91e2a75..ddd5d3e 100644 (file)
@@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
 
        do {
                bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+
+               /* If we read all 0xFFs, it means we are in a PCI error state
+                * and should bail out to avoid crashes on the adapter's FW reads.
+                */
+               if (bp->common.shmem_base == 0xFFFFFFFF) {
+                       bp->flags |= NO_MCP_FLAG;
+                       return -ENODEV;
+               }
+
                if (bp->common.shmem_base) {
                        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
                        if (val & SHR_MEM_VALIDITY_MB)
@@ -14320,7 +14329,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
                BNX2X_ERR("IO slot reset --> driver unload\n");
 
                /* MCP should have been reset; Need to wait for validity */
-               bnx2x_init_shmem(bp);
+               if (bnx2x_init_shmem(bp)) {
+                       rtnl_unlock();
+                       return PCI_ERS_RESULT_DISCONNECT;
+               }
 
                if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
                        u32 v;
index c5c38d4..61ca4eb 100644 (file)
@@ -1883,7 +1883,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                         * here forever if we consistently cannot allocate
                         * buffers.
                         */
-                       else if (rc == -ENOMEM)
+                       else if (rc == -ENOMEM && budget)
                                rx_pkts++;
                        else if (rc == -EBUSY)  /* partial completion */
                                break;
@@ -1969,7 +1969,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
                                cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
 
                        rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
-                       if (likely(rc == -EIO))
+                       if (likely(rc == -EIO) && budget)
                                rx_pkts++;
                        else if (rc == -EBUSY)  /* partial completion */
                                break;
@@ -3368,6 +3368,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        u16 cp_ring_id, len = 0;
        struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
        u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
+       struct hwrm_short_input short_input = {0};
 
        req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
        memset(resp, 0, PAGE_SIZE);
@@ -3376,7 +3377,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 
        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
-               struct hwrm_short_input short_input = {0};
 
                memcpy(short_cmd_req, req, msg_len);
                memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
@@ -8263,8 +8263,9 @@ static void bnxt_shutdown(struct pci_dev *pdev)
        if (netif_running(dev))
                dev_close(dev);
 
+       bnxt_ulp_shutdown(bp);
+
        if (system_state == SYSTEM_POWER_OFF) {
-               bnxt_ulp_shutdown(bp);
                bnxt_clear_int_mode(bp);
                pci_wake_from_d3(pdev, bp->wol);
                pci_set_power_state(pdev, PCI_D3hot);
index 7ce1d4b..b13ce5e 100644 (file)
@@ -2136,8 +2136,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
        /* Read A2 portion of the EEPROM */
        if (length) {
                start -= ETH_MODULE_SFF_8436_LEN;
-               bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
-                                                length, data);
+               rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+                                                     start, length, data);
        }
        return rc;
 }
index 5ee1866..c961767 100644 (file)
@@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
                netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
                return -EINVAL;
        }
-       if (vf_id >= bp->pf.max_vfs) {
+       if (vf_id >= bp->pf.active_vfs) {
                netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
                return -EINVAL;
        }
index d5031f4..d8fee26 100644 (file)
@@ -56,7 +56,6 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
 {
        int ifindex = tcf_mirred_ifindex(tc_act);
        struct net_device *dev;
-       u16 dst_fid;
 
        dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
        if (!dev) {
@@ -64,15 +63,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
                return -EINVAL;
        }
 
-       /* find the FID from dev */
-       dst_fid = bnxt_flow_get_dst_fid(bp, dev);
-       if (dst_fid == BNXT_FID_INVALID) {
-               netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
-               return -EINVAL;
-       }
-
        actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
-       actions->dst_fid = dst_fid;
        actions->dst_dev = dev;
        return 0;
 }
@@ -160,13 +151,17 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
        if (rc)
                return rc;
 
-       /* Tunnel encap/decap action must be accompanied by a redirect action */
-       if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP ||
-            actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) &&
-           !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) {
-               netdev_info(bp->dev,
-                           "error: no redir action along with encap/decap");
-               return -EINVAL;
+       if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
+               if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
+                       /* dst_fid is PF's fid */
+                       actions->dst_fid = bp->pf.fw_fid;
+               } else {
+                       /* find the FID from dst_dev */
+                       actions->dst_fid =
+                               bnxt_flow_get_dst_fid(bp, actions->dst_dev);
+                       if (actions->dst_fid == BNXT_FID_INVALID)
+                               return -EINVAL;
+               }
        }
 
        return rc;
@@ -426,7 +421,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
        }
 
        /* If all IP and L4 fields are wildcarded then this is an L2 flow */
-       if (is_wildcard(&l3_mask, sizeof(l3_mask)) &&
+       if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
            is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
        } else {
@@ -532,10 +527,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
        }
 
        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
-               enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR |
-                          CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
+               enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
                ether_addr_copy(req.dst_macaddr, l2_info->dmac);
-               ether_addr_copy(req.src_macaddr, l2_info->smac);
        }
        if (l2_info->num_vlans) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
@@ -901,10 +894,10 @@ static void bnxt_tc_put_decap_handle(struct bnxt *bp,
 
 static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
                                       struct ip_tunnel_key *tun_key,
-                                      struct bnxt_tc_l2_key *l2_info,
-                                      struct net_device *real_dst_dev)
+                                      struct bnxt_tc_l2_key *l2_info)
 {
 #ifdef CONFIG_INET
+       struct net_device *real_dst_dev = bp->dev;
        struct flowi4 flow = { {0} };
        struct net_device *dst_dev;
        struct neighbour *nbr;
@@ -1008,14 +1001,13 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
         */
        tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
        tun_key.tp_dst = flow->tun_key.tp_dst;
-       rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev);
+       rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
        if (rc)
                goto put_decap;
 
-       decap_key->ttl = tun_key.ttl;
        decap_l2_info = &decap_node->l2_info;
+       /* decap smac is wildcarded */
        ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
-       ether_addr_copy(decap_l2_info->smac, l2_info.dmac);
        if (l2_info.num_vlans) {
                decap_l2_info->num_vlans = l2_info.num_vlans;
                decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
@@ -1095,8 +1087,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
        if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;
 
-       rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info,
-                                        flow->actions.dst_dev);
+       rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
        if (rc)
                goto put_encap;
 
@@ -1169,6 +1160,15 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
        return 0;
 }
 
+static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
+                               u16 src_fid)
+{
+       if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
+               flow->src_fid = bp->pf.fw_fid;
+       else
+               flow->src_fid = src_fid;
+}
+
 /* Add a new flow or replace an existing flow.
  * Notes on locking:
  * There are essentially two critical sections here.
@@ -1204,7 +1204,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
        rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
        if (rc)
                goto free_node;
-       flow->src_fid = src_fid;
+
+       bnxt_tc_set_src_fid(bp, flow, src_fid);
 
        if (!bnxt_tc_can_offload(bp, flow)) {
                rc = -ENOSPC;
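/*
 * For reference only, not part of the patch: the is_wildcard() argument fix
 * earlier in this file changes what the helper actually inspects.  A minimal
 * sketch, assuming the helper treats an all-zero mask as a wildcard (the
 * semantics implied by the surrounding code):
 */
#include <stdbool.h>
#include <stddef.h>

static bool is_wildcard_sketch(const void *mask, size_t len)
{
        const unsigned char *p = mask;
        size_t i;

        /* A mask is a wildcard only when every byte of it is zero. */
        for (i = 0; i < len; i++)
                if (p[i] != 0)
                        return false;
        return true;
}

/*
 * With a pointer-typed l3_mask, is_wildcard(l3_mask, sizeof(*l3_mask))
 * walks the bytes of the pointed-to mask structure, whereas the old
 * is_wildcard(&l3_mask, sizeof(l3_mask)) only examined the bytes of the
 * pointer variable itself.
 */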
index de51c21..8995cfe 100644 (file)
@@ -4,11 +4,13 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2014 Broadcom Corporation.
+ * Copyright (C) 2005-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
  *
  * Firmware is:
  *     Derived from proprietary unpublished source code,
- *     Copyright (C) 2000-2003 Broadcom Corporation.
+ *     Copyright (C) 2000-2016 Broadcom Corporation.
+ *     Copyright (C) 2016-2017 Broadcom Ltd.
  *
  *     Permission is hereby granted for the distribution of this firmware
  *     data in hexadecimal or equivalent format, provided this copyright
@@ -10052,6 +10054,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
 
        tw32(GRC_MODE, tp->grc_mode | val);
 
+       /* On one of the AMD platforms, MRRS is restricted to 4000 because of
+        * a south bridge limitation. As a workaround, the driver sets MRRS
+        * to 2048 instead of the default 4096.
+        */
+       if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+           tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
+               val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
+               tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
+       }
+
        /* Setup the timer prescalar register.  Clock is always 66Mhz. */
        val = tr32(GRC_MISC_CFG);
        val &= ~0xff;
@@ -14225,7 +14237,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
        /* Reset PHY, otherwise the read DMA engine will be in a mode that
         * breaks all requests to 256 bytes.
         */
-       if (tg3_asic_rev(tp) == ASIC_REV_57766)
+       if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
+           tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_asic_rev(tp) == ASIC_REV_5719 ||
+           tg3_asic_rev(tp) == ASIC_REV_5720)
                reset_phy = true;
 
        err = tg3_restart_hw(tp, reset_phy);
index c2d02d0..1f0271f 100644 (file)
@@ -5,7 +5,8 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2014 Broadcom Corporation.
+ * Copyright (C) 2007-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
  */
 
 #ifndef _T3_H
@@ -96,6 +97,7 @@
 #define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR                0x0106
 #define TG3PCI_SUBDEVICE_ID_DELL_MERLOT                0x0109
 #define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT   0x010a
+#define TG3PCI_SUBDEVICE_ID_DELL_5762          0x07f0
 #define TG3PCI_SUBVENDOR_ID_COMPAQ             PCI_VENDOR_ID_COMPAQ
 #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE     0x007c
 #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2   0x009a
 #define TG3PCI_STD_RING_PROD_IDX       0x00000098 /* 64-bit */
 #define TG3PCI_RCV_RET_RING_CON_IDX    0x000000a0 /* 64-bit */
 /* 0xa8 --> 0xb8 unused */
+#define TG3PCI_DEV_STATUS_CTRL         0x000000b4
+#define  MAX_READ_REQ_SIZE_2048                 0x00004000
+#define  MAX_READ_REQ_MASK              0x00007000
 #define TG3PCI_DUAL_MAC_CTRL           0x000000b8
 #define  DUAL_MAC_CTRL_CH_MASK          0x00000003
 #define  DUAL_MAC_CTRL_ID               0x00000004
index 6aa0eee..a5eecd8 100644 (file)
@@ -1113,7 +1113,7 @@ static int liquidio_watchdog(void *param)
                                dev_err(&oct->pci_dev->dev,
                                        "ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
                                        core);
-                                       err_msg_was_printed[core] = true;
+                               err_msg_was_printed[core] = true;
                        }
                }
 
index d4496e9..a3d12db 100644 (file)
@@ -1355,7 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 
        /* Offload checksum calculation to HW */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               hdr->csum_l3 = 1; /* Enable IP csum calculation */
+               if (ip.v4->version == 4)
+                       hdr->csum_l3 = 1; /* Enable IP csum calculation */
                hdr->l3_offset = skb_network_offset(skb);
                hdr->l4_offset = skb_transport_offset(skb);
 
index 6f9fa6e..d8424ed 100644 (file)
@@ -344,7 +344,6 @@ struct adapter_params {
 
        unsigned int sf_size;             /* serial flash size in bytes */
        unsigned int sf_nsec;             /* # of flash sectors */
-       unsigned int sf_fw_start;         /* start of FW image in flash */
 
        unsigned int fw_vers;             /* firmware version */
        unsigned int bs_vers;             /* bootstrap version */
index d4a548a..a452d5a 100644 (file)
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
                        ethtype_mask = 0;
                }
 
+               if (ethtype_key == ETH_P_IPV6)
+                       fs->type = 1;
+
                fs->val.ethtype = ethtype_key;
                fs->mask.ethtype = ethtype_mask;
                fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
                                           VLAN_PRIO_SHIFT);
                vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
                                                 VLAN_PRIO_SHIFT);
-               fs->val.ivlan = cpu_to_be16(vlan_tci);
-               fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);
+               fs->val.ivlan = vlan_tci;
+               fs->mask.ivlan = vlan_tci_mask;
 
                /* Chelsio adapters use ivlan_vld bit to match vlan packets
                 * as 802.1Q. Also, when vlan tag is present in packets,
index f63210f..375ef86 100644 (file)
@@ -2844,8 +2844,6 @@ enum {
        SF_RD_DATA_FAST = 0xb,        /* read flash */
        SF_RD_ID        = 0x9f,       /* read ID */
        SF_ERASE_SECTOR = 0xd8,       /* erase sector */
-
-       FW_MAX_SIZE = 16 * SF_SEC_SIZE,
 };
 
 /**
@@ -3558,8 +3556,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
        const __be32 *p = (const __be32 *)fw_data;
        const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
        unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
-       unsigned int fw_img_start = adap->params.sf_fw_start;
-       unsigned int fw_start_sec = fw_img_start / sf_sec_size;
+       unsigned int fw_start_sec = FLASH_FW_START_SEC;
+       unsigned int fw_size = FLASH_FW_MAX_SIZE;
+       unsigned int fw_start = FLASH_FW_START;
 
        if (!size) {
                dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -3575,9 +3574,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
                        "FW image size differs from size in FW header\n");
                return -EINVAL;
        }
-       if (size > FW_MAX_SIZE) {
+       if (size > fw_size) {
                dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
-                       FW_MAX_SIZE);
+                       fw_size);
                return -EFBIG;
        }
        if (!t4_fw_matches_chip(adap, hdr))
@@ -3604,11 +3603,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
         */
        memcpy(first_page, fw_data, SF_PAGE_SIZE);
        ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
-       ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
+       ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
        if (ret)
                goto out;
 
-       addr = fw_img_start;
+       addr = fw_start;
        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
                addr += SF_PAGE_SIZE;
                fw_data += SF_PAGE_SIZE;
@@ -3618,7 +3617,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
        }
 
        ret = t4_write_flash(adap,
-                            fw_img_start + offsetof(struct fw_hdr, fw_ver),
+                            fw_start + offsetof(struct fw_hdr, fw_ver),
                             sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
 out:
        if (ret)
index 410a0a9..b3e7faf 100644 (file)
@@ -1913,3 +1913,7 @@ static struct platform_driver cs89x0_driver = {
 module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
 
 #endif /* CONFIG_CS89x0_PLATFORM */
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
+MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>");
index c6e859a..e180657 100644 (file)
@@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)
 
        be_schedule_worker(adapter);
 
+       /*
+        * The IF was destroyed and re-created. We need to clear
+        * all promiscuous flags valid for the destroyed IF.
+        * Without this, promisc mode is not restored during
+        * be_open() because the driver thinks it is
+        * already enabled in HW.
+        */
+       adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+
        if (netif_running(netdev))
                status = be_open(netdev);
 
index 6105738..a74300a 100644 (file)
@@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev)
                for (i = 0; i < txq->bd.ring_size; i++) {
                        /* Initialize the BD for every fragment in the page. */
                        bdp->cbd_sc = cpu_to_fec16(0);
+                       if (bdp->cbd_bufaddr &&
+                           !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+                               dma_unmap_single(&fep->pdev->dev,
+                                                fec32_to_cpu(bdp->cbd_bufaddr),
+                                                fec16_to_cpu(bdp->cbd_datlen),
+                                                DMA_TO_DEVICE);
                        if (txq->tx_skbuff[i]) {
                                dev_kfree_skb_any(txq->tx_skbuff[i]);
                                txq->tx_skbuff[i] = NULL;
@@ -3463,6 +3469,10 @@ fec_probe(struct platform_device *pdev)
                        goto failed_regulator;
                }
        } else {
+               if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
+                       ret = -EPROBE_DEFER;
+                       goto failed_regulator;
+               }
                fep->reg_phy = NULL;
        }
 
@@ -3546,8 +3556,9 @@ failed_clk_ipg:
 failed_clk:
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
-failed_phy:
        of_node_put(phy_node);
+failed_phy:
+       dev_id--;
 failed_ioremap:
        free_netdev(ndev);
 
index 7892f2f..2c2976a 100644 (file)
@@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-static void fs_timeout(struct net_device *dev)
+static void fs_timeout_work(struct work_struct *work)
 {
-       struct fs_enet_private *fep = netdev_priv(dev);
+       struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
+                                                  timeout_work);
+       struct net_device *dev = fep->ndev;
        unsigned long flags;
        int wake = 0;
 
@@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev)
                phy_stop(dev->phydev);
                (*fep->ops->stop)(dev);
                (*fep->ops->restart)(dev);
-               phy_start(dev->phydev);
        }
 
        phy_start(dev->phydev);
@@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev)
                netif_wake_queue(dev);
 }
 
+static void fs_timeout(struct net_device *dev)
+{
+       struct fs_enet_private *fep = netdev_priv(dev);
+
+       schedule_work(&fep->timeout_work);
+}
+
 /*-----------------------------------------------------------------------------
  *  generic link-change handler - should be sufficient for most cases
  *-----------------------------------------------------------------------------*/
@@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev)
        netif_stop_queue(dev);
        netif_carrier_off(dev);
        napi_disable(&fep->napi);
+       cancel_work_sync(&fep->timeout_work);
        phy_stop(dev->phydev);
 
        spin_lock_irqsave(&fep->lock, flags);
@@ -1019,6 +1028,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
        ndev->netdev_ops = &fs_enet_netdev_ops;
        ndev->watchdog_timeo = 2 * HZ;
+       INIT_WORK(&fep->timeout_work, fs_timeout_work);
        netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
 
        ndev->ethtool_ops = &fs_ethtool_ops;
index 92e06b3..195fae6 100644 (file)
@@ -125,6 +125,7 @@ struct fs_enet_private {
        spinlock_t lock;        /* during all ops except TX pckt processing */
        spinlock_t tx_lock;     /* during fs_start_xmit and fs_tx         */
        struct fs_platform_info *fpi;
+       struct work_struct timeout_work;
        const struct fs_ops *ops;
        int rx_ring, tx_ring;
        dma_addr_t ring_mem_addr;
index 5be52d8..7f83700 100644 (file)
@@ -1378,9 +1378,11 @@ static int gfar_probe(struct platform_device *ofdev)
 
        gfar_init_addr_hash_table(priv);
 
-       /* Insert receive time stamps into padding alignment bytes */
+       /* Insert receive time stamps into padding alignment bytes,
+        * plus 2 bytes of padding to ensure CPU alignment.
+        */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
-               priv->padding = 8;
+               priv->padding = 8 + DEFAULT_PADDING;
 
        if (dev->features & NETIF_F_IP_CSUM ||
            priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
@@ -1790,6 +1792,7 @@ static int init_phy(struct net_device *dev)
                GFAR_SUPPORTED_GBIT : 0;
        phy_interface_t interface;
        struct phy_device *phydev;
+       struct ethtool_eee edata;
 
        priv->oldlink = 0;
        priv->oldspeed = 0;
@@ -1814,6 +1817,10 @@ static int init_phy(struct net_device *dev)
        /* Add support for flow control, but don't advertise it by default */
        phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
 
+       /* disable EEE autoneg, EEE not supported by eTSEC */
+       memset(&edata, 0, sizeof(struct ethtool_eee));
+       phy_ethtool_set_eee(phydev, &edata);
+
        return 0;
 }
 
index 5441142..9f8d4f8 100644 (file)
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
        now = tmr_cnt_read(etsects);
        now += delta;
        tmr_cnt_write(etsects, now);
+       set_fipers(etsects);
 
        spin_unlock_irqrestore(&etsects->lock, flags);
 
-       set_fipers(etsects);
-
        return 0;
 }
 
index 7feff24..241db31 100644 (file)
@@ -494,6 +494,9 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
        case 16384:
                ret |= EMAC_MR1_RFS_16K;
                break;
+       case 8192:
+               ret |= EMAC4_MR1_RFS_8K;
+               break;
        case 4096:
                ret |= EMAC_MR1_RFS_4K;
                break;
@@ -516,6 +519,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
        case 16384:
                ret |= EMAC4_MR1_TFS_16K;
                break;
+       case 8192:
+               ret |= EMAC4_MR1_TFS_8K;
+               break;
        case 4096:
                ret |= EMAC4_MR1_TFS_4K;
                break;
index 5afcc27..c26d263 100644 (file)
@@ -151,9 +151,11 @@ struct emac_regs {
 
 #define EMAC4_MR1_RFS_2K               0x00100000
 #define EMAC4_MR1_RFS_4K               0x00180000
+#define EMAC4_MR1_RFS_8K               0x00200000
 #define EMAC4_MR1_RFS_16K              0x00280000
 #define EMAC4_MR1_TFS_2K                       0x00020000
 #define EMAC4_MR1_TFS_4K               0x00030000
+#define EMAC4_MR1_TFS_8K               0x00040000
 #define EMAC4_MR1_TFS_16K              0x00050000
 #define EMAC4_MR1_TR                   0x00008000
 #define EMAC4_MR1_MWSW_001             0x00001000
@@ -242,7 +244,7 @@ struct emac_regs {
 #define EMAC_STACR_PHYE                        0x00004000
 #define EMAC_STACR_STAC_MASK           0x00003000
 #define EMAC_STACR_STAC_READ           0x00001000
-#define EMAC_STACR_STAC_WRITE          0x00002000
+#define EMAC_STACR_STAC_WRITE          0x00000800
 #define EMAC_STACR_OPBC_MASK           0x00000C00
 #define EMAC_STACR_OPBC_50             0x00000000
 #define EMAC_STACR_OPBC_66             0x00000400
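
The two hunks above add the missing 8 KiB Rx/Tx FIFO encodings so an 8192-byte FIFO no longer falls through to a smaller setting. As a standalone illustration (not driver code; the helper name is hypothetical, the mask values are the defines shown above), this is how a FIFO size maps to an MR1 field:

#include <stdio.h>

/* Values taken from the defines above; other sizes omitted for brevity. */
#define EMAC4_MR1_RFS_4K  0x00180000
#define EMAC4_MR1_RFS_8K  0x00200000
#define EMAC4_MR1_RFS_16K 0x00280000

/* Hypothetical helper mirroring the RFS switch above: return the RFS bits
 * for a given Rx FIFO size, or 0 when the size has no encoding. */
static unsigned int emac4_rfs_bits(int rx_size)
{
	switch (rx_size) {
	case 16384: return EMAC4_MR1_RFS_16K;
	case 8192:  return EMAC4_MR1_RFS_8K;	/* the case added above */
	case 4096:  return EMAC4_MR1_RFS_4K;
	default:    return 0;
	}
}

int main(void)
{
	printf("8K Rx FIFO encodes as 0x%08x\n", emac4_rfs_bits(8192));
	return 0;
}
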
index 1dc4aef..b65f5f3 100644 (file)
@@ -410,6 +410,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
        struct ibmvnic_rx_pool *rx_pool;
        int rx_scrqs;
        int i, j, rc;
+       u64 *size_array;
+
+       size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+               be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
 
        rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
        for (i = 0; i < rx_scrqs; i++) {
@@ -417,7 +421,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 
                netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
 
-               rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+               if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+                       free_long_term_buff(adapter, &rx_pool->long_term_buff);
+                       rx_pool->buff_size = be64_to_cpu(size_array[i]);
+                       alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+                                            rx_pool->size *
+                                            rx_pool->buff_size);
+               } else {
+                       rc = reset_long_term_buff(adapter,
+                                                 &rx_pool->long_term_buff);
+               }
+
                if (rc)
                        return rc;
 
@@ -439,14 +453,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 static void release_rx_pools(struct ibmvnic_adapter *adapter)
 {
        struct ibmvnic_rx_pool *rx_pool;
-       int rx_scrqs;
        int i, j;
 
        if (!adapter->rx_pool)
                return;
 
-       rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
-       for (i = 0; i < rx_scrqs; i++) {
+       for (i = 0; i < adapter->num_active_rx_pools; i++) {
                rx_pool = &adapter->rx_pool[i];
 
                netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +481,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 
        kfree(adapter->rx_pool);
        adapter->rx_pool = NULL;
+       adapter->num_active_rx_pools = 0;
 }
 
 static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +506,8 @@ static int init_rx_pools(struct net_device *netdev)
                return -1;
        }
 
+       adapter->num_active_rx_pools = 0;
+
        for (i = 0; i < rxadd_subcrqs; i++) {
                rx_pool = &adapter->rx_pool[i];
 
@@ -536,6 +551,8 @@ static int init_rx_pools(struct net_device *netdev)
                rx_pool->next_free = 0;
        }
 
+       adapter->num_active_rx_pools = rxadd_subcrqs;
+
        return 0;
 }
 
@@ -586,13 +603,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
 static void release_tx_pools(struct ibmvnic_adapter *adapter)
 {
        struct ibmvnic_tx_pool *tx_pool;
-       int i, tx_scrqs;
+       int i;
 
        if (!adapter->tx_pool)
                return;
 
-       tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
-       for (i = 0; i < tx_scrqs; i++) {
+       for (i = 0; i < adapter->num_active_tx_pools; i++) {
                netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
                tx_pool = &adapter->tx_pool[i];
                kfree(tx_pool->tx_buff);
@@ -603,6 +619,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
 
        kfree(adapter->tx_pool);
        adapter->tx_pool = NULL;
+       adapter->num_active_tx_pools = 0;
 }
 
 static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +636,8 @@ static int init_tx_pools(struct net_device *netdev)
        if (!adapter->tx_pool)
                return -1;
 
+       adapter->num_active_tx_pools = 0;
+
        for (i = 0; i < tx_subcrqs; i++) {
                tx_pool = &adapter->tx_pool[i];
 
@@ -666,6 +685,8 @@ static int init_tx_pools(struct net_device *netdev)
                tx_pool->producer_index = 0;
        }
 
+       adapter->num_active_tx_pools = tx_subcrqs;
+
        return 0;
 }
 
@@ -756,6 +777,12 @@ static int ibmvnic_login(struct net_device *netdev)
                }
        } while (adapter->renegotiate);
 
+       /* handle pending MAC address changes after successful login */
+       if (adapter->mac_change_pending) {
+               __ibmvnic_set_mac(netdev, &adapter->desired.mac);
+               adapter->mac_change_pending = false;
+       }
+
        return 0;
 }
 
@@ -854,7 +881,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
        if (adapter->vpd->buff)
                len = adapter->vpd->len;
 
-       reinit_completion(&adapter->fw_done);
+       init_completion(&adapter->fw_done);
        crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
        crq.get_vpd_size.cmd = GET_VPD_SIZE;
        ibmvnic_send_crq(adapter, &crq);
@@ -916,6 +943,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
        if (!adapter->vpd)
                return -ENOMEM;
 
+       /* Vital Product Data (VPD) */
+       rc = ibmvnic_get_vpd(adapter);
+       if (rc) {
+               netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
+               return rc;
+       }
+
        adapter->map_id = 1;
        adapter->napi = kcalloc(adapter->req_rx_queues,
                                sizeof(struct napi_struct), GFP_KERNEL);
@@ -989,15 +1023,10 @@ static int __ibmvnic_open(struct net_device *netdev)
 static int ibmvnic_open(struct net_device *netdev)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-       int rc, vpd;
+       int rc;
 
        mutex_lock(&adapter->reset_lock);
 
-       if (adapter->mac_change_pending) {
-               __ibmvnic_set_mac(netdev, &adapter->desired.mac);
-               adapter->mac_change_pending = false;
-       }
-
        if (adapter->state != VNIC_CLOSED) {
                rc = ibmvnic_login(netdev);
                if (rc) {
@@ -1017,11 +1046,6 @@ static int ibmvnic_open(struct net_device *netdev)
        rc = __ibmvnic_open(netdev);
        netif_carrier_on(netdev);
 
-       /* Vital Product Data (VPD) */
-       vpd = ibmvnic_get_vpd(adapter);
-       if (vpd)
-               netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
-
        mutex_unlock(&adapter->reset_lock);
 
        return rc;
@@ -1275,6 +1299,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        unsigned char *dst;
        u64 *handle_array;
        int index = 0;
+       u8 proto = 0;
        int ret = 0;
 
        if (adapter->resetting) {
@@ -1363,17 +1388,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        }
 
        if (skb->protocol == htons(ETH_P_IP)) {
-               if (ip_hdr(skb)->version == 4)
-                       tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
-               else if (ip_hdr(skb)->version == 6)
-                       tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
-
-               if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-                       tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
-               else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
-                       tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+               tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
+               proto = ip_hdr(skb)->protocol;
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
+               proto = ipv6_hdr(skb)->nexthdr;
        }
 
+       if (proto == IPPROTO_TCP)
+               tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
+       else if (proto == IPPROTO_UDP)
+               tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
                hdrs += 2;
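
The rewritten block above reads the L4 protocol from the correct header (ip_hdr() for IPv4, ipv6_hdr() for IPv6) instead of testing ip_hdr(skb)->version inside the ETH_P_IP branch, which could mis-flag IPv6 and UDP traffic. A standalone sketch of the corrected decision, with hypothetical flag values and plain integers standing in for the skb:

#include <stdio.h>

/* Hypothetical flag values for illustration only. */
#define TX_PROT_IPV4	0x1
#define TX_PROT_IPV6	0x2
#define TX_PROT_TCP	0x4
#define TX_PROT_UDP	0x8

#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD
#define IPPROTO_TCP	6
#define IPPROTO_UDP	17

/* ethertype: from the Ethernet header; l4_proto: ip->protocol for IPv4 or
 * ipv6->nexthdr for IPv6 -- the same split the hunk above introduces. */
static unsigned int tx_flags(unsigned int ethertype, unsigned int l4_proto)
{
	unsigned int flags = 0;

	if (ethertype == ETH_P_IP)
		flags |= TX_PROT_IPV4;
	else if (ethertype == ETH_P_IPV6)
		flags |= TX_PROT_IPV6;

	if (l4_proto == IPPROTO_TCP)
		flags |= TX_PROT_TCP;
	else if (l4_proto == IPPROTO_UDP)
		flags |= TX_PROT_UDP;

	return flags;
}

int main(void)
{
	/* An IPv6/UDP packet now gets both the IPV6 and UDP flags. */
	printf("IPv6 + UDP -> 0x%x\n", tx_flags(ETH_P_IPV6, IPPROTO_UDP));
	return 0;
}
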
@@ -1527,7 +1553,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
 
-       if (adapter->state != VNIC_OPEN) {
+       if (adapter->state == VNIC_PROBED) {
                memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
                adapter->mac_change_pending = true;
                return 0;
@@ -1545,6 +1571,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 static int do_reset(struct ibmvnic_adapter *adapter,
                    struct ibmvnic_rwi *rwi, u32 reset_state)
 {
+       u64 old_num_rx_queues, old_num_tx_queues;
        struct net_device *netdev = adapter->netdev;
        int i, rc;
 
@@ -1554,6 +1581,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
        netif_carrier_off(netdev);
        adapter->reset_reason = rwi->reset_reason;
 
+       old_num_rx_queues = adapter->req_rx_queues;
+       old_num_tx_queues = adapter->req_tx_queues;
+
        if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
                rc = ibmvnic_reenable_crq_queue(adapter);
                if (rc)
@@ -1598,6 +1628,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                        rc = init_resources(adapter);
                        if (rc)
                                return rc;
+               } else if (adapter->req_rx_queues != old_num_rx_queues ||
+                          adapter->req_tx_queues != old_num_tx_queues) {
+                       release_rx_pools(adapter);
+                       release_tx_pools(adapter);
+                       init_rx_pools(netdev);
+                       init_tx_pools(netdev);
                } else {
                        rc = reset_tx_pools(adapter);
                        if (rc)
@@ -3345,7 +3381,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
                return;
        }
 
+       adapter->ip_offload_ctrl.len =
+           cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
        adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
+       adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
+       adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
        adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
        adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
        adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
@@ -3585,7 +3625,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
                         *req_value,
                         (long int)be64_to_cpu(crq->request_capability_rsp.
                                               number), name);
-               *req_value = be64_to_cpu(crq->request_capability_rsp.number);
+
+               if (be16_to_cpu(crq->request_capability_rsp.capability) ==
+                   REQ_MTU) {
+                       pr_err("mtu of %llu is not supported. Reverting.\n",
+                              *req_value);
+                       *req_value = adapter->fallback.mtu;
+               } else {
+                       *req_value =
+                               be64_to_cpu(crq->request_capability_rsp.number);
+               }
+
                ibmvnic_send_req_caps(adapter, 1);
                return;
        default:
index 4487f1e..3aec421 100644 (file)
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
        u64 opt_rxba_entries_per_subcrq;
        __be64 tx_rx_desc_req;
        u8 map_id;
+       u64 num_active_rx_pools;
+       u64 num_active_tx_pools;
 
        struct tasklet_struct tasklet;
        enum vnic_state state;
index d7bdea7..8fd2458 100644 (file)
@@ -331,7 +331,8 @@ struct e1000_adapter {
 enum e1000_state_t {
        __E1000_TESTING,
        __E1000_RESETTING,
-       __E1000_DOWN
+       __E1000_DOWN,
+       __E1000_DISABLED
 };
 
 #undef pr_fmt
index 8172cf0..3bac9df 100644 (file)
@@ -4307,8 +4307,10 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
 
        rar_num = E1000_RAR_ENTRIES;
 
-       /* Zero out the other 15 receive addresses. */
-       e_dbg("Clearing RAR[1-15]\n");
+       /* Zero out the following 14 receive addresses. RAR[15] is for
+        * manageability
+        */
+       e_dbg("Clearing RAR[1-14]\n");
        for (i = 1; i < rar_num; i++) {
                E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
                E1000_WRITE_FLUSH();
index 1982f79..3dd4aeb 100644 (file)
@@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct net_device *netdev;
-       struct e1000_adapter *adapter;
+       struct e1000_adapter *adapter = NULL;
        struct e1000_hw *hw;
 
        static int cards_found;
@@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        u16 tmp = 0;
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
        int bars, need_ioport;
+       bool disable_dev = false;
 
        /* do not allocate ioport bars when not needed */
        need_ioport = e1000_is_need_ioport(pdev);
@@ -1259,11 +1260,13 @@ err_mdio_ioremap:
        iounmap(hw->ce4100_gbe_mdio_base_virt);
        iounmap(hw->hw_addr);
 err_ioremap:
+       disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
        free_netdev(netdev);
 err_alloc_etherdev:
        pci_release_selected_regions(pdev, bars);
 err_pci_reg:
-       pci_disable_device(pdev);
+       if (!adapter || disable_dev)
+               pci_disable_device(pdev);
        return err;
 }
 
@@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       bool disable_dev;
 
        e1000_down_and_stop(adapter);
        e1000_release_manageability(adapter);
@@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
                iounmap(hw->flash_address);
        pci_release_selected_regions(pdev, adapter->bars);
 
+       disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
        free_netdev(netdev);
 
-       pci_disable_device(pdev);
+       if (disable_dev)
+               pci_disable_device(pdev);
 }
 
 /**
@@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
        if (netif_running(netdev))
                e1000_free_irq(adapter);
 
-       pci_disable_device(pdev);
+       if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+               pci_disable_device(pdev);
 
        return 0;
 }
@@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
                pr_err("Cannot enable PCI device from suspend\n");
                return err;
        }
+
+       /* flush memory to make sure state is correct */
+       smp_mb__before_atomic();
+       clear_bit(__E1000_DISABLED, &adapter->flags);
        pci_set_master(pdev);
 
        pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 
        if (netif_running(netdev))
                e1000_down(adapter);
-       pci_disable_device(pdev);
+
+       if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+               pci_disable_device(pdev);
 
        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
@@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
                pr_err("Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
+
+       /* flush memory to make sure state is correct */
+       smp_mb__before_atomic();
+       clear_bit(__E1000_DISABLED, &adapter->flags);
        pci_set_master(pdev);
 
        pci_enable_wake(pdev, PCI_D3hot, 0);
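
The e1000 hunks above add an __E1000_DISABLED flag so that pci_disable_device() runs at most once even when the probe-failure, remove, shutdown and error-handler paths overlap, while the resume/slot-reset paths clear it again. A minimal userspace sketch of the same disable-once idea, using C11 atomics in place of test_and_set_bit() (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag disabled = ATOMIC_FLAG_INIT;

/* Stand-in for pci_disable_device(): must not run twice for one device. */
static void fake_disable_device(void)
{
	puts("device disabled");
}

/* First caller wins, like "if (!test_and_set_bit(...)) ...". */
static bool disable_once(void)
{
	if (!atomic_flag_test_and_set(&disabled)) {
		fake_disable_device();
		return true;
	}
	return false;
}

int main(void)
{
	disable_once();			/* e.g. shutdown: disables the device */
	disable_once();			/* e.g. remove: flag already set, no-op */
	atomic_flag_clear(&disabled);	/* e.g. resume: device usable again */
	return 0;
}
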
index d6d4ed7..31277d3 100644 (file)
@@ -1367,6 +1367,9 @@ out:
  *  Checks to see of the link status of the hardware has changed.  If a
  *  change in link status has been detected, then we read the PHY registers
  *  to get the current speed/duplex if link exists.
+ *
+ *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ *  up).
  **/
 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 {
@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
         * Change or Rx Sequence Error interrupt.
         */
        if (!mac->get_link_status)
-               return 0;
+               return 1;
 
        /* First we want to see if the MII Status Register reports
         * link.  If so, then we want to get the current speed/duplex
@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
         * different link partner.
         */
        ret_val = e1000e_config_fc_after_link_up(hw);
-       if (ret_val)
+       if (ret_val) {
                e_dbg("Error configuring flow control\n");
+               return ret_val;
+       }
 
-       return ret_val;
+       return 1;
 }
 
 static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
index 67163ca..00a36df 100644 (file)
 #define NVM_SIZE_MULTIPLIER 4096       /*multiplier for NVMS field */
 #define E1000_FLASH_BASE_ADDR 0xE000   /*offset of NVM access regs */
 #define E1000_CTRL_EXT_NVMVS 0x3       /*NVM valid sector */
-#define E1000_TARC0_CB_MULTIQ_3_REQ    (1 << 28 | 1 << 29)
+#define E1000_TARC0_CB_MULTIQ_3_REQ    0x30000000
+#define E1000_TARC0_CB_MULTIQ_2_REQ    0x20000000
 #define PCIE_ICH8_SNOOP_ALL    PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES  7
index f2f4923..9f18d39 100644 (file)
@@ -3034,9 +3034,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
                ew32(IOSFPC, reg_val);
 
                reg_val = er32(TARC(0));
-               /* SPT and KBL Si errata workaround to avoid Tx hang */
-               reg_val &= ~BIT(28);
-               reg_val |= BIT(29);
+               /* SPT and KBL Si errata workaround to avoid Tx hang.
+                * Dropping the number of outstanding requests from
+                * 3 to 2 in order to avoid a buffer overrun.
+                */
+               reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
+               reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
                ew32(TARC(0), reg_val);
        }
 }
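
The define hunk earlier replaces the open-coded BIT(28)/BIT(29) manipulation with named masks, and the code above now clears the 3-outstanding-requests encoding and sets the 2-request one. A small standalone check (not driver code) that the named masks cover exactly the bits the old code touched:

#include <assert.h>
#include <stdio.h>

#define BIT(n)				(1u << (n))
#define E1000_TARC0_CB_MULTIQ_3_REQ	0x30000000
#define E1000_TARC0_CB_MULTIQ_2_REQ	0x20000000

int main(void)
{
	unsigned int reg_val = 0xdeadbeef;	/* arbitrary register contents */

	/* The new mask is exactly bits 28 and 29, which the old code
	 * manipulated individually. */
	assert(E1000_TARC0_CB_MULTIQ_3_REQ == (BIT(28) | BIT(29)));

	/* The update the driver performs: 3 -> 2 outstanding requests. */
	reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
	reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;

	printf("TARC(0) would be written as 0x%08x\n", reg_val);
	return 0;
}
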
index 7f60522..a434fec 100644 (file)
@@ -2463,7 +2463,6 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
        return err;
 }
 
-#ifdef CONFIG_PM
 /**
  * fm10k_resume - Generic PM resume hook
  * @dev: generic device structure
@@ -2472,7 +2471,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
  * suspend or hibernation. This function does not need to handle lower PCIe
  * device state as the stack takes care of that for us.
  **/
-static int fm10k_resume(struct device *dev)
+static int __maybe_unused fm10k_resume(struct device *dev)
 {
        struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
        struct net_device *netdev = interface->netdev;
@@ -2499,7 +2498,7 @@ static int fm10k_resume(struct device *dev)
  * system suspend or hibernation. This function does not need to handle lower
  * PCIe device state as the stack takes care of that for us.
  **/
-static int fm10k_suspend(struct device *dev)
+static int __maybe_unused fm10k_suspend(struct device *dev)
 {
        struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
        struct net_device *netdev = interface->netdev;
@@ -2511,8 +2510,6 @@ static int fm10k_suspend(struct device *dev)
        return 0;
 }
 
-#endif /* CONFIG_PM */
-
 /**
  * fm10k_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -2643,11 +2640,9 @@ static struct pci_driver fm10k_driver = {
        .id_table               = fm10k_pci_tbl,
        .probe                  = fm10k_probe,
        .remove                 = fm10k_remove,
-#ifdef CONFIG_PM
        .driver = {
                .pm             = &fm10k_pm_ops,
        },
-#endif /* CONFIG_PM */
        .sriov_configure        = fm10k_iov_configure,
        .err_handler            = &fm10k_err_handler
 };
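
The fm10k hunks above drop the #ifdef CONFIG_PM guards and mark the suspend/resume hooks __maybe_unused instead, so the compiler always parses them (catching bit-rot) without warning when nothing references them. A tiny illustration of the attribute the kernel macro roughly expands to, assuming a GCC/Clang toolchain:

#include <stdio.h>

/* The kernel's __maybe_unused is essentially this attribute. */
#define __maybe_unused __attribute__((__unused__))

/* Builds cleanly with -Wunused-function even though nothing calls it,
 * which is what happens to a PM hook when its only user is compiled out. */
static int __maybe_unused demo_resume(void *dev)
{
	(void)dev;
	return 0;
}

int main(void)
{
	puts("demo_resume() is present but unreferenced");
	return 0;
}
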
index 4c08cc8..af79211 100644 (file)
@@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
        else
                netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
 
+       /* Copy the address first, so that we avoid a possible race with
+        * .set_rx_mode(). If we copy after changing the address in the filter
+        * list, we might open ourselves to a narrow race window where
+        * .set_rx_mode could delete our dev_addr filter and prevent traffic
+        * from passing.
+        */
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
        spin_lock_bh(&vsi->mac_filter_hash_lock);
        i40e_del_mac_filter(vsi, netdev->dev_addr);
        i40e_add_mac_filter(vsi, addr->sa_data);
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
-       ether_addr_copy(netdev->dev_addr, addr->sa_data);
        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;
 
@@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 
+       /* Under some circumstances, we might receive a request to delete
+        * our own device address from our uc list. Because we store the
+        * device address in the VSI's MAC/VLAN filter list, we need to ignore
+        * such requests and not delete our device address from this list.
+        */
+       if (ether_addr_equal(addr, netdev->dev_addr))
+               return 0;
+
        i40e_del_mac_filter(vsi, addr);
 
        return 0;
@@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
        /* Set Bit 7 to be valid */
        mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
 
-       /* Set L4type to both TCP and UDP support */
-       mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
+       /* Set L4type for TCP support */
+       mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
 
        /* Set cloud filter mode */
        mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
             is_valid_ether_addr(filter->src_mac)) ||
            (is_multicast_ether_addr(filter->dst_mac) &&
             is_multicast_ether_addr(filter->src_mac)))
-               return -EINVAL;
+               return -EOPNOTSUPP;
 
-       /* Make sure port is specified, otherwise bail out, for channel
-        * specific cloud filter needs 'L4 port' to be non-zero
+       /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
+        * ports are not supported via big buffer now.
         */
-       if (!filter->dst_port)
-               return -EINVAL;
+       if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
+               return -EOPNOTSUPP;
 
        /* adding filter using src_port/src_ip is not supported at this stage */
        if (filter->src_port || filter->src_ipv4 ||
            !ipv6_addr_any(&filter->ip.v6.src_ip6))
-               return -EINVAL;
+               return -EOPNOTSUPP;
 
        /* copy element needed to add cloud filter from filter */
        i40e_set_cld_element(filter, &cld_filter.element);
@@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
            is_multicast_ether_addr(filter->src_mac)) {
                /* MAC + IP : unsupported mode */
                if (filter->dst_ipv4)
-                       return -EINVAL;
+                       return -EOPNOTSUPP;
 
                /* since we validated that L4 port must be valid before
                 * we get here, start with respective "flags" value
@@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
 
        if (tc < 0) {
                dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
-               return -EINVAL;
+               return -EOPNOTSUPP;
        }
 
        if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
@@ -7401,7 +7416,6 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
                dev_err(&pf->pdev->dev,
                        "Failed to add cloud filter, err %s\n",
                        i40e_stat_str(&pf->hw, err));
-               err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
                goto err;
        }
 
@@ -7491,6 +7505,8 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
 {
        struct i40e_vsi *vsi = np->vsi;
 
+       if (!tc_can_offload(vsi->netdev))
+               return -EOPNOTSUPP;
        if (cls_flower->common.chain_index)
                return -EOPNOTSUPP;
 
index 4566d66..5bc2748 100644 (file)
@@ -3047,10 +3047,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
-       stale = &skb_shinfo(skb)->frags[0];
-       for (;;) {
+       for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+               int stale_size = skb_frag_size(stale);
+
                sum += skb_frag_size(frag++);
 
+               /* The stale fragment may present us with a smaller
+                * descriptor than the actual fragment size. To account
+                * for that we need to remove all the data on the front and
+                * figure out what the remainder would be in the last
+                * descriptor associated with the fragment.
+                */
+               if (stale_size > I40E_MAX_DATA_PER_TXD) {
+                       int align_pad = -(stale->page_offset) &
+                                       (I40E_MAX_READ_REQ_SIZE - 1);
+
+                       sum -= align_pad;
+                       stale_size -= align_pad;
+
+                       do {
+                               sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+                               stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+                       } while (stale_size > I40E_MAX_DATA_PER_TXD);
+               }
+
                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
                        return true;
@@ -3058,7 +3078,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
                if (!nr_frags--)
                        break;
 
-               sum -= skb_frag_size(stale++);
+               sum -= stale_size;
        }
 
        return false;
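
The loop above has to model how the hardware splits one oversized fragment across several Tx descriptors: strip the unaligned head (align_pad), peel off whole aligned descriptors, and keep only the tail in the running sum. A standalone sketch of that arithmetic; the constant values are assumed from the i40e headers and are not part of this diff:

#include <stdio.h>

/* Assumed i40e values (not shown in this diff). */
#define MAX_READ_REQ_SIZE	4096
#define MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define MAX_DATA_PER_TXD_ALIGNED \
	(MAX_DATA_PER_TXD & ~(MAX_READ_REQ_SIZE - 1))

/* How much of a fragment lands in its final descriptor once the head
 * padding and the full aligned descriptors are removed -- the same
 * accounting the hunk above applies to stale_size. */
static int last_descriptor_remainder(int page_offset, int frag_size)
{
	int size = frag_size;

	if (size > MAX_DATA_PER_TXD) {
		/* Unaligned head up to the next read-request boundary. */
		int align_pad = -page_offset & (MAX_READ_REQ_SIZE - 1);

		size -= align_pad;
		do {
			size -= MAX_DATA_PER_TXD_ALIGNED;
		} while (size > MAX_DATA_PER_TXD);
	}
	return size;
}

int main(void)
{
	/* A 32 KiB fragment that starts 100 bytes into a page. */
	printf("tail descriptor carries %d bytes\n",
	       last_descriptor_remainder(100, 32 * 1024));
	return 0;
}
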
index a3dc9b9..36cb8e0 100644 (file)
@@ -2086,7 +2086,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
        }
 
        return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
-                                     (u8 *)vfres, sizeof(vfres));
+                                     (u8 *)vfres, sizeof(*vfres));
 }
 
 /**
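
The one-line fix above sends sizeof(*vfres), the size of the reply structure, instead of sizeof(vfres), the size of a pointer, so the whole reply is copied to the VF. A trivial standalone demonstration of the difference, using a made-up reply struct:

#include <stdio.h>

struct reply {			/* hypothetical stand-in for the virtchnl reply */
	unsigned int num_queue_pairs;
	unsigned int pad[15];
};

int main(void)
{
	struct reply r = { 0 };
	struct reply *vfres = &r;

	/* sizeof(vfres) is the pointer size; sizeof(*vfres) is the payload size. */
	printf("sizeof(vfres)  = %zu\n", sizeof(vfres));
	printf("sizeof(*vfres) = %zu\n", sizeof(*vfres));
	return 0;
}
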
index 50864f9..1ba29bb 100644 (file)
@@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
-       stale = &skb_shinfo(skb)->frags[0];
-       for (;;) {
+       for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+               int stale_size = skb_frag_size(stale);
+
                sum += skb_frag_size(frag++);
 
+               /* The stale fragment may present us with a smaller
+                * descriptor than the actual fragment size. To account
+                * for that we need to remove all the data on the front and
+                * figure out what the remainder would be in the last
+                * descriptor associated with the fragment.
+                */
+               if (stale_size > I40E_MAX_DATA_PER_TXD) {
+                       int align_pad = -(stale->page_offset) &
+                                       (I40E_MAX_READ_REQ_SIZE - 1);
+
+                       sum -= align_pad;
+                       stale_size -= align_pad;
+
+                       do {
+                               sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+                               stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+                       } while (stale_size > I40E_MAX_DATA_PER_TXD);
+               }
+
                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
                        return true;
@@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
                if (!nr_frags--)
                        break;
 
-               sum -= skb_frag_size(stale++);
+               sum -= stale_size;
        }
 
        return false;
index c979821..0495487 100644 (file)
@@ -344,7 +344,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
                        dev->regs + MVMDIO_ERR_INT_MASK);
 
        } else if (dev->err_interrupt == -EPROBE_DEFER) {
-               return -EPROBE_DEFER;
+               ret = -EPROBE_DEFER;
+               goto out_mdio;
        }
 
        if (pdev->dev.of_node)
index bc93b69..a539263 100644 (file)
@@ -1214,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
        val &= ~MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 
+       pp->link = 0;
+       pp->duplex = -1;
+       pp->speed = 0;
+
        udelay(200);
 }
 
@@ -1958,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 
                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+                       mvneta_rx_error(pp, rx_desc);
 err_drop_frame:
                        dev->stats.rx_errors++;
-                       mvneta_rx_error(pp, rx_desc);
                        /* leave the descriptor untouched */
                        continue;
                }
@@ -3011,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
        int queue;
 
-       for (queue = 0; queue < txq_number; queue++)
+       for (queue = 0; queue < rxq_number; queue++)
                mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }
 
index 6c20e81..634b2f4 100644 (file)
@@ -85,7 +85,7 @@
 
 /* RSS Registers */
 #define MVPP22_RSS_INDEX                       0x1500
-#define     MVPP22_RSS_INDEX_TABLE_ENTRY(idx)  ((idx) << 8)
+#define     MVPP22_RSS_INDEX_TABLE_ENTRY(idx)  (idx)
 #define     MVPP22_RSS_INDEX_TABLE(idx)                ((idx) << 8)
 #define     MVPP22_RSS_INDEX_QUEUE(idx)                ((idx) << 16)
 #define MVPP22_RSS_TABLE_ENTRY                 0x1508
@@ -4629,11 +4629,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
                       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
                val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
                writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-               val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-               val |= MVPP2_GMAC_DISABLE_PADDING;
-               val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
-               writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
        } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
                val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
                val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
@@ -4641,10 +4636,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
                       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
                val &= ~MVPP22_CTRL4_DP_CLK_SEL;
                writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-               val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-               val &= ~MVPP2_GMAC_DISABLE_PADDING;
-               writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
        }
 
        /* The port is connected to a copper PHY */
@@ -5607,7 +5598,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
        u32 txq_dma;
 
        /* Allocate memory for TX descriptors */
-       aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+       aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
                                MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
                                &aggr_txq->descs_dma, GFP_KERNEL);
        if (!aggr_txq->descs)
@@ -5805,7 +5796,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
                                                sizeof(*txq_pcpu->buffs),
                                                GFP_KERNEL);
                if (!txq_pcpu->buffs)
-                       goto cleanup;
+                       return -ENOMEM;
 
                txq_pcpu->count = 0;
                txq_pcpu->reserved_num = 0;
@@ -5821,26 +5812,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
                                           &txq_pcpu->tso_headers_dma,
                                           GFP_KERNEL);
                if (!txq_pcpu->tso_headers)
-                       goto cleanup;
+                       return -ENOMEM;
        }
 
        return 0;
-cleanup:
-       for_each_present_cpu(cpu) {
-               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-               kfree(txq_pcpu->buffs);
-
-               dma_free_coherent(port->dev->dev.parent,
-                                 txq_pcpu->size * TSO_HEADER_SIZE,
-                                 txq_pcpu->tso_headers,
-                                 txq_pcpu->tso_headers_dma);
-       }
-
-       dma_free_coherent(port->dev->dev.parent,
-                         txq->size * MVPP2_DESC_ALIGNED_SIZE,
-                         txq->descs, txq->descs_dma);
-
-       return -ENOMEM;
 }
 
 /* Free allocated TXQ resources */
@@ -6867,6 +6842,12 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
        else if (!IS_ALIGNED(ring->tx_pending, 32))
                new_tx_pending = ALIGN(ring->tx_pending, 32);
 
+       /* The Tx ring size cannot be smaller than the minimum number of
+        * descriptors needed for TSO.
+        */
+       if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
+               new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
+
        if (ring->rx_pending != new_rx_pending) {
                netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
                            ring->rx_pending, new_rx_pending);
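
The check added above keeps the Tx ring at least as large as the number of descriptors one TSO skb can require, on top of the existing round-up to a multiple of 32. A small sketch of that clamping with ALIGN() written out; the real MVPP2_MAX_SKB_DESCS depends on MAX_SKB_FRAGS, so a placeholder value is used here:

#include <stdio.h>

/* Round x up to a multiple of a (a power of two), like the kernel's ALIGN(). */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

#define MAX_SKB_DESCS	21	/* placeholder for MVPP2_MAX_SKB_DESCS */

static int clamp_tx_ring(int requested)
{
	int new_tx_pending = requested;

	if (new_tx_pending % 32)		/* not a multiple of 32 */
		new_tx_pending = ALIGN(requested, 32);

	/* Never smaller than what a single TSO skb may need. */
	if (new_tx_pending < MAX_SKB_DESCS)
		new_tx_pending = ALIGN(MAX_SKB_DESCS, 32);

	return new_tx_pending;
}

int main(void)
{
	printf("requested 16 -> %d descriptors\n", clamp_tx_ring(16));
	return 0;
}
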
@@ -8345,7 +8326,7 @@ static int mvpp2_probe(struct platform_device *pdev)
        for_each_available_child_of_node(dn, port_node) {
                err = mvpp2_port_probe(pdev, port_node, priv, i);
                if (err < 0)
-                       goto err_mg_clk;
+                       goto err_port_probe;
                i++;
        }
 
@@ -8361,12 +8342,19 @@ static int mvpp2_probe(struct platform_device *pdev)
        priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
        if (!priv->stats_queue) {
                err = -ENOMEM;
-               goto err_mg_clk;
+               goto err_port_probe;
        }
 
        platform_set_drvdata(pdev, priv);
        return 0;
 
+err_port_probe:
+       i = 0;
+       for_each_available_child_of_node(dn, port_node) {
+               if (priv->port_list[i])
+                       mvpp2_port_remove(priv->port_list[i]);
+               i++;
+       }
 err_mg_clk:
        clk_disable_unprepare(priv->axi_clk);
        if (priv->hw_version == MVPP22)
index 6e423f0..31efc47 100644 (file)
@@ -4081,7 +4081,6 @@ static void skge_remove(struct pci_dev *pdev)
        if (hw->ports > 1) {
                skge_write32(hw, B0_IMSK, 0);
                skge_read32(hw, B0_IMSK);
-               free_irq(pdev->irq, hw);
        }
        spin_unlock_irq(&hw->hw_lock);
 
index 54adfd9..fc67e35 100644 (file)
@@ -1961,11 +1961,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
        /* set GE2 TUNE */
        regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
 
-       /* GE1, Force 1000M/FD, FC ON */
-       mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
-
-       /* GE2, Force 1000M/FD, FC ON */
-       mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+       /* Set link down as the default for each GMAC. Each GMAC's MCR is
+        * set up with the appropriate value once mtk_phy_link_adjust() is
+        * invoked.
+        */
+       for (i = 0; i < MTK_MAC_COUNT; i++)
+               mtk_w32(eth, 0, MTK_MAC_MCR(i));
 
        /* Tell the CDM to parse the MTK special tag from the CPU,
         * which also works for untagged packets.
index e0eb695..1fa4849 100644 (file)
@@ -188,7 +188,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        struct net_device *dev = mdev->pndev[port];
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
-       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;
        u64 in_mod = reset << 8 | port;
        int err;
        int i, counter_index;
@@ -198,6 +198,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
+
+       mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
+       if (IS_ERR(mailbox_priority)) {
+               mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+               return PTR_ERR(mailbox_priority);
+       }
+
        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
                           MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
                           MLX4_CMD_NATIVE);
@@ -206,6 +213,28 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 
        mlx4_en_stats = mailbox->buf;
 
+       memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
+       counter_index = mlx4_get_default_counter_index(mdev->dev, port);
+       err = mlx4_get_counter_stats(mdev->dev, counter_index,
+                                    &tmp_counter_stats, reset);
+
+       /* 0xffs indicates invalid value */
+       memset(mailbox_priority->buf, 0xff,
+              sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+
+       if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
+               memset(mailbox_priority->buf, 0,
+                      sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+               err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
+                                  in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
+                                  0, MLX4_CMD_DUMP_ETH_STATS,
+                                  MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+               if (err)
+                       goto out;
+       }
+
+       flowstats = mailbox_priority->buf;
+
        spin_lock_bh(&priv->stats_lock);
 
        mlx4_en_fold_software_stats(dev);
@@ -345,31 +374,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
        priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
 
-       spin_unlock_bh(&priv->stats_lock);
-
-       memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
-       counter_index = mlx4_get_default_counter_index(mdev->dev, port);
-       err = mlx4_get_counter_stats(mdev->dev, counter_index,
-                                    &tmp_counter_stats, reset);
-
-       /* 0xffs indicates invalid value */
-       memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
-
-       if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
-               memset(mailbox->buf, 0,
-                      sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
-               err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
-                                  in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
-                                  0, MLX4_CMD_DUMP_ETH_STATS,
-                                  MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
-               if (err)
-                       goto out;
-       }
-
-       flowstats = mailbox->buf;
-
-       spin_lock_bh(&priv->stats_lock);
-
        if (tmp_counter_stats.counter_mode == 0) {
                priv->pf_stats.rx_bytes   = be64_to_cpu(tmp_counter_stats.rx_bytes);
                priv->pf_stats.tx_bytes   = be64_to_cpu(tmp_counter_stats.tx_bytes);
@@ -410,6 +414,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 
 out:
        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+       mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
        return err;
 }
 
index 88699b1..946d9db 100644 (file)
@@ -185,7 +185,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
                if (priv->mdev->dev->caps.flags &
                                        MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
                        buf[3] = mlx4_en_test_registers(priv);
-                       if (priv->port_up)
+                       if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU)
                                buf[4] = mlx4_en_test_loopback(priv);
                }
 
index 1856e27..2b72677 100644 (file)
 #define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)
 #define HEADER_COPY_SIZE       (128 - NET_IP_ALIGN)
 #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
+#define PREAMBLE_LEN           8
+#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
+                                 ETH_HLEN + PREAMBLE_LEN)
 
 #define MLX4_EN_MIN_MTU                46
 /* VLAN_HLEN is added twice, to support skb vlan tagged with multiple
index 04304dd..606a0e0 100644 (file)
@@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
                                                MLX4_MAX_PORTS;
                                else
                                        res_alloc->guaranteed[t] = 0;
-                               res_alloc->res_free -= res_alloc->guaranteed[t];
                                break;
                        default:
                                break;
index 1fffdeb..e9a1fbc 100644 (file)
@@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
-       case MLX5_CMD_OP_SET_RATE_LIMIT:
+       case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
        case MLX5_CMD_OP_QUERY_RATE_LIMIT:
        case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
@@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
-       MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+       MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
index c0872b3..c2d89bf 100644 (file)
@@ -82,6 +82,9 @@
        max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
 #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
 #define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
+#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
+       (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
+       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
 
 #define MLX5_MPWRQ_LOG_WQE_SZ                  18
 #define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -590,6 +593,7 @@ struct mlx5e_channel {
        struct mlx5_core_dev      *mdev;
        struct hwtstamp_config    *tstamp;
        int                        ix;
+       int                        cpu;
 };
 
 struct mlx5e_channels {
@@ -891,7 +895,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
                           u16 vid);
 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_timestamp_set(struct mlx5e_priv *priv);
+void mlx5e_timestamp_init(struct mlx5e_priv *priv);
 
 struct mlx5e_redirect_rqt_param {
        bool is_rss;
@@ -935,8 +939,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
-                             struct mlx5e_params *params, u8 rq_type);
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+                              struct mlx5e_params *params,
+                              u8 rq_type);
 
 static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
 {
index c6d90b6..3d46ef4 100644 (file)
@@ -274,6 +274,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
                                    struct ieee_ets *ets)
 {
+       bool have_ets_tc = false;
        int bw_sum = 0;
        int i;
 
@@ -288,11 +289,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
        }
 
        /* Validate Bandwidth Sum */
-       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+                       have_ets_tc = true;
                        bw_sum += ets->tc_tx_bw[i];
+               }
+       }
 
-       if (bw_sum != 0 && bw_sum != 100) {
+       if (have_ets_tc && bw_sum != 100) {
                netdev_err(netdev,
                           "Failed to validate ETS: BW sum is illegal\n");
                return -EINVAL;
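
The change above only enforces the 100% bandwidth-sum rule when at least one traffic class actually selects the ETS algorithm, so an all-strict-priority configuration (sum 0) is no longer rejected. A compact standalone version of the corrected check; the IEEE 802.1Qaz TSA encoding below is assumed, not taken from this diff:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TCS		8
#define TSA_STRICT	0	/* assumed 802.1Qaz encodings */
#define TSA_ETS		2

/* Returns 0 when valid, -1 when the ETS bandwidth sum is illegal --
 * the same shape as the validation fixed above. */
static int validate_ets(const int tsa[MAX_TCS], const int bw[MAX_TCS])
{
	bool have_ets_tc = false;
	int bw_sum = 0;

	for (int i = 0; i < MAX_TCS; i++) {
		if (tsa[i] == TSA_ETS) {
			have_ets_tc = true;
			bw_sum += bw[i];
		}
	}

	return (have_ets_tc && bw_sum != 100) ? -1 : 0;
}

int main(void)
{
	int tsa[MAX_TCS] = { TSA_STRICT };	/* every TC strict, none ETS */
	int bw[MAX_TCS] = { 0 };

	/* Used to fail because bw_sum was 0; accepted with the fix. */
	printf("all-strict config valid? %s\n",
	       validate_ets(tsa, bw) == 0 ? "yes" : "no");
	return 0;
}
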
@@ -918,8 +922,9 @@ static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
 
 static void mlx5e_ets_init(struct mlx5e_priv *priv)
 {
-       int i;
        struct ieee_ets ets;
+       int err;
+       int i;
 
        if (!MLX5_CAP_GEN(priv->mdev, ets))
                return;
@@ -932,11 +937,16 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
                ets.prio_tc[i] = i;
        }
 
-       /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
-       ets.prio_tc[0] = 1;
-       ets.prio_tc[1] = 0;
+       if (ets.ets_cap > 1) {
+               /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
+               ets.prio_tc[0] = 1;
+               ets.prio_tc[1] = 0;
+       }
 
-       mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+       err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+       if (err)
+               netdev_err(priv->netdev,
+                          "%s, Failed to init ETS: %d\n", __func__, err);
 }
 
 enum {
index 23425f0..ea5fff2 100644 (file)
@@ -207,8 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
                return;
 
        mutex_lock(&priv->state_lock);
-       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-               mlx5e_update_stats(priv, true);
+       mlx5e_update_stats(priv, true);
        mutex_unlock(&priv->state_lock);
 
        for (i = 0; i < mlx5e_num_stats_grps; i++)
@@ -1523,8 +1522,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
        new_channels.params = priv->channels.params;
        MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
 
-       mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
-                                new_channels.params.rq_wq_type);
+       new_channels.params.mpwqe_log_stride_sz =
+               MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
+       new_channels.params.mpwqe_log_num_strides =
+               MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->channels.params = new_channels.params;
@@ -1536,6 +1537,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
                return err;
 
        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+       mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
+                 MLX5E_GET_PFLAG(&priv->channels.params,
+                                 MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
+
        return 0;
 }
 
index d2b057a..d8aefee 100644 (file)
@@ -71,11 +71,6 @@ struct mlx5e_channel_param {
        struct mlx5e_cq_param      icosq_cq;
 };
 
-static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
-{
-       return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
-}
-
 static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
        return MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -83,8 +78,8 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
                MLX5_CAP_ETH(mdev, reg_umr_sq);
 }
 
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
-                             struct mlx5e_params *params, u8 rq_type)
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+                              struct mlx5e_params *params, u8 rq_type)
 {
        params->rq_wq_type = rq_type;
        params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -93,10 +88,8 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
                params->log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
-               params->mpwqe_log_stride_sz =
-                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
-                       MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
-                       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+               params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
+                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
                params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
                        params->mpwqe_log_stride_sz;
                break;
@@ -120,13 +113,14 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
 
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
+                               struct mlx5e_params *params)
 {
        u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
                    !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
                    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                    MLX5_WQ_TYPE_LINKED_LIST;
-       mlx5e_set_rq_type_params(mdev, params, rq_type);
+       mlx5e_init_rq_type_params(mdev, params, rq_type);
 }
 
 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -444,17 +438,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
        int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
-       int node = mlx5e_get_node(c->priv, c->ix);
        int i;
 
        rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
-                                       GFP_KERNEL, node);
+                                     GFP_KERNEL, cpu_to_node(c->cpu));
        if (!rq->mpwqe.info)
                goto err_out;
 
        /* We allocate more than mtt_sz as we will align the pointer */
-       rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz,
-                                       GFP_KERNEL, node);
+       rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+                                       cpu_to_node(c->cpu));
        if (unlikely(!rq->mpwqe.mtt_no_align))
                goto err_free_wqe_info;
 
@@ -562,7 +555,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        int err;
        int i;
 
-       rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       rqp->wq.db_numa_node = cpu_to_node(c->cpu);
 
        err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
@@ -629,8 +622,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                rq->wqe.frag_info =
                        kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
-                                    GFP_KERNEL,
-                                    mlx5e_get_node(c->priv, c->ix));
+                                    GFP_KERNEL, cpu_to_node(c->cpu));
                if (!rq->wqe.frag_info) {
                        err = -ENOMEM;
                        goto err_rq_wq_destroy;
@@ -1000,13 +992,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1053,13 +1045,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1126,13 +1118,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1504,8 +1496,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
        struct mlx5_core_dev *mdev = c->priv->mdev;
        int err;
 
-       param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix);
-       param->wq.db_numa_node  = mlx5e_get_node(c->priv, c->ix);
+       param->wq.buf_numa_node = cpu_to_node(c->cpu);
+       param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix   = c->ix;
 
        err = mlx5e_alloc_cq_common(mdev, param, cq);
@@ -1604,6 +1596,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
        mlx5e_free_cq(cq);
 }
 
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+       return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
                             struct mlx5e_channel_param *cparam)
@@ -1752,12 +1749,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 {
        struct mlx5e_cq_moder icocq_moder = {0, 0};
        struct net_device *netdev = priv->netdev;
+       int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        unsigned int irq;
        int err;
        int eqn;
 
-       c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix));
+       c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;
 
@@ -1765,6 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        c->mdev     = priv->mdev;
        c->tstamp   = &priv->tstamp;
        c->ix       = ix;
+       c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
@@ -1853,8 +1852,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_activate_txqsq(&c->sq[tc]);
        mlx5e_activate_rq(&c->rq);
-       netif_set_xps_queue(c->netdev,
-               mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
+       netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
 }
 
 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2671,7 +2669,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
                netif_carrier_on(netdev);
 }
 
-void mlx5e_timestamp_set(struct mlx5e_priv *priv)
+void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
        priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
        priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -2692,7 +2690,6 @@ int mlx5e_open_locked(struct net_device *netdev)
        mlx5e_activate_priv_channels(priv);
        if (priv->profile->update_carrier)
                priv->profile->update_carrier(priv);
-       mlx5e_timestamp_set(priv);
 
        if (priv->profile->update_stats)
                queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -3221,12 +3218,12 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
        return 0;
 }
 
-#define MLX5E_SET_FEATURE(netdev, feature, enable)     \
+#define MLX5E_SET_FEATURE(features, feature, enable)   \
        do {                                            \
                if (enable)                             \
-                       netdev->features |= feature;    \
+                       *features |= feature;           \
                else                                    \
-                       netdev->features &= ~feature;   \
+                       *features &= ~feature;          \
        } while (0)
 
 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
@@ -3349,6 +3346,7 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
 #endif
 
 static int mlx5e_handle_feature(struct net_device *netdev,
+                               netdev_features_t *features,
                                netdev_features_t wanted_features,
                                netdev_features_t feature,
                                mlx5e_feature_handler feature_handler)
@@ -3367,34 +3365,40 @@ static int mlx5e_handle_feature(struct net_device *netdev,
                return err;
        }
 
-       MLX5E_SET_FEATURE(netdev, feature, enable);
+       MLX5E_SET_FEATURE(features, feature, enable);
        return 0;
 }
 
 static int mlx5e_set_features(struct net_device *netdev,
                              netdev_features_t features)
 {
+       netdev_features_t oper_features = netdev->features;
        int err;
 
-       err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
-                                   set_feature_lro);
-       err |= mlx5e_handle_feature(netdev, features,
+       err  = mlx5e_handle_feature(netdev, &oper_features, features,
+                                   NETIF_F_LRO, set_feature_lro);
+       err |= mlx5e_handle_feature(netdev, &oper_features, features,
                                    NETIF_F_HW_VLAN_CTAG_FILTER,
                                    set_feature_cvlan_filter);
-       err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
-                                   set_feature_tc_num_filters);
-       err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
-                                   set_feature_rx_all);
-       err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
-                                   set_feature_rx_fcs);
-       err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
-                                   set_feature_rx_vlan);
+       err |= mlx5e_handle_feature(netdev, &oper_features, features,
+                                   NETIF_F_HW_TC, set_feature_tc_num_filters);
+       err |= mlx5e_handle_feature(netdev, &oper_features, features,
+                                   NETIF_F_RXALL, set_feature_rx_all);
+       err |= mlx5e_handle_feature(netdev, &oper_features, features,
+                                   NETIF_F_RXFCS, set_feature_rx_fcs);
+       err |= mlx5e_handle_feature(netdev, &oper_features, features,
+                                   NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
 #ifdef CONFIG_RFS_ACCEL
-       err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
-                                   set_feature_arfs);
+       err |= mlx5e_handle_feature(netdev, &oper_features, features,
+                                   NETIF_F_NTUPLE, set_feature_arfs);
 #endif
 
-       return err ? -EINVAL : 0;
+       if (err) {
+               netdev->features = oper_features;
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
@@ -3679,6 +3683,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                                                     struct sk_buff *skb,
                                                     netdev_features_t features)
 {
+       unsigned int offset = 0;
        struct udphdr *udph;
        u8 proto;
        u16 port;
@@ -3688,7 +3693,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                proto = ip_hdr(skb)->protocol;
                break;
        case htons(ETH_P_IPV6):
-               proto = ipv6_hdr(skb)->nexthdr;
+               proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
                break;
        default:
                goto out;
@@ -4140,6 +4145,8 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
        INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+       mlx5e_timestamp_init(priv);
 }
 
 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
index 2c43606..3409d86 100644 (file)
@@ -877,6 +877,8 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
 
        mlx5e_build_rep_params(mdev, &priv->channels.params);
        mlx5e_build_rep_netdev(netdev);
+
+       mlx5e_timestamp_init(priv);
 }
 
 static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
index e401d9d..b69a705 100644 (file)
@@ -201,9 +201,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
                return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
                                                   MLX5E_AM_STATS_WORSE;
 
+       if (!prev->ppms)
+               return curr->ppms ? MLX5E_AM_STATS_BETTER :
+                                   MLX5E_AM_STATS_SAME;
+
        if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
                return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
                                                   MLX5E_AM_STATS_WORSE;
+       if (!prev->epms)
+               return MLX5E_AM_STATS_SAME;
 
        if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
                return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
index 1f1f8af..5a46082 100644 (file)
@@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
        int err = 0;
 
        /* Temporarily enable local_lb */
-       if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
-               mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
-               if (!lbtp->local_lb)
-                       mlx5_nic_vport_update_local_lb(priv->mdev, true);
+       err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
+       if (err)
+               return err;
+
+       if (!lbtp->local_lb) {
+               err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
+               if (err)
+                       return err;
        }
 
        err = mlx5e_refresh_tirs(priv, true);
        if (err)
-               return err;
+               goto out;
 
        lbtp->loopback_ok = false;
        init_completion(&lbtp->comp);
@@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
        lbtp->pt.dev = priv->netdev;
        lbtp->pt.af_packet_priv = lbtp;
        dev_add_pack(&lbtp->pt);
+
+       return 0;
+
+out:
+       if (!lbtp->local_lb)
+               mlx5_nic_vport_update_local_lb(priv->mdev, false);
+
        return err;
 }
 
 static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
                                        struct mlx5e_lbt_priv *lbtp)
 {
-       if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
-               if (!lbtp->local_lb)
-                       mlx5_nic_vport_update_local_lb(priv->mdev, false);
-       }
+       if (!lbtp->local_lb)
+               mlx5_nic_vport_update_local_lb(priv->mdev, false);
 
        dev_remove_pack(&lbtp->pt);
        mlx5e_refresh_tirs(priv, false);
index 6077186..e7e7cef 100644 (file)
@@ -466,7 +466,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
                        break;
                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
-                       mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
+                       mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                                       cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;
@@ -775,7 +775,7 @@ err1:
        return err;
 }
 
-int mlx5_stop_eqs(struct mlx5_core_dev *dev)
+void mlx5_stop_eqs(struct mlx5_core_dev *dev)
 {
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;
@@ -784,22 +784,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
        if (MLX5_CAP_GEN(dev, pg)) {
                err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
                if (err)
-                       return err;
+                       mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
+                                     err);
        }
 #endif
 
        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
-               return err;
+               mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
+                             err);
 
-       mlx5_destroy_unmap_eq(dev, &table->async_eq);
+       err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
+       if (err)
+               mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
+                             err);
        mlx5_cmd_use_polling(dev);
 
        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
-               mlx5_cmd_use_events(dev);
-
-       return err;
+               mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
+                             err);
 }
 
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
index 3c11d6e..1496296 100644 (file)
@@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
        u8 actual_size;
        int err;
 
+       if (!size)
+               return -EINVAL;
+
        if (!fdev->mdev)
                return -ENOTCONN;
 
@@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
        u8 actual_size;
        int err;
 
+       if (!size)
+               return -EINVAL;
+
        if (!fdev->mdev)
                return -ENOTCONN;
 
index c70fd66..dfaad9e 100644 (file)
@@ -174,6 +174,8 @@ static void del_hw_fte(struct fs_node *node);
 static void del_sw_flow_table(struct fs_node *node);
 static void del_sw_flow_group(struct fs_node *node);
 static void del_sw_fte(struct fs_node *node);
+static void del_sw_prio(struct fs_node *node);
+static void del_sw_ns(struct fs_node *node);
 /* Delete rule (destination) is a special case that
  * requires locking the FTE for the whole deletion process.
  */
@@ -408,6 +410,16 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
        return NULL;
 }
 
+static void del_sw_ns(struct fs_node *node)
+{
+       kfree(node);
+}
+
+static void del_sw_prio(struct fs_node *node)
+{
+       kfree(node);
+}
+
 static void del_hw_flow_table(struct fs_node *node)
 {
        struct mlx5_flow_table *ft;
@@ -2064,7 +2076,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
                return ERR_PTR(-ENOMEM);
 
        fs_prio->node.type = FS_TYPE_PRIO;
-       tree_init_node(&fs_prio->node, NULL, NULL);
+       tree_init_node(&fs_prio->node, NULL, del_sw_prio);
        tree_add_node(&fs_prio->node, &ns->node);
        fs_prio->num_levels = num_levels;
        fs_prio->prio = prio;
@@ -2090,7 +2102,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
                return ERR_PTR(-ENOMEM);
 
        fs_init_namespace(ns);
-       tree_init_node(&ns->node, NULL, NULL);
+       tree_init_node(&ns->node, NULL, del_sw_ns);
        tree_add_node(&ns->node, &prio->node);
        list_add_tail(&ns->node.list, &prio->node.children);
 
index 1a0e797..21d29f7 100644 (file)
@@ -241,7 +241,7 @@ static void print_health_info(struct mlx5_core_dev *dev)
        u32 fw;
        int i;
 
-       /* If the syndrom is 0, the device is OK and no need to print buffer */
+       /* If the syndrome is 0, the device is OK and no need to print buffer */
        if (!ioread8(&h->synd))
                return;
 
index d2a66dc..ee2f378 100644 (file)
@@ -57,7 +57,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params)
 {
        /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
-       mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+       mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
 
        /* RQ size in ipoib by default is 512 */
        params->log_rq_size = is_kdump_kernel() ?
@@ -86,6 +86,8 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
        mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
        mlx5i_build_nic_params(mdev, &priv->channels.params);
 
+       mlx5e_timestamp_init(priv);
+
        /* netdev init */
        netdev->hw_features    |= NETIF_F_SG;
        netdev->hw_features    |= NETIF_F_IP_CSUM;
@@ -450,7 +452,6 @@ static int mlx5i_open(struct net_device *netdev)
 
        mlx5e_refresh_tirs(epriv, false);
        mlx5e_activate_priv_channels(epriv);
-       mlx5e_timestamp_set(epriv);
 
        mutex_unlock(&epriv->state_lock);
        return 0;
index f26f97f..582b2f1 100644 (file)
@@ -137,6 +137,17 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
 }
 EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
 
+static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
+                                      bool reset, void *out, int out_size)
+{
+       u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
+
+       MLX5_SET(query_cong_statistics_in, in, opcode,
+                MLX5_CMD_OP_QUERY_CONG_STATISTICS);
+       MLX5_SET(query_cong_statistics_in, in, clear, reset);
+       return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
+}
+
 static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
 {
        return dev->priv.lag;
@@ -633,3 +644,48 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
        /* If bonded, we do not add an IB device for PF1. */
        return false;
 }
+
+int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+                                u64 *values,
+                                int num_counters,
+                                size_t *offsets)
+{
+       int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
+       struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
+       struct mlx5_lag *ldev;
+       int num_ports;
+       int ret, i, j;
+       void *out;
+
+       out = kvzalloc(outlen, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       memset(values, 0, sizeof(*values) * num_counters);
+
+       mutex_lock(&lag_mutex);
+       ldev = mlx5_lag_dev_get(dev);
+       if (ldev && mlx5_lag_is_bonded(ldev)) {
+               num_ports = MLX5_MAX_PORTS;
+               mdev[0] = ldev->pf[0].dev;
+               mdev[1] = ldev->pf[1].dev;
+       } else {
+               num_ports = 1;
+               mdev[0] = dev;
+       }
+
+       for (i = 0; i < num_ports; ++i) {
+               ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
+               if (ret)
+                       goto unlock;
+
+               for (j = 0; j < num_counters; ++j)
+                       values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
+       }
+
+unlock:
+       mutex_unlock(&lag_mutex);
+       kvfree(out);
+       return ret;
+}
+EXPORT_SYMBOL(mlx5_lag_query_cong_counters);
index fa8aed6..5701f12 100644 (file)
@@ -423,9 +423,13 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
 
        switch (clock->ptp_info.pin_config[pin].func) {
        case PTP_PF_EXTTS:
+               ptp_event.index = pin;
+               ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
+                                       be64_to_cpu(eqe->data.pps.time_stamp));
                if (clock->pps_info.enabled) {
                        ptp_event.type = PTP_CLOCK_PPSUSR;
-                       ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
+                       ptp_event.pps_times.ts_real =
+                                       ns_to_timespec64(ptp_event.timestamp);
                } else {
                        ptp_event.type = PTP_CLOCK_EXTTS;
                }
index 5f32344..0f88fd3 100644 (file)
@@ -317,11 +317,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
 {
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = &priv->eq_table;
-       struct irq_affinity irqdesc = {
-               .pre_vectors = MLX5_EQ_VEC_COMP_BASE,
-       };
        int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
        int nvec;
+       int err;
 
        nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
               MLX5_EQ_VEC_COMP_BASE;
@@ -331,22 +329,23 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
 
        priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
        if (!priv->irq_info)
-               goto err_free_msix;
+               return -ENOMEM;
 
-       nvec = pci_alloc_irq_vectors_affinity(dev->pdev,
+       nvec = pci_alloc_irq_vectors(dev->pdev,
                        MLX5_EQ_VEC_COMP_BASE + 1, nvec,
-                       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
-                       &irqdesc);
-       if (nvec < 0)
-               return nvec;
+                       PCI_IRQ_MSIX);
+       if (nvec < 0) {
+               err = nvec;
+               goto err_free_irq_info;
+       }
 
        table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
 
        return 0;
 
-err_free_msix:
+err_free_irq_info:
        kfree(priv->irq_info);
-       return -ENOMEM;
+       return err;
 }
 
 static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
@@ -582,8 +581,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
        int ret = 0;
 
        /* Disable local_lb by default */
-       if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
-           MLX5_CAP_GEN(dev, disable_local_lb))
+       if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
                ret = mlx5_nic_vport_update_local_lb(dev, false);
 
        return ret;
@@ -622,6 +620,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
        return (u64)timer_l | (u64)timer_h1 << 32;
 }
 
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+       if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+               mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+               return -ENOMEM;
+       }
+
+       cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
+                       priv->irq_info[i].mask);
+
+       if (IS_ENABLED(CONFIG_SMP) &&
+           irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+               mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+
+       return 0;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+       irq_set_affinity_hint(irq, NULL);
+       free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+               err = mlx5_irq_set_affinity_hint(mdev, i);
+               if (err)
+                       goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       for (i--; i >= 0; i--)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+
+       return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn)
 {
@@ -1068,9 +1123,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                goto err_stop_poll;
        }
 
-       if (boot && mlx5_init_once(dev, priv)) {
-               dev_err(&pdev->dev, "sw objs init failed\n");
-               goto err_stop_poll;
+       if (boot) {
+               err = mlx5_init_once(dev, priv);
+               if (err) {
+                       dev_err(&pdev->dev, "sw objs init failed\n");
+                       goto err_stop_poll;
+               }
        }
 
        err = mlx5_alloc_irq_vectors(dev);
@@ -1080,8 +1138,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        }
 
        dev->priv.uar = mlx5_get_uars_page(dev);
-       if (!dev->priv.uar) {
+       if (IS_ERR(dev->priv.uar)) {
                dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
+               err = PTR_ERR(dev->priv.uar);
                goto err_disable_msix;
        }
 
@@ -1097,6 +1156,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                goto err_stop_eqs;
        }
 
+       err = mlx5_irq_set_affinity_hints(dev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+               goto err_affinity_hints;
+       }
+
        err = mlx5_init_fs(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1154,6 +1219,9 @@ err_sriov:
        mlx5_cleanup_fs(dev);
 
 err_fs:
+       mlx5_irq_clear_affinity_hints(dev);
+
+err_affinity_hints:
        free_comp_eqs(dev);
 
 err_stop_eqs:
@@ -1222,6 +1290,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 
        mlx5_sriov_detach(dev);
        mlx5_cleanup_fs(dev);
+       mlx5_irq_clear_affinity_hints(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_put_uars_page(dev, priv->uar);
index db9e665..889130e 100644 (file)
@@ -213,8 +213,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 err_cmd:
        memset(din, 0, sizeof(din));
        memset(dout, 0, sizeof(dout));
-       MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
-       MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+       MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+       MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
        mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
        return err;
 }
index e651e4c..d3c33e9 100644 (file)
@@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
        return ret_entry;
 }
 
-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
                                   u32 rate, u16 index)
 {
-       u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]   = {0};
-       u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)]   = {0};
+       u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
 
-       MLX5_SET(set_rate_limit_in, in, opcode,
-                MLX5_CMD_OP_SET_RATE_LIMIT);
-       MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
-       MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+       MLX5_SET(set_pp_rate_limit_in, in, opcode,
+                MLX5_CMD_OP_SET_PP_RATE_LIMIT);
+       MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
+       MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
@@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
                entry->refcount++;
        } else {
                /* new rate limit */
-               err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+               err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
                if (err) {
                        mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
                                      rate, err);
@@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
        entry->refcount--;
        if (!entry->refcount) {
                /* need to remove rate */
-               mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+               mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
                entry->rate = 0;
        }
 
@@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
        /* Clear all configured rates */
        for (i = 0; i < table->max_size; i++)
                if (table->rl_entry[i].rate)
-                       mlx5_set_rate_limit_cmd(dev, 0,
-                                               table->rl_entry[i].index);
+                       mlx5_set_pp_rate_limit_cmd(dev, 0,
+                                                  table->rl_entry[i].index);
 
        kfree(dev->priv.rl_table.rl_entry);
 }
index 222b259..8b97066 100644 (file)
@@ -168,18 +168,16 @@ struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
        struct mlx5_uars_page *ret;
 
        mutex_lock(&mdev->priv.bfregs.reg_head.lock);
-       if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
-               ret = alloc_uars_page(mdev, false);
-               if (IS_ERR(ret)) {
-                       ret = NULL;
-                       goto out;
-               }
-               list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
-       } else {
+       if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
                ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
                                       struct mlx5_uars_page, list);
                kref_get(&ret->ref_count);
+               goto out;
        }
+       ret = alloc_uars_page(mdev, false);
+       if (IS_ERR(ret))
+               goto out;
+       list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
 out:
        mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
 
index d653b00..a1296a6 100644 (file)
@@ -908,23 +908,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
        void *in;
        int err;
 
-       mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable");
+       if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
+           !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+               return 0;
+
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;
 
-       MLX5_SET(modify_nic_vport_context_in, in,
-                field_select.disable_mc_local_lb, 1);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.disable_mc_local_lb, !enable);
-
-       MLX5_SET(modify_nic_vport_context_in, in,
-                field_select.disable_uc_local_lb, 1);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.disable_uc_local_lb, !enable);
 
+       if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
+               MLX5_SET(modify_nic_vport_context_in, in,
+                        field_select.disable_mc_local_lb, 1);
+
+       if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+               MLX5_SET(modify_nic_vport_context_in, in,
+                        field_select.disable_uc_local_lb, 1);
+
        err = mlx5_modify_nic_vport_context(mdev, in, inlen);
 
+       if (!err)
+               mlx5_core_dbg(mdev, "%s local_lb\n",
+                             enable ? "enable" : "disable");
+
        kvfree(in);
        return err;
 }
index 07a9ba6..2f74953 100644 (file)
@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
        struct mlx5e_vxlan *vxlan;
 
-       spin_lock(&vxlan_db->lock);
+       spin_lock_bh(&vxlan_db->lock);
        vxlan = radix_tree_lookup(&vxlan_db->tree, port);
-       spin_unlock(&vxlan_db->lock);
+       spin_unlock_bh(&vxlan_db->lock);
 
        return vxlan;
 }
@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
        struct mlx5e_vxlan *vxlan;
        int err;
 
-       if (mlx5e_vxlan_lookup_port(priv, port))
+       mutex_lock(&priv->state_lock);
+       vxlan = mlx5e_vxlan_lookup_port(priv, port);
+       if (vxlan) {
+               atomic_inc(&vxlan->refcount);
                goto free_work;
+       }
 
        if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
                goto free_work;
@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
                goto err_delete_port;
 
        vxlan->udp_port = port;
+       atomic_set(&vxlan->refcount, 1);
 
-       spin_lock_irq(&vxlan_db->lock);
+       spin_lock_bh(&vxlan_db->lock);
        err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
-       spin_unlock_irq(&vxlan_db->lock);
+       spin_unlock_bh(&vxlan_db->lock);
        if (err)
                goto err_free;
 
@@ -113,35 +118,39 @@ err_free:
 err_delete_port:
        mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
 free_work:
+       mutex_unlock(&priv->state_lock);
        kfree(vxlan_work);
 }
 
-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv         = vxlan_work->priv;
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+       u16 port = vxlan_work->port;
        struct mlx5e_vxlan *vxlan;
+       bool remove = false;
 
-       spin_lock_irq(&vxlan_db->lock);
-       vxlan = radix_tree_delete(&vxlan_db->tree, port);
-       spin_unlock_irq(&vxlan_db->lock);
-
+       mutex_lock(&priv->state_lock);
+       spin_lock_bh(&vxlan_db->lock);
+       vxlan = radix_tree_lookup(&vxlan_db->tree, port);
        if (!vxlan)
-               return;
-
-       mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
-
-       kfree(vxlan);
-}
+               goto out_unlock;
 
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
-       struct mlx5e_vxlan_work *vxlan_work =
-               container_of(work, struct mlx5e_vxlan_work, work);
-       struct mlx5e_priv *priv = vxlan_work->priv;
-       u16 port = vxlan_work->port;
+       if (atomic_dec_and_test(&vxlan->refcount)) {
+               radix_tree_delete(&vxlan_db->tree, port);
+               remove = true;
+       }
 
-       __mlx5e_vxlan_core_del_port(priv, port);
+out_unlock:
+       spin_unlock_bh(&vxlan_db->lock);
 
+       if (remove) {
+               mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+               kfree(vxlan);
+       }
+       mutex_unlock(&priv->state_lock);
        kfree(vxlan_work);
 }
 
@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
        struct mlx5e_vxlan *vxlan;
        unsigned int port = 0;
 
-       spin_lock_irq(&vxlan_db->lock);
+       /* Lockless since we are the only radix-tree consumers, wq is disabled */
        while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
                port = vxlan->udp_port;
-               spin_unlock_irq(&vxlan_db->lock);
-               __mlx5e_vxlan_core_del_port(priv, (u16)port);
-               spin_lock_irq(&vxlan_db->lock);
+               radix_tree_delete(&vxlan_db->tree, port);
+               mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+               kfree(vxlan);
        }
-       spin_unlock_irq(&vxlan_db->lock);
 }
index 5def12c..5ef6ae7 100644 (file)
@@ -36,6 +36,7 @@
 #include "en.h"
 
 struct mlx5e_vxlan {
+       atomic_t refcount;
        u16 udp_port;
 };
 
index 23f7d82..6ef20e5 100644 (file)
@@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
                return 0;
        }
 
-       wmb(); /* reset needs to be written before we read control register */
+       /* Reset needs to be written before we read control register, and
+        * we must wait for the HW to become responsive once again
+        */
+       wmb();
+       msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
+
        end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
        do {
                u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
index a644120..fb082ad 100644 (file)
@@ -59,6 +59,7 @@
 #define MLXSW_PCI_SW_RESET                     0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT             BIT(0)
 #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       5000
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS          100
 #define MLXSW_PCI_FW_READY                     0xA1844
 #define MLXSW_PCI_FW_READY_MASK                        0xFFFF
 #define MLXSW_PCI_FW_READY_MAGIC               0x5E
index 2d0897b..c3837ca 100644 (file)
@@ -4300,6 +4300,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
 {
+       u16 vid = 1;
        int err;
 
        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
@@ -4312,8 +4313,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
                                     true, false);
        if (err)
                goto err_port_vlan_set;
+
+       for (; vid <= VLAN_N_VID - 1; vid++) {
+               err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+                                                    vid, false);
+               if (err)
+                       goto err_vid_learning_set;
+       }
+
        return 0;
 
+err_vid_learning_set:
+       for (vid--; vid >= 1; vid--)
+               mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
 err_port_vlan_set:
        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
 err_port_stp_set:
@@ -4323,6 +4335,12 @@ err_port_stp_set:
 
 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
 {
+       u16 vid;
+
+       for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
+               mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+                                              vid, true);
+
        mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
                               false, false);
        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
@@ -4358,7 +4376,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
                }
                if (!info->linking)
                        break;
-               if (netdev_has_any_upper_dev(upper_dev)) {
+               if (netdev_has_any_upper_dev(upper_dev) &&
+                   (!netif_is_bridge_master(upper_dev) ||
+                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+                                                         upper_dev))) {
                        NL_SET_ERR_MSG(extack,
                                       "spectrum: Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
@@ -4486,6 +4507,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                                              u16 vid)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct netdev_notifier_changeupper_info *info = ptr;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;
@@ -4502,7 +4524,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                }
                if (!info->linking)
                        break;
-               if (netdev_has_any_upper_dev(upper_dev)) {
+               if (netdev_has_any_upper_dev(upper_dev) &&
+                   (!netif_is_bridge_master(upper_dev) ||
+                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+                                                         upper_dev))) {
                        NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
                }
index 432ab9b..05ce1be 100644 (file)
@@ -365,6 +365,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                struct net_device *brport_dev,
                                struct net_device *br_dev);
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+                                        const struct net_device *br_dev);
 
 /* spectrum.c */
 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
index c33beac..b5397da 100644 (file)
@@ -46,7 +46,8 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
                                  int tclass_num, u32 min, u32 max,
                                  u32 probability, bool is_ecn)
 {
-       char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)];
+       char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
+       char cwtp_cmd[MLXSW_REG_CWTP_LEN];
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        int err;
 
@@ -60,10 +61,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
        if (err)
                return err;
 
-       mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num,
+       mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
                             MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
 
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
 }
 
 static int
index 632c7b2..7042c85 100644 (file)
@@ -821,13 +821,18 @@ static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
        int err;
 
-       err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
-       if (err)
-               return err;
        fib->lpm_tree = new_tree;
        mlxsw_sp_lpm_tree_hold(new_tree);
+       err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
+       if (err)
+               goto err_tree_bind;
        mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
        return 0;
+
+err_tree_bind:
+       mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+       fib->lpm_tree = old_tree;
+       return err;
 }
 
 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
@@ -868,11 +873,14 @@ err_tree_replace:
        return err;
 
 no_replace:
-       err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
-       if (err)
-               return err;
        fib->lpm_tree = new_tree;
        mlxsw_sp_lpm_tree_hold(new_tree);
+       err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
+       if (err) {
+               mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+               fib->lpm_tree = NULL;
+               return err;
+       }
        return 0;
 }
 
@@ -1370,8 +1378,9 @@ static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
                mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
 }
 
-static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
-                                       struct mlxsw_sp_rif *rif);
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+                                        struct mlxsw_sp_rif *old_rif,
+                                        struct mlxsw_sp_rif *new_rif);
 static int
 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -1389,17 +1398,18 @@ mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
                return PTR_ERR(new_lb_rif);
        ipip_entry->ol_lb = new_lb_rif;
 
-       if (keep_encap) {
-               list_splice_init(&old_lb_rif->common.nexthop_list,
-                                &new_lb_rif->common.nexthop_list);
-               mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
-       }
+       if (keep_encap)
+               mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
+                                            &new_lb_rif->common);
 
        mlxsw_sp_rif_destroy(&old_lb_rif->common);
 
        return 0;
 }
 
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_rif *rif);
+
 /**
  * Update the offload related to an IPIP entry. This always updates decap, and
  * in addition to that it also:
@@ -1449,9 +1459,27 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
 {
        struct mlxsw_sp_ipip_entry *ipip_entry =
                mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+       enum mlxsw_sp_l3proto ul_proto;
+       union mlxsw_sp_l3addr saddr;
+       u32 ul_tb_id;
 
        if (!ipip_entry)
                return 0;
+
+       /* For flat configuration cases, moving overlay to a different VRF might
+        * cause local address conflict, and the conflicting tunnels need to be
+        * demoted.
+        */
+       ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
+       ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
+       saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
+       if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+                                                saddr, ul_tb_id,
+                                                ipip_entry)) {
+               mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+               return 0;
+       }
+
        return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
                                                   true, false, false, extack);
 }
@@ -1914,11 +1942,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
        dipn = htonl(dip);
        dev = mlxsw_sp->router->rifs[rif]->dev;
        n = neigh_lookup(&arp_tbl, &dipn, dev);
-       if (!n) {
-               netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
-                          &dip);
+       if (!n)
                return;
-       }
 
        netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
        neigh_event_send(n, NULL);
@@ -1945,11 +1970,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
 
        dev = mlxsw_sp->router->rifs[rif]->dev;
        n = neigh_lookup(&nd_tbl, &dip, dev);
-       if (!n) {
-               netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
-                          &dip);
+       if (!n)
                return;
-       }
 
        netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
        neigh_event_send(n, NULL);
@@ -2416,25 +2438,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
        rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
 }
 
-static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
-                                   const struct mlxsw_sp_rif *rif)
-{
-       char rauht_pl[MLXSW_REG_RAUHT_LEN];
-
-       mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
-                            rif->rif_index, rif->addr);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
-}
-
 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_rif *rif)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
 
-       mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
        list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
-                                rif_list_node)
+                                rif_list_node) {
+               mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
                mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+       }
 }
 
 enum mlxsw_sp_nexthop_type {
@@ -3217,7 +3230,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
 {
        if (!removing)
                nh->should_offload = 1;
-       else if (nh->offloaded)
+       else
                nh->should_offload = 0;
        nh->update = 1;
 }
@@ -3343,22 +3356,19 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
        return ul_dev ? (ul_dev->flags & IFF_UP) : true;
 }
 
-static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
-                                     struct mlxsw_sp_nexthop *nh,
-                                     struct net_device *ol_dev)
+static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
+                                      struct mlxsw_sp_nexthop *nh,
+                                      struct mlxsw_sp_ipip_entry *ipip_entry)
 {
        bool removing;
 
        if (!nh->nh_grp->gateway || nh->ipip_entry)
-               return 0;
-
-       nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
-       if (!nh->ipip_entry)
-               return -ENOENT;
+               return;
 
-       removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev);
+       nh->ipip_entry = ipip_entry;
+       removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
        __mlxsw_sp_nexthop_neigh_update(nh, removing);
-       return 0;
+       mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
 }
 
 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
@@ -3403,21 +3413,21 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_nexthop *nh,
                                       struct fib_nh *fib_nh)
 {
-       struct mlxsw_sp_router *router = mlxsw_sp->router;
+       const struct mlxsw_sp_ipip_ops *ipip_ops;
        struct net_device *dev = fib_nh->nh_dev;
-       enum mlxsw_sp_ipip_type ipipt;
+       struct mlxsw_sp_ipip_entry *ipip_entry;
        struct mlxsw_sp_rif *rif;
        int err;
 
-       if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
-           router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
-                                                    MLXSW_SP_L3_PROTO_IPV4)) {
-               nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-               err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
-               if (err)
-                       return err;
-               mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
-               return 0;
+       ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+       if (ipip_entry) {
+               ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+               if (ipip_ops->can_offload(mlxsw_sp, dev,
+                                         MLXSW_SP_L3_PROTO_IPV4)) {
+                       nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+                       mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+                       return 0;
+               }
        }
 
        nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -3545,6 +3555,18 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
        }
 }
 
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+                                        struct mlxsw_sp_rif *old_rif,
+                                        struct mlxsw_sp_rif *new_rif)
+{
+       struct mlxsw_sp_nexthop *nh;
+
+       list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
+       list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
+               nh->rif = new_rif;
+       mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
+}
+
 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
                                           struct mlxsw_sp_rif *rif)
 {
@@ -3996,7 +4018,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
        case RTN_LOCAL:
                ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
                                                 MLXSW_SP_L3_PROTO_IPV4, dip);
-               if (ipip_entry) {
+               if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
                        fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
                        return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
                                                             fib_entry,
@@ -4694,21 +4716,21 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_nexthop *nh,
                                       const struct rt6_info *rt)
 {
-       struct mlxsw_sp_router *router = mlxsw_sp->router;
+       const struct mlxsw_sp_ipip_ops *ipip_ops;
+       struct mlxsw_sp_ipip_entry *ipip_entry;
        struct net_device *dev = rt->dst.dev;
-       enum mlxsw_sp_ipip_type ipipt;
        struct mlxsw_sp_rif *rif;
        int err;
 
-       if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
-           router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
-                                                    MLXSW_SP_L3_PROTO_IPV6)) {
-               nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-               err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
-               if (err)
-                       return err;
-               mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
-               return 0;
+       ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+       if (ipip_entry) {
+               ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+               if (ipip_ops->can_offload(mlxsw_sp, dev,
+                                         MLXSW_SP_L3_PROTO_IPV6)) {
+                       nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+                       mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+                       return 0;
+               }
        }
 
        nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
index 7b8548e..593ad31 100644 (file)
@@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
        return NULL;
 }
 
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+                                        const struct net_device *br_dev)
+{
+       return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+}
+
 static struct mlxsw_sp_bridge_device *
 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
                              struct net_device *br_dev)
index e379b78..13190aa 100644 (file)
@@ -82,10 +82,33 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
        return nfp_net_ebpf_capable(nn) ? "BPF" : "";
 }
 
+static int
+nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
+{
+       int err;
+
+       nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
+       if (!nn->app_priv)
+               return -ENOMEM;
+
+       err = nfp_app_nic_vnic_alloc(app, nn, id);
+       if (err)
+               goto err_free_priv;
+
+       return 0;
+err_free_priv:
+       kfree(nn->app_priv);
+       return err;
+}
+
 static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
 {
+       struct nfp_bpf_vnic *bv = nn->app_priv;
+
        if (nn->dp.bpf_offload_xdp)
                nfp_bpf_xdp_offload(app, nn, NULL);
+       WARN_ON(bv->tc_prog);
+       kfree(bv);
 }
 
 static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
@@ -93,6 +116,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
 {
        struct tc_cls_bpf_offload *cls_bpf = type_data;
        struct nfp_net *nn = cb_priv;
+       struct bpf_prog *oldprog;
+       struct nfp_bpf_vnic *bv;
+       int err;
 
        if (type != TC_SETUP_CLSBPF ||
            !tc_can_offload(nn->dp.netdev) ||
@@ -100,8 +126,6 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
            cls_bpf->common.protocol != htons(ETH_P_ALL) ||
            cls_bpf->common.chain_index)
                return -EOPNOTSUPP;
-       if (nn->dp.bpf_offload_xdp)
-               return -EBUSY;
 
        /* Only support TC direct action */
        if (!cls_bpf->exts_integrated ||
@@ -110,16 +134,25 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
                return -EOPNOTSUPP;
        }
 
-       switch (cls_bpf->command) {
-       case TC_CLSBPF_REPLACE:
-               return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
-       case TC_CLSBPF_ADD:
-               return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
-       case TC_CLSBPF_DESTROY:
-               return nfp_net_bpf_offload(nn, NULL, true);
-       default:
+       if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
                return -EOPNOTSUPP;
+
+       bv = nn->app_priv;
+       oldprog = cls_bpf->oldprog;
+
+       /* Don't remove if oldprog doesn't match driver's state */
+       if (bv->tc_prog != oldprog) {
+               oldprog = NULL;
+               if (!cls_bpf->prog)
+                       return 0;
        }
+
+       err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
+       if (err)
+               return err;
+
+       bv->tc_prog = cls_bpf->prog;
+       return 0;
 }
 
 static int nfp_bpf_setup_tc_block(struct net_device *netdev,
@@ -167,7 +200,7 @@ const struct nfp_app_type app_bpf = {
 
        .extra_cap      = nfp_bpf_extra_cap,
 
-       .vnic_alloc     = nfp_app_nic_vnic_alloc,
+       .vnic_alloc     = nfp_bpf_vnic_alloc,
        .vnic_free      = nfp_bpf_vnic_free,
 
        .setup_tc       = nfp_bpf_setup_tc,
index 082a15f..57b6043 100644 (file)
@@ -172,6 +172,14 @@ struct nfp_prog {
        struct list_head insns;
 };
 
+/**
+ * struct nfp_bpf_vnic - per-vNIC BPF priv structure
+ * @tc_prog:   currently loaded cls_bpf program
+ */
+struct nfp_bpf_vnic {
+       struct bpf_prog *tc_prog;
+};
+
 int nfp_bpf_jit(struct nfp_prog *prog);
 
 extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
index 1a603fd..99b0487 100644 (file)
@@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
                return err;
        }
        nn_writeb(nn, ctrl_offset, entry->entry);
+       nfp_net_irq_unmask(nn, entry->entry);
 
        return 0;
 }
@@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
                                 unsigned int vector_idx)
 {
        nn_writeb(nn, ctrl_offset, 0xff);
+       nn_pci_flush(nn);
        free_irq(nn->irq_entries[vector_idx].vector, nn);
 }
 
index 2801ecd..6c02b2d 100644 (file)
@@ -333,7 +333,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
            ls >= ARRAY_SIZE(ls_to_ethtool))
                return 0;
 
-       cmd->base.speed = ls_to_ethtool[sts];
+       cmd->base.speed = ls_to_ethtool[ls];
        cmd->base.duplex = DUPLEX_FULL;
 
        return 0;
index 924a05e..78b36c6 100644 (file)
@@ -84,16 +84,13 @@ nfp_repr_phy_port_get_stats64(struct nfp_port *port,
 {
        u8 __iomem *mem = port->eth_stats;
 
-       /* TX and RX stats are flipped as we are returning the stats as seen
-        * at the switch port corresponding to the phys port.
-        */
-       stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
-       stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
-       stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
+       stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
+       stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
+       stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
 
-       stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
-       stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
-       stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
+       stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
+       stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
+       stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
 }
 
 static void
index ac8439c..481876b 100644 (file)
@@ -1986,9 +1986,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
                                         tx_skb->dma_len,
                                         DMA_TO_DEVICE);
                else
-                       pci_unmap_page(np->pci_dev, tx_skb->dma,
+                       dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
                                       tx_skb->dma_len,
-                                      PCI_DMA_TODEVICE);
+                                      DMA_TO_DEVICE);
                tx_skb->dma = 0;
        }
 }
index c8c4b39..b7abb82 100644 (file)
@@ -358,10 +358,27 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
        kfree(p_rdma_info);
 }
 
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
+{
+       qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
+}
+
 static void qed_rdma_free(struct qed_hwfn *p_hwfn)
 {
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
 
+       qed_rdma_free_reserved_lkey(p_hwfn);
        qed_rdma_resc_free(p_hwfn);
 }
 
@@ -615,9 +632,6 @@ static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
 {
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
 
-       /* The first DPI is reserved for the Kernel */
-       __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
-
        /* Tid 0 will be used as the key for "reserved MR".
         * The driver should allocate memory for it so it can be loaded but no
         * ramrod should be passed on it.
@@ -797,17 +811,6 @@ static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
        return p_hwfn->p_rdma_info->dev;
 }
 
-static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
-{
-       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
-       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
-
-       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
-       qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
-       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
 static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
 {
        struct qed_hwfn *p_hwfn;
index be48d9a..3588081 100644 (file)
@@ -776,6 +776,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;
+       bool eblock;
 
        if (!p_hwfn)
                return -EINVAL;
@@ -794,6 +795,11 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
        if (rc)
                goto spq_post_fail;
 
+       /* Check if entry is in block mode before qed_spq_add_entry,
+        * which might kfree p_ent.
+        */
+       eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
+
        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
@@ -811,7 +817,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 
        spin_unlock_bh(&p_spq->lock);
 
-       if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+       if (eblock) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
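
The qed_spq_post() fix above copies p_ent->comp_mode into a local eblock before qed_spq_add_entry(), because that call may kfree() the entry; only the cached flag is consulted afterwards. Below is a small standalone C sketch of the same rule, copy out whatever you still need before handing the object to a callee that may free it, with made-up names (request, submit).

#include <stdbool.h>
#include <stdlib.h>

struct request {
        bool blocking;                  /* plays the role of QED_SPQ_MODE_EBLOCK */
};

/* Takes ownership of req and may free it, e.g. on immediate completion. */
static int submit(struct request *req)
{
        free(req);
        return 0;
}

static int post(struct request *req)
{
        /* Cache what we still need *before* submit() may free req. */
        bool blocking = req->blocking;
        int rc = submit(req);

        if (rc)
                return rc;

        if (blocking) {
                /* wait for completion here; req must not be dereferenced */
        }
        return 0;
}

int main(void)
{
        struct request *req = calloc(1, sizeof(*req));

        return req ? post(req) : 1;
}
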
index 18461fc..53dbf1e 100644 (file)
@@ -47,6 +47,7 @@
 #define MDIO_CLK_25_28                                               7
 
 #define MDIO_WAIT_TIMES                                           1000
+#define MDIO_STATUS_DELAY_TIME                                       1
 
 static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
 {
@@ -65,7 +66,7 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
 
        if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
                               !(reg & (MDIO_START | MDIO_BUSY)),
-                              100, MDIO_WAIT_TIMES * 100))
+                              MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
                return -EIO;
 
        return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
@@ -88,8 +89,8 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
        writel(reg, adpt->base + EMAC_MDIO_CTRL);
 
        if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
-                              !(reg & (MDIO_START | MDIO_BUSY)), 100,
-                              MDIO_WAIT_TIMES * 100))
+                              !(reg & (MDIO_START | MDIO_BUSY)),
+                              MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
                return -EIO;
 
        return 0;
index 70c92b6..38c924b 100644 (file)
@@ -253,18 +253,18 @@ static int emac_open(struct net_device *netdev)
                return ret;
        }
 
-       ret = emac_mac_up(adpt);
+       ret = adpt->phy.open(adpt);
        if (ret) {
                emac_mac_rx_tx_rings_free_all(adpt);
                free_irq(irq->irq, irq);
                return ret;
        }
 
-       ret = adpt->phy.open(adpt);
+       ret = emac_mac_up(adpt);
        if (ret) {
-               emac_mac_down(adpt);
                emac_mac_rx_tx_rings_free_all(adpt);
                free_irq(irq->irq, irq);
+               adpt->phy.close(adpt);
                return ret;
        }
 
index 71bee1a..df21e90 100644 (file)
@@ -195,6 +195,7 @@ err2:
 err1:
        rmnet_unregister_real_device(real_dev, port);
 err0:
+       kfree(ep);
        return err;
 }
 
index 29842cc..08e4afc 100644 (file)
@@ -126,12 +126,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 
        if (skb_headroom(skb) < required_headroom) {
                if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
-                       return RMNET_MAP_CONSUMED;
+                       goto fail;
        }
 
        map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
        if (!map_header)
-               return RMNET_MAP_CONSUMED;
+               goto fail;
 
        if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
                if (mux_id == 0xff)
@@ -143,6 +143,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
        skb->protocol = htons(ETH_P_MAP);
 
        return RMNET_MAP_SUCCESS;
+
+fail:
+       kfree_skb(skb);
+       return RMNET_MAP_CONSUMED;
 }
 
 static void
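
The rmnet egress fix funnels both failure cases through a single fail label that frees the skb before returning RMNET_MAP_CONSUMED, so the buffer is never leaked and the caller must not touch it again. A small userspace sketch of that consume-on-failure ownership rule, with made-up names (handle, transform):

#include <stdlib.h>

enum handle_result { HANDLED, CONSUMED };

/* Pretend transformation step that can fail. */
static int transform(char *buf)
{
        (void)buf;
        return -1;                      /* simulate a failure */
}

/* On success the caller keeps ownership; on failure the buffer is
 * consumed (freed) here and the caller must not use it again. */
static enum handle_result handle(char *buf)
{
        if (transform(buf))
                goto fail;

        return HANDLED;

fail:
        free(buf);
        return CONSUMED;
}

int main(void)
{
        char *buf = malloc(64);

        if (buf && handle(buf) == HANDLED)
                free(buf);
        return 0;
}
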
index fc0d5fa..734286e 100644 (file)
@@ -2244,19 +2244,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
        void __iomem *ioaddr = tp->mmio_addr;
        dma_addr_t paddr = tp->counters_phys_addr;
        u32 cmd;
-       bool ret;
 
        RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+       RTL_R32(CounterAddrHigh);
        cmd = (u64)paddr & DMA_BIT_MASK(32);
        RTL_W32(CounterAddrLow, cmd);
        RTL_W32(CounterAddrLow, cmd | counter_cmd);
 
-       ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
-
-       RTL_W32(CounterAddrLow, 0);
-       RTL_W32(CounterAddrHigh, 0);
-
-       return ret;
+       return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
 }
 
 static bool rtl8169_reset_counters(struct net_device *dev)
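
The rtl8169_do_counters() change inserts RTL_R32(CounterAddrHigh) right after the high-address write: reading the register back flushes the posted MMIO write so the device sees the high word before the low/command word, and the final wait no longer needs to clear the registers afterwards. The sketch below only simulates that write-then-read-back ordering against an in-memory register file; real hardware would sit behind ioremap()ed pointers.

#include <stdint.h>

static volatile uint32_t regs[2];       /* simulated MMIO window */

enum { COUNTER_ADDR_HIGH = 0, COUNTER_ADDR_LOW = 1 };

static void mmio_write32(int reg, uint32_t val) { regs[reg] = val; }
static uint32_t mmio_read32(int reg) { return regs[reg]; }

static void program_counter_address(uint64_t paddr, uint32_t cmd)
{
        mmio_write32(COUNTER_ADDR_HIGH, (uint32_t)(paddr >> 32));
        /* Read back the register just written; on real hardware this
         * flushes the posted write before the next one is issued. */
        (void)mmio_read32(COUNTER_ADDR_HIGH);
        mmio_write32(COUNTER_ADDR_LOW, (uint32_t)paddr | cmd);
}

int main(void)
{
        program_counter_address(0x123456789ULL, 0x1);
        return 0;
}
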
index 2b962d3..009780d 100644 (file)
@@ -2308,32 +2308,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
        struct ravb_private *priv = netdev_priv(ndev);
        int ret = 0;
 
-       if (priv->wol_enabled) {
-               /* Reduce the usecount of the clock to zero and then
-                * restore it to its original value. This is done to force
-                * the clock to be re-enabled which is a workaround
-                * for renesas-cpg-mssr driver which do not enable clocks
-                * when resuming from PSCI suspend/resume.
-                *
-                * Without this workaround the driver fails to communicate
-                * with the hardware if WoL was enabled when the system
-                * entered PSCI suspend. This is due to that if WoL is enabled
-                * we explicitly keep the clock from being turned off when
-                * suspending, but in PSCI sleep power is cut so the clock
-                * is disabled anyhow, the clock driver is not aware of this
-                * so the clock is not turned back on when resuming.
-                *
-                * TODO: once the renesas-cpg-mssr suspend/resume is working
-                *       this clock dance should be removed.
-                */
-               clk_disable(priv->clk);
-               clk_disable(priv->clk);
-               clk_enable(priv->clk);
-               clk_enable(priv->clk);
-
-               /* Set reset mode to rearm the WoL logic */
+       /* If WoL is enabled set reset mode to rearm the WoL logic */
+       if (priv->wol_enabled)
                ravb_write(ndev, CCC_OPC_RESET, CCC);
-       }
 
        /* All register have been reset to default values.
         * Restore all registers which where setup at probe time and
index 7e060aa..53924a4 100644 (file)
@@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
        [FWNLCR0]       = 0x0090,
        [FWALCR0]       = 0x0094,
        [TXNLCR1]       = 0x00a0,
-       [TXALCR1]       = 0x00a0,
+       [TXALCR1]       = 0x00a4,
        [RXNLCR1]       = 0x00a8,
        [RXALCR1]       = 0x00ac,
        [FWNLCR1]       = 0x00b0,
@@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        [FWNLCR0]       = 0x0090,
        [FWALCR0]       = 0x0094,
        [TXNLCR1]       = 0x00a0,
-       [TXALCR1]       = 0x00a0,
+       [TXALCR1]       = 0x00a4,
        [RXNLCR1]       = 0x00a8,
        [RXALCR1]       = 0x00ac,
        [FWNLCR1]       = 0x00b0,
@@ -1149,7 +1149,8 @@ static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
                           entry, le32_to_cpu(txdesc->status));
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
-                       dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+                       dma_unmap_single(&mdp->pdev->dev,
+                                        le32_to_cpu(txdesc->addr),
                                         le32_to_cpu(txdesc->len) >> 16,
                                         DMA_TO_DEVICE);
                        dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
@@ -1179,14 +1180,14 @@ static void sh_eth_ring_free(struct net_device *ndev)
                        if (mdp->rx_skbuff[i]) {
                                struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
 
-                               dma_unmap_single(&ndev->dev,
+                               dma_unmap_single(&mdp->pdev->dev,
                                                 le32_to_cpu(rxdesc->addr),
                                                 ALIGN(mdp->rx_buf_sz, 32),
                                                 DMA_FROM_DEVICE);
                        }
                }
                ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-               dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+               dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
                                  mdp->rx_desc_dma);
                mdp->rx_ring = NULL;
        }
@@ -1203,7 +1204,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
                sh_eth_tx_free(ndev, false);
 
                ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
-               dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+               dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
                                  mdp->tx_desc_dma);
                mdp->tx_ring = NULL;
        }
@@ -1245,9 +1246,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
 
                /* The size of the buffer is a multiple of 32 bytes. */
                buf_len = ALIGN(mdp->rx_buf_sz, 32);
-               dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
+               dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
                                          DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, dma_addr)) {
+               if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
                        kfree_skb(skb);
                        break;
                }
@@ -1323,8 +1324,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
 
        /* Allocate all Rx descriptors. */
        rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-       mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
-                                         GFP_KERNEL);
+       mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
+                                         &mdp->rx_desc_dma, GFP_KERNEL);
        if (!mdp->rx_ring)
                goto ring_free;
 
@@ -1332,8 +1333,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
 
        /* Allocate all Tx descriptors. */
        tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
-       mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
-                                         GFP_KERNEL);
+       mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
+                                         &mdp->tx_desc_dma, GFP_KERNEL);
        if (!mdp->tx_ring)
                goto ring_free;
        return 0;
@@ -1527,7 +1528,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        mdp->rx_skbuff[entry] = NULL;
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
-                       dma_unmap_single(&ndev->dev, dma_addr,
+                       dma_unmap_single(&mdp->pdev->dev, dma_addr,
                                         ALIGN(mdp->rx_buf_sz, 32),
                                         DMA_FROM_DEVICE);
                        skb_put(skb, pkt_len);
@@ -1555,9 +1556,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        sh_eth_set_receive_align(skb);
-                       dma_addr = dma_map_single(&ndev->dev, skb->data,
+                       dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
                                                  buf_len, DMA_FROM_DEVICE);
-                       if (dma_mapping_error(&ndev->dev, dma_addr)) {
+                       if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
                                kfree_skb(skb);
                                break;
                        }
@@ -1891,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev)
                return PTR_ERR(phydev);
        }
 
+       /* mask with MAC supported features */
+       if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
+               int err = phy_set_max_speed(phydev, SPEED_100);
+               if (err) {
+                       netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
+                       phy_disconnect(phydev);
+                       return err;
+               }
+       }
+
        phy_attached_info(phydev);
 
        return 0;
@@ -2078,8 +2089,8 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
                add_reg(CSMR);
        if (cd->select_mii)
                add_reg(RMII_MII);
-       add_reg(ARSTR);
        if (cd->tsu) {
+               add_tsu_reg(ARSTR);
                add_tsu_reg(TSU_CTRST);
                add_tsu_reg(TSU_FWEN0);
                add_tsu_reg(TSU_FWEN1);
@@ -2441,9 +2452,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        /* soft swap. */
        if (!mdp->cd->hw_swap)
                sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
-       dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+       dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
                                  DMA_TO_DEVICE);
-       if (dma_mapping_error(&ndev->dev, dma_addr)) {
+       if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
@@ -3214,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        /* ioremap the TSU registers */
        if (mdp->cd->tsu) {
                struct resource *rtsu;
+
                rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
-               if (IS_ERR(mdp->tsu_addr)) {
-                       ret = PTR_ERR(mdp->tsu_addr);
+               if (!rtsu) {
+                       dev_err(&pdev->dev, "no TSU resource\n");
+                       ret = -ENODEV;
+                       goto out_release;
+               }
+               /* We can only request the TSU region for the first port
+                * of the two sharing this TSU for the probe to succeed...
+                */
+               if (devno % 2 == 0 &&
+                   !devm_request_mem_region(&pdev->dev, rtsu->start,
+                                            resource_size(rtsu),
+                                            dev_name(&pdev->dev))) {
+                       dev_err(&pdev->dev, "can't request TSU resource.\n");
+                       ret = -EBUSY;
+                       goto out_release;
+               }
+               mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
+                                            resource_size(rtsu));
+               if (!mdp->tsu_addr) {
+                       dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
+                       ret = -ENOMEM;
                        goto out_release;
                }
                mdp->port = devno % 2;
                ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
        }
 
-       /* initialize first or needed device */
-       if (!devno || pd->needs_init) {
+       /* Need to init only the first port of the two sharing a TSU */
+       if (devno % 2 == 0) {
                if (mdp->cd->chip_reset)
                        mdp->cd->chip_reset(ndev);
 
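
Most of the sh_eth diff is one substitution: every dma_map_single(), dma_unmap_single(), dma_alloc_coherent() and dma_free_coherent() call now uses &mdp->pdev->dev instead of &ndev->dev or NULL, because the net_device carries no DMA configuration; only the platform device that probed the driver has a valid DMA mask and ops. A short kernel-style sketch of that shape (my_priv and my_map_rx are illustrative names, and this compiles only in kernel context):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

struct my_priv {
        struct platform_device *pdev;   /* device that owns the DMA mask */
};

/* Map an RX buffer against the platform device, never the net_device. */
static int my_map_rx(struct my_priv *priv, struct sk_buff *skb,
                     size_t len, dma_addr_t *addr)
{
        *addr = dma_map_single(&priv->pdev->dev, skb->data, len,
                               DMA_FROM_DEVICE);
        if (dma_mapping_error(&priv->pdev->dev, *addr))
                return -ENOMEM;
        return 0;
}
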
index 0ea7e16..9937a24 100644 (file)
@@ -77,6 +77,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
        }
 
        if (buffer->flags & EFX_TX_BUF_SKB) {
+               EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_consume_skb_any((struct sk_buff *)buffer->skb);
@@ -426,12 +427,14 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 {
        struct efx_tx_buffer *buffer;
+       unsigned int bytes_compl = 0;
+       unsigned int pkts_compl = 0;
 
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
-               efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
+               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
        }
 }
 
index e1e5ac0..ce2ea2d 100644 (file)
@@ -409,7 +409,7 @@ struct stmmac_desc_ops {
        /* get timestamp value */
         u64(*get_timestamp) (void *desc, u32 ats);
        /* get rx timestamp status */
-       int (*get_rx_timestamp_status) (void *desc, u32 ats);
+       int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
        /* Display ring */
        void (*display_ring)(void *head, unsigned int size, bool rx);
        /* set MSS via context descriptor */
index 61cb248..9e6db16 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU
  *
- * Copyright (C) Alexandre Torgue 2015
- * Author:  Alexandre Torgue <alexandre.torgue@gmail.com>
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author:  Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
  * License terms:  GNU General Public License (GPL), version 2
  *
  */
index e5ff734..9eb7f65 100644 (file)
@@ -808,8 +808,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
                         val, reg);
 
        if (gmac->variant->soc_has_internal_phy) {
-               if (of_property_read_bool(priv->plat->phy_node,
-                                         "allwinner,leds-active-low"))
+               if (of_property_read_bool(node, "allwinner,leds-active-low"))
                        reg |= H3_EPHY_LED_POL;
                else
                        reg &= ~H3_EPHY_LED_POL;
index 4b286e2..7e089bf 100644 (file)
@@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc)
        return ret;
 }
 
-static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
+                                                u32 ats)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
        int ret = -EINVAL;
@@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
 
                        /* Check if timestamp is OK from context descriptor */
                        do {
-                               ret = dwmac4_rx_check_timestamp(desc);
+                               ret = dwmac4_rx_check_timestamp(next_desc);
                                if (ret < 0)
                                        goto exit;
                                i++;
index 7546b36..2a828a3 100644 (file)
@@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
        return ns;
 }
 
-static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
+static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
+                                           u32 ats)
 {
        if (ats) {
                struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
index f817f8f..db4cee5 100644 (file)
@@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
        return ns;
 }
 
-static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
+static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
 
index 721b616..08c19eb 100644 (file)
@@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
 {
        u32 value = readl(ioaddr + PTP_TCR);
        unsigned long data;
+       u32 reg_value;
 
        /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
         *      formula = (1/ptp_clock) * 1000000000
@@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
 
        data &= PTP_SSIR_SSINC_MASK;
 
+       reg_value = data;
        if (gmac4)
-               data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+               reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
 
-       writel(data, ioaddr + PTP_SSIR);
+       writel(reg_value, ioaddr + PTP_SSIR);
 
        return data;
 }
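
stmmac_config_sub_second_increment() used to shift data in place before writing PTP_SSIR, which corrupted the value it then returned to the caller; the fix writes a separate reg_value (shifted only for GMAC4's register layout) and keeps data intact for the return. The same keep-the-raw-value pattern in a tiny standalone sketch (the shift amount is illustrative, standing in for GMAC4_PTP_SSIR_SSINC_SHIFT):

#include <stdint.h>
#include <stdio.h>

#define SSINC_SHIFT 16                  /* illustrative only */

static uint32_t hw_reg;                 /* stands in for the PTP_SSIR register */

/* Writes a (possibly shifted) copy to hardware, returns the raw increment. */
static uint32_t config_ssinc(uint32_t data, int shifted_layout)
{
        uint32_t reg_value = data;

        if (shifted_layout)
                reg_value <<= SSINC_SHIFT;

        hw_reg = reg_value;             /* register write */
        return data;                    /* caller still gets the unshifted value */
}

int main(void)
{
        uint32_t ssinc = config_ssinc(43, 1);

        printf("ssinc = %u, reg = 0x%x\n", ssinc, hw_reg);
        return 0;
}
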
index f63c2dd..c0af0bc 100644 (file)
@@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
        struct net_device *ndev = priv->dev;
+       int interface = priv->plat->interface;
        unsigned long flags;
        bool ret = false;
 
+       if ((interface != PHY_INTERFACE_MODE_MII) &&
+           (interface != PHY_INTERFACE_MODE_GMII) &&
+           !phy_interface_mode_is_rgmii(interface))
+               goto out;
+
        /* Using PCS we cannot dial with the phy registers at this stage
         * so we do not support extra feature like EEE.
         */
@@ -482,7 +488,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                desc = np;
 
        /* Check if timestamp is available */
-       if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+       if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
                ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
@@ -2588,6 +2594,7 @@ static int stmmac_open(struct net_device *dev)
 
        priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+       priv->mss = 0;
 
        ret = alloc_dma_desc_resources(priv);
        if (ret < 0) {
index ed58c74..f5a7eb2 100644 (file)
@@ -715,7 +715,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
                /* warning!!!! We are retrieving the virtual ptr in the sw_data
                 * field as a 32bit value. Will not work on 64bit machines
                 */
-               page = (struct page *)GET_SW_DATA0(desc);
+               page = (struct page *)GET_SW_DATA0(ndesc);
 
                if (likely(dma_buff && buf_len && page)) {
                        dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
index 83e6f76..3394924 100644 (file)
@@ -995,8 +995,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
        else
                name = "Rhine III";
 
-       netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
-                   name, (long)ioaddr, dev->dev_addr, rp->irq);
+       netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
+                   name, ioaddr, dev->dev_addr, rp->irq);
 
        dev_set_drvdata(hwdev, dev);
 
index 6d68c8a..da4ec57 100644 (file)
@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
 config XILINX_LL_TEMAC
        tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
        depends on (PPC || MICROBLAZE)
+       depends on !64BIT || BROKEN
        select PHYLIB
        ---help---
          This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
index b718a02..64fda2e 100644 (file)
@@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        if (IS_ERR(rt))
                return PTR_ERR(rt);
 
+       if (skb_dst(skb)) {
+               int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
+                         GENEVE_BASE_HLEN - info->options_len - 14;
+
+               skb_dst_update_pmtu(skb, mtu);
+       }
+
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->collect_md) {
                tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        if (IS_ERR(dst))
                return PTR_ERR(dst);
 
+       if (skb_dst(skb)) {
+               int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
+                         GENEVE_BASE_HLEN - info->options_len - 14;
+
+               skb_dst_update_pmtu(skb, mtu);
+       }
+
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->collect_md) {
                prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
index 8483f03..1ab97d9 100644 (file)
@@ -1379,8 +1379,8 @@ static int rr_close(struct net_device *dev)
                            rrpriv->info_dma);
        rrpriv->info = NULL;
 
-       free_irq(pdev->irq, dev);
        spin_unlock_irqrestore(&rrpriv->lock, flags);
+       free_irq(pdev->irq, dev);
 
        return 0;
 }
index 11c1e79..77cc4fb 100644 (file)
@@ -393,6 +393,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
                .flowi4_oif = dev->ifindex,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC,
+               .flowi4_mark = skb->mark,
                .daddr = ip4h->daddr,
                .saddr = ip4h->saddr,
        };
index a178c5e..a0f2be8 100644 (file)
@@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        return 0;
 
 unregister_netdev:
+       /* macvlan_uninit would free the macvlan port */
        unregister_netdevice(dev);
+       return err;
 destroy_macvlan_port:
-       if (create)
+       /* the macvlan port may already have been freed by macvlan_uninit
+        * if registration failed, so only destroy it when it is still valid.
+        */
+       if (create && macvlan_port_get_rtnl(dev))
                macvlan_port_destroy(port->dev);
        return err;
 }
index 5f93e6a..e911e49 100644 (file)
@@ -239,14 +239,10 @@ static int at803x_resume(struct phy_device *phydev)
 {
        int value;
 
-       mutex_lock(&phydev->lock);
-
        value = phy_read(phydev, MII_BMCR);
        value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
        phy_write(phydev, MII_BMCR, value);
 
-       mutex_unlock(&phydev->lock);
-
        return 0;
 }
 
index 4d02b27..82104ed 100644 (file)
@@ -637,6 +637,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
        if (err < 0)
                goto error;
 
+       /* Do not touch the fiber page if we're in copper->sgmii mode */
+       if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
+               return 0;
+
        /* Then the fiber link */
        err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
        if (err < 0)
@@ -875,6 +879,8 @@ static int m88e1510_config_init(struct phy_device *phydev)
 
        /* SGMII-to-Copper mode initialization */
        if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+               u32 pause;
+
                /* Select page 18 */
                err = marvell_set_page(phydev, 18);
                if (err < 0)
@@ -898,6 +904,16 @@ static int m88e1510_config_init(struct phy_device *phydev)
                err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
                if (err < 0)
                        return err;
+
+               /* There appears to be a bug in the 88e1512 when used in
+                * SGMII to copper mode, where the AN advertisement register
+                * clears the pause bits each time a negotiation occurs.
+                * This means we can never be truly sure what was advertised,
+                * so disable Pause support.
+                */
+               pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+               phydev->supported &= ~pause;
+               phydev->advertising &= ~pause;
        }
 
        return m88e1121_config_init(phydev);
@@ -2069,7 +2085,7 @@ static struct phy_driver marvell_drivers[] = {
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
                .config_init = &m88e1145_config_init,
-               .config_aneg = &marvell_config_aneg,
+               .config_aneg = &m88e1101_config_aneg,
                .read_status = &genphy_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
                .config_intr = &marvell_config_intr,
index aebc08b..21b3f36 100644 (file)
@@ -16,6 +16,7 @@
  * link takes priority and the other port is completely locked out.
  */
 #include <linux/phy.h>
+#include <linux/marvell_phy.h>
 
 enum {
        MV_PCS_BASE_T           = 0x0000,
@@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev)
 static struct phy_driver mv3310_drivers[] = {
        {
                .phy_id         = 0x002b09aa,
-               .phy_id_mask    = 0xffffffff,
+               .phy_id_mask    = MARVELL_PHY_ID_MASK,
                .name           = "mv88x3310",
                .features       = SUPPORTED_10baseT_Full |
                                  SUPPORTED_100baseT_Full |
@@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = {
 module_phy_driver(mv3310_drivers);
 
 static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
-       { 0x002b09aa, 0xffffffff },
+       { 0x002b09aa, MARVELL_PHY_ID_MASK },
        { },
 };
 MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
index 1352965..6425ce0 100644 (file)
@@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
 
        data->regulator = devm_regulator_get(&pdev->dev, "phy");
        if (IS_ERR(data->regulator)) {
-               if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
+               if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
+                       ret = -EPROBE_DEFER;
+                       goto err_out_free_mdiobus;
+               }
 
                dev_info(&pdev->dev, "no regulator found\n");
                data->regulator = NULL;
index bfd3090..07c6048 100644 (file)
@@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
        }
 
        ret = xgene_enet_ecc_init(pdata);
-       if (ret)
+       if (ret) {
+               if (pdata->dev->of_node)
+                       clk_disable_unprepare(pdata->clk);
                return ret;
+       }
        xgene_gmac_reset(pdata);
 
        return 0;
@@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)
                return ret;
 
        mdio_bus = mdiobus_alloc();
-       if (!mdio_bus)
-               return -ENOMEM;
+       if (!mdio_bus) {
+               ret = -ENOMEM;
+               goto out_clk;
+       }
 
        mdio_bus->name = "APM X-Gene MDIO bus";
 
@@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
                mdio_bus->phy_mask = ~0;
                ret = mdiobus_register(mdio_bus);
                if (ret)
-                       goto out;
+                       goto out_mdiobus;
 
                acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
                                    acpi_register_phy, NULL, mdio_bus, NULL);
@@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)
        }
 
        if (ret)
-               goto out;
+               goto out_mdiobus;
 
        pdata->mdio_bus = mdio_bus;
        xgene_mdio_status = true;
 
        return 0;
 
-out:
+out_mdiobus:
        mdiobus_free(mdio_bus);
 
+out_clk:
+       if (dev->of_node)
+               clk_disable_unprepare(pdata->clk);
+
        return ret;
 }
 
index 2df7b62..54d00a1 100644 (file)
@@ -270,6 +270,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
 
                if (addr == mdiodev->addr) {
                        dev->of_node = child;
+                       dev->fwnode = of_fwnode_handle(child);
                        return;
                }
        }
index 1ea69b7..842eb87 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/ethtool.h>
 #include <linux/phy.h>
 #include <linux/netdevice.h>
+#include <linux/bitfield.h>
 
 static int meson_gxl_config_init(struct phy_device *phydev)
 {
@@ -50,6 +51,77 @@ static int meson_gxl_config_init(struct phy_device *phydev)
        return 0;
 }
 
+/* This function is provided to cope with the possible failures of this phy
+ * during aneg process. When aneg fails, the PHY reports that aneg is done
+ * but the value found in MII_LPA is wrong:
+ *  - Early failures: MII_LPA is just 0x0001. If MII_EXPANSION reports that
+ *    the link partner (LP) supports aneg but the LP never acked our base
+ *    code word, it is likely that we never sent it to begin with.
+ *  - Late failures: MII_LPA is filled with a value which seems to make sense
+ *    but it actually is not what the LP is advertising. It seems that we
+ *    can detect this using a magic bit in the WOL bank (reg 12 - bit 12).
+ *    If this particular bit is not set when aneg is reported being done,
+ *    it means MII_LPA is likely to be wrong.
+ *
+ * In both cases, forcing a restart of the aneg process solves the problem.
+ * When this failure happens, the first retry is usually successful but,
+ * in some cases, it may take up to 6 retries to get a decent result.
+ */
+static int meson_gxl_read_status(struct phy_device *phydev)
+{
+       int ret, wol, lpa, exp;
+
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               ret = genphy_aneg_done(phydev);
+               if (ret < 0)
+                       return ret;
+               else if (!ret)
+                       goto read_status_continue;
+
+               /* Need to access WOL bank, make sure the access is open */
+               ret = phy_write(phydev, 0x14, 0x0000);
+               if (ret)
+                       return ret;
+               ret = phy_write(phydev, 0x14, 0x0400);
+               if (ret)
+                       return ret;
+               ret = phy_write(phydev, 0x14, 0x0000);
+               if (ret)
+                       return ret;
+               ret = phy_write(phydev, 0x14, 0x0400);
+               if (ret)
+                       return ret;
+
+               /* Request LPI_STATUS WOL register */
+               ret = phy_write(phydev, 0x14, 0x8D80);
+               if (ret)
+                       return ret;
+
+               /* Read LPI_STATUS value */
+               wol = phy_read(phydev, 0x15);
+               if (wol < 0)
+                       return wol;
+
+               lpa = phy_read(phydev, MII_LPA);
+               if (lpa < 0)
+                       return lpa;
+
+               exp = phy_read(phydev, MII_EXPANSION);
+               if (exp < 0)
+                       return exp;
+
+               if (!(wol & BIT(12)) ||
+                   ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
+                       /* Looks like aneg failed after all */
+                       phydev_dbg(phydev, "LPA corruption - aneg restart\n");
+                       return genphy_restart_aneg(phydev);
+               }
+       }
+
+read_status_continue:
+       return genphy_read_status(phydev);
+}
+
 static struct phy_driver meson_gxl_phy[] = {
        {
                .phy_id         = 0x01814400,
@@ -60,7 +132,7 @@ static struct phy_driver meson_gxl_phy[] = {
                .config_init    = meson_gxl_config_init,
                .config_aneg    = genphy_config_aneg,
                .aneg_done      = genphy_aneg_done,
-               .read_status    = genphy_read_status,
+               .read_status    = meson_gxl_read_status,
                .suspend        = genphy_suspend,
                .resume         = genphy_resume,
        },
index fdb43dd..422ff63 100644 (file)
@@ -496,16 +496,18 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
        return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
 }
 
+/* Center KSZ9031RNX FLP timing at 16ms. */
 static int ksz9031_center_flp_timing(struct phy_device *phydev)
 {
        int result;
 
-       /* Center KSZ9031RNX FLP timing at 16ms. */
        result = ksz9031_extended_write(phydev, OP_DATA, 0,
                                        MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+       if (result)
+               return result;
+
        result = ksz9031_extended_write(phydev, OP_DATA, 0,
                                        MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
-
        if (result)
                return result;
 
@@ -622,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
                phydev->link = 0;
                if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
                        phydev->drv->config_intr(phydev);
+               return genphy_config_aneg(phydev);
        }
 
        return 0;
index 2b1e67b..ed10d1f 100644 (file)
@@ -828,7 +828,6 @@ EXPORT_SYMBOL(phy_stop);
  */
 void phy_start(struct phy_device *phydev)
 {
-       bool do_resume = false;
        int err = 0;
 
        mutex_lock(&phydev->lock);
@@ -841,6 +840,9 @@ void phy_start(struct phy_device *phydev)
                phydev->state = PHY_UP;
                break;
        case PHY_HALTED:
+               /* if phy was suspended, bring the physical link up again */
+               phy_resume(phydev);
+
                /* make sure interrupts are re-enabled for the PHY */
                if (phydev->irq != PHY_POLL) {
                        err = phy_enable_interrupts(phydev);
@@ -849,17 +851,12 @@ void phy_start(struct phy_device *phydev)
                }
 
                phydev->state = PHY_RESUMING;
-               do_resume = true;
                break;
        default:
                break;
        }
        mutex_unlock(&phydev->lock);
 
-       /* if phy was suspended, bring the physical link up again */
-       if (do_resume)
-               phy_resume(phydev);
-
        phy_trigger_machine(phydev, true);
 }
 EXPORT_SYMBOL(phy_start);
index 67f25ac..b15b31c 100644 (file)
@@ -135,7 +135,9 @@ static int mdio_bus_phy_resume(struct device *dev)
        if (!mdio_bus_phy_may_suspend(phydev))
                goto no_resume;
 
+       mutex_lock(&phydev->lock);
        ret = phy_resume(phydev);
+       mutex_unlock(&phydev->lock);
        if (ret < 0)
                return ret;
 
@@ -1026,7 +1028,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        if (err)
                goto error;
 
+       mutex_lock(&phydev->lock);
        phy_resume(phydev);
+       mutex_unlock(&phydev->lock);
        phy_led_triggers_register(phydev);
 
        return err;
@@ -1157,6 +1161,8 @@ int phy_resume(struct phy_device *phydev)
        struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
        int ret = 0;
 
+       WARN_ON(!mutex_is_locked(&phydev->lock));
+
        if (phydev->drv && phydrv->resume)
                ret = phydrv->resume(phydev);
 
@@ -1639,13 +1645,9 @@ int genphy_resume(struct phy_device *phydev)
 {
        int value;
 
-       mutex_lock(&phydev->lock);
-
        value = phy_read(phydev, MII_BMCR);
        phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
 
-       mutex_unlock(&phydev->lock);
-
        return 0;
 }
 EXPORT_SYMBOL(genphy_resume);
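
Taken together, the phy hunks move the locking of phydev->lock out of phy_resume() and into its callers: mdio_bus_phy_resume(), phy_attach_direct() and phy_start() now hold the mutex around the call, the at803x and genphy resume helpers drop their own locking, and phy_resume() itself asserts the new contract with WARN_ON(!mutex_is_locked(...)). A hedged kernel-style sketch of that caller-holds-the-lock convention (my_dev and do_resume are illustrative names):

#include <linux/bug.h>
#include <linux/mutex.h>

struct my_dev {
        struct mutex lock;
        int powered;
};

/* Contract: the caller holds dev->lock. The WARN_ON documents it and
 * makes violations visible at runtime. */
static void do_resume(struct my_dev *dev)
{
        WARN_ON(!mutex_is_locked(&dev->lock));
        dev->powered = 1;
}

static void resume_path(struct my_dev *dev)
{
        mutex_lock(&dev->lock);
        do_resume(dev);
        mutex_unlock(&dev->lock);
}
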
index e3bbc70..249ce5c 100644 (file)
@@ -526,6 +526,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
        pl->link_config.pause = MLO_PAUSE_AN;
        pl->link_config.speed = SPEED_UNKNOWN;
        pl->link_config.duplex = DUPLEX_UNKNOWN;
+       pl->link_config.an_enabled = true;
        pl->ops = ops;
        __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
 
@@ -773,6 +774,7 @@ void phylink_stop(struct phylink *pl)
                sfp_upstream_stop(pl->sfp_bus);
 
        set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
+       queue_work(system_power_efficient_wq, &pl->resolve);
        flush_work(&pl->resolve);
 }
 EXPORT_SYMBOL_GPL(phylink_stop);
@@ -950,6 +952,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
        mutex_lock(&pl->state_mutex);
        /* Configure the MAC to match the new settings */
        linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
+       pl->link_config.interface = config.interface;
        pl->link_config.speed = our_kset.base.speed;
        pl->link_config.duplex = our_kset.base.duplex;
        pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
@@ -1293,6 +1296,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
                switch (cmd) {
                case SIOCGMIIPHY:
                        mii->phy_id = pl->phydev->mdio.addr;
+                       /* fall through */
 
                case SIOCGMIIREG:
                        ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@@ -1315,6 +1319,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
                switch (cmd) {
                case SIOCGMIIPHY:
                        mii->phy_id = 0;
+                       /* fall through */
 
                case SIOCGMIIREG:
                        ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
@@ -1426,9 +1431,8 @@ static void phylink_sfp_link_down(void *upstream)
        WARN_ON(!lockdep_rtnl_is_held());
 
        set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
+       queue_work(system_power_efficient_wq, &pl->resolve);
        flush_work(&pl->resolve);
-
-       netif_carrier_off(pl->netdev);
 }
 
 static void phylink_sfp_link_up(void *upstream)
index 8a1b1f4..ab64a14 100644 (file)
@@ -356,7 +356,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream);
 void sfp_unregister_upstream(struct sfp_bus *bus)
 {
        rtnl_lock();
-       sfp_unregister_bus(bus);
+       if (bus->sfp)
+               sfp_unregister_bus(bus);
        bus->upstream = NULL;
        bus->netdev = NULL;
        rtnl_unlock();
@@ -459,7 +460,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
 void sfp_unregister_socket(struct sfp_bus *bus)
 {
        rtnl_lock();
-       sfp_unregister_bus(bus);
+       if (bus->netdev)
+               sfp_unregister_bus(bus);
        bus->sfp_dev = NULL;
        bus->sfp = NULL;
        bus->socket_ops = NULL;
index e381811..9dfc1c4 100644 (file)
@@ -351,12 +351,13 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
 {
        unsigned int los = sfp->state & SFP_F_LOS;
 
-       /* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor
-        * SFP_OPTIONS_LOS_NORMAL are set?  For now, we assume
-        * the same as SFP_OPTIONS_LOS_NORMAL set.
+       /* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
+        * are set, we assume that no LOS signal is available.
         */
-       if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED)
+       if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
                los ^= SFP_F_LOS;
+       else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
+               los = 0;
 
        if (los)
                sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -364,6 +365,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
                sfp_sm_link_up(sfp);
 }
 
+static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
+{
+       return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+               event == SFP_E_LOS_LOW) ||
+              (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+               event == SFP_E_LOS_HIGH);
+}
+
+static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
+{
+       return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+               event == SFP_E_LOS_HIGH) ||
+              (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+               event == SFP_E_LOS_LOW);
+}
+
 static void sfp_sm_fault(struct sfp *sfp, bool warn)
 {
        if (sfp->sm_retries && !--sfp->sm_retries) {
@@ -470,6 +487,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
                return -EINVAL;
        }
 
+       /* If the module requires address swap mode, warn about it */
+       if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
+               dev_warn(sfp->dev,
+                        "module address swap to access page 0xA2 is not supported.\n");
+
        return sfp_module_insert(sfp->sfp_bus, &sfp->id);
 }
 
@@ -581,9 +603,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
        case SFP_S_WAIT_LOS:
                if (event == SFP_E_TX_FAULT)
                        sfp_sm_fault(sfp, true);
-               else if (event ==
-                        (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
-                         SFP_E_LOS_HIGH : SFP_E_LOS_LOW))
+               else if (sfp_los_event_inactive(sfp, event))
                        sfp_sm_link_up(sfp);
                break;
 
@@ -591,9 +611,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
                if (event == SFP_E_TX_FAULT) {
                        sfp_sm_link_down(sfp);
                        sfp_sm_fault(sfp, true);
-               } else if (event ==
-                          (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
-                           SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) {
+               } else if (sfp_los_event_active(sfp, event)) {
                        sfp_sm_link_down(sfp);
                        sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
                }
@@ -639,7 +657,8 @@ static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo)
 {
        /* locking... and check module is present */
 
-       if (sfp->id.ext.sff8472_compliance) {
+       if (sfp->id.ext.sff8472_compliance &&
+           !(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)) {
                modinfo->type = ETH_MODULE_SFF_8472;
                modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
        } else {
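
The sfp fixes all hinge on byte order: id.ext.options is a 16-bit big-endian field read straight out of the module EEPROM, so it has to be tested against cpu_to_be16(SFP_OPTIONS_...) constants; comparing it with host-order values silently fails on little-endian machines, which is why the LOS handling was rewritten around the sfp_los_event_active()/inactive() helpers. A standalone illustration of the pitfall using htons() (the flag value itself is made up):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define OPT_LOS_INVERTED 0x0004         /* made-up flag, host byte order */

int main(void)
{
        /* Pretend this field arrived from an EEPROM in big-endian order
         * with the flag set. */
        uint16_t options_be = htons(OPT_LOS_INVERTED);

        /* Wrong on little-endian hosts: mixes byte orders. */
        printf("naive test:   %s\n",
               (options_be & OPT_LOS_INVERTED) ? "set" : "missed");

        /* Correct: convert the constant to the field's byte order. */
        printf("correct test: %s\n",
               (options_be & htons(OPT_LOS_INVERTED)) ? "set" : "missed");
        return 0;
}
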
index d8e5747..264d4af 100644 (file)
@@ -1006,17 +1006,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
        if (!ifname_is_set)
                snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
 
+       mutex_unlock(&pn->all_ppp_mutex);
+
        ret = register_netdevice(ppp->dev);
        if (ret < 0)
                goto err_unit;
 
        atomic_inc(&ppp_unit_count);
 
-       mutex_unlock(&pn->all_ppp_mutex);
-
        return 0;
 
 err_unit:
+       mutex_lock(&pn->all_ppp_mutex);
        unit_put(&pn->units_idr, ppp->file.index);
 err:
        mutex_unlock(&pn->all_ppp_mutex);
index 4e1da16..5aa59f4 100644 (file)
@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
        struct pppoe_hdr *ph;
        struct net_device *dev;
        char *start;
+       int hlen;
 
        lock_sock(sk);
        if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
        if (total_len > (dev->mtu + dev->hard_header_len))
                goto end;
 
-
-       skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
-                          0, GFP_KERNEL);
+       hlen = LL_RESERVED_SPACE(dev);
+       skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
+                          dev->needed_tailroom, 0, GFP_KERNEL);
        if (!skb) {
                error = -ENOMEM;
                goto end;
        }
 
        /* Reserve space for headers. */
-       skb_reserve(skb, dev->hard_header_len);
+       skb_reserve(skb, hlen);
        skb_reset_network_header(skb);
 
        skb->dev = dev;
@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
        /* Copy the data if there is no space for the header or if it's
         * read-only.
         */
-       if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
+       if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
                goto abort;
 
        __skb_push(skb, sizeof(*ph));
index e9489b8..0a886fd 100644 (file)
@@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q,
        DEFINE_WAIT(wait);
        ssize_t ret = 0;
 
-       if (!iov_iter_count(to))
+       if (!iov_iter_count(to)) {
+               if (skb)
+                       kfree_skb(skb);
                return 0;
+       }
 
        if (skb)
                goto put;
@@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,
                       size_t total_len, int flags)
 {
        struct tap_queue *q = container_of(sock, struct tap_queue, sock);
+       struct sk_buff *skb = m->msg_control;
        int ret;
-       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+               if (skb)
+                       kfree_skb(skb);
                return -EINVAL;
-       ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT,
-                         m->msg_control);
+       }
+       ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
index 228d4aa..ca5e375 100644 (file)
@@ -335,7 +335,7 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
                if (ring->ring->is_tx) {
                        dir = DMA_TO_DEVICE;
                        order = 0;
-                       size = tbnet_frame_size(tf);
+                       size = TBNET_FRAME_SIZE;
                } else {
                        dir = DMA_FROM_DEVICE;
                        order = TBNET_RX_PAGE_ORDER;
@@ -512,6 +512,7 @@ err_free:
 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
 {
        struct tbnet_ring *ring = &net->tx_ring;
+       struct device *dma_dev = tb_ring_dma_device(ring->ring);
        struct tbnet_frame *tf;
        unsigned int index;
 
@@ -522,7 +523,9 @@ static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
 
        tf = &ring->frames[index];
        tf->frame.size = 0;
-       tf->frame.buffer_phy = 0;
+
+       dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
+                               tbnet_frame_size(tf), DMA_TO_DEVICE);
 
        return tf;
 }
@@ -531,13 +534,8 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
                              bool canceled)
 {
        struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
-       struct device *dma_dev = tb_ring_dma_device(ring);
        struct tbnet *net = netdev_priv(tf->dev);
 
-       dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf),
-                      DMA_TO_DEVICE);
-       tf->frame.buffer_phy = 0;
-
        /* Return buffer to the ring */
        net->tx_ring.prod++;
 
@@ -548,10 +546,12 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
 static int tbnet_alloc_tx_buffers(struct tbnet *net)
 {
        struct tbnet_ring *ring = &net->tx_ring;
+       struct device *dma_dev = tb_ring_dma_device(ring->ring);
        unsigned int i;
 
        for (i = 0; i < TBNET_RING_SIZE; i++) {
                struct tbnet_frame *tf = &ring->frames[i];
+               dma_addr_t dma_addr;
 
                tf->page = alloc_page(GFP_KERNEL);
                if (!tf->page) {
@@ -559,7 +559,17 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
                        return -ENOMEM;
                }
 
+               dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
+                                       DMA_TO_DEVICE);
+               if (dma_mapping_error(dma_dev, dma_addr)) {
+                       __free_page(tf->page);
+                       tf->page = NULL;
+                       tbnet_free_buffers(ring);
+                       return -ENOMEM;
+               }
+
                tf->dev = net->dev;
+               tf->frame.buffer_phy = dma_addr;
                tf->frame.callback = tbnet_tx_callback;
                tf->frame.sof = TBIP_PDF_FRAME_START;
                tf->frame.eof = TBIP_PDF_FRAME_END;
@@ -881,19 +891,6 @@ static int tbnet_stop(struct net_device *dev)
        return 0;
 }
 
-static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf)
-{
-       dma_addr_t dma_addr;
-
-       dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf),
-                               DMA_TO_DEVICE);
-       if (dma_mapping_error(dma_dev, dma_addr))
-               return false;
-
-       tf->frame.buffer_phy = dma_addr;
-       return true;
-}
-
 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
        struct tbnet_frame **frames, u32 frame_count)
 {
@@ -908,13 +905,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                /* No need to calculate checksum so we just update the
-                * total frame count and map the frames for DMA.
+                * total frame count and sync the frames for DMA.
                 */
                for (i = 0; i < frame_count; i++) {
                        hdr = page_address(frames[i]->page);
                        hdr->frame_count = cpu_to_le32(frame_count);
-                       if (!tbnet_xmit_map(dma_dev, frames[i]))
-                               goto err_unmap;
+                       dma_sync_single_for_device(dma_dev,
+                               frames[i]->frame.buffer_phy,
+                               tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
                }
 
                return true;
@@ -983,21 +981,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
        *tucso = csum_fold(wsum);
 
        /* Checksum is finally calculated and we don't touch the memory
-        * anymore, so DMA map the frames now.
+        * anymore, so DMA sync the frames now.
         */
        for (i = 0; i < frame_count; i++) {
-               if (!tbnet_xmit_map(dma_dev, frames[i]))
-                       goto err_unmap;
+               dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
+                       tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
        }
 
        return true;
-
-err_unmap:
-       while (i--)
-               dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy,
-                              tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
-
-       return false;
 }
 
 static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
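The thunderbolt-net hunks above stop DMA-mapping TX buffers on every transmit; instead each page is mapped once at allocation time and only synced around each use, which is why the err_unmap unwind path disappears. Below is a minimal, hedged sketch of that map-once/sync-per-use pattern in generic driver code, not the tbnet functions themselves; the demo_* names are invented for illustration.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct demo_buf {
	struct page *page;	/* invented demo structure, not tbnet_frame */
	dma_addr_t dma;
};

/* Map once when the buffer is created; fail the allocation if mapping fails. */
static int demo_buf_alloc(struct device *dev, struct demo_buf *b, size_t len)
{
	b->page = alloc_page(GFP_KERNEL);
	if (!b->page)
		return -ENOMEM;

	b->dma = dma_map_page(dev, b->page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, b->dma)) {
		__free_page(b->page);
		b->page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* CPU is about to fill the buffer: sync it back to the CPU, no remap. */
static void demo_buf_for_cpu(struct device *dev, struct demo_buf *b, size_t len)
{
	dma_sync_single_for_cpu(dev, b->dma, len, DMA_TO_DEVICE);
}

/* Hand the filled buffer to the device: sync it for the device, no remap. */
static void demo_buf_for_device(struct device *dev, struct demo_buf *b, size_t len)
{
	dma_sync_single_for_device(dev, b->dma, len, DMA_TO_DEVICE);
}

/* Unmap only when the buffer is finally freed. */
static void demo_buf_free(struct device *dev, struct demo_buf *b, size_t len)
{
	dma_unmap_page(dev, b->dma, len, DMA_TO_DEVICE);
	__free_page(b->page);
}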
index 9574900..a8ec589 100644 (file)
@@ -611,6 +611,14 @@ static void tun_queue_purge(struct tun_file *tfile)
        skb_queue_purge(&tfile->sk.sk_error_queue);
 }
 
+static void tun_cleanup_tx_array(struct tun_file *tfile)
+{
+       if (tfile->tx_array.ring.queue) {
+               skb_array_cleanup(&tfile->tx_array);
+               memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
+       }
+}
+
 static void __tun_detach(struct tun_file *tfile, bool clean)
 {
        struct tun_file *ntfile;
@@ -657,8 +665,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
                            tun->dev->reg_state == NETREG_REGISTERED)
                                unregister_netdevice(tun->dev);
                }
-               if (tun)
-                       skb_array_cleanup(&tfile->tx_array);
+               tun_cleanup_tx_array(tfile);
                sock_put(&tfile->sk);
        }
 }
@@ -700,11 +707,13 @@ static void tun_detach_all(struct net_device *dev)
                /* Drop read queue */
                tun_queue_purge(tfile);
                sock_put(&tfile->sk);
+               tun_cleanup_tx_array(tfile);
        }
        list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
                tun_enable_queue(tfile);
                tun_queue_purge(tfile);
                sock_put(&tfile->sk);
+               tun_cleanup_tx_array(tfile);
        }
        BUG_ON(tun->numdisabled != 0);
 
@@ -1952,8 +1961,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 
        tun_debug(KERN_INFO, tun, "tun_do_read\n");
 
-       if (!iov_iter_count(to))
+       if (!iov_iter_count(to)) {
+               if (skb)
+                       kfree_skb(skb);
                return 0;
+       }
 
        if (!skb) {
                /* Read frames from ring */
@@ -2069,22 +2081,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 {
        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
        struct tun_struct *tun = tun_get(tfile);
+       struct sk_buff *skb = m->msg_control;
        int ret;
 
-       if (!tun)
-               return -EBADFD;
+       if (!tun) {
+               ret = -EBADFD;
+               goto out_free_skb;
+       }
 
        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
                ret = -EINVAL;
-               goto out;
+               goto out_put_tun;
        }
        if (flags & MSG_ERRQUEUE) {
                ret = sock_recv_errqueue(sock->sk, m, total_len,
                                         SOL_PACKET, TUN_TX_TIMESTAMP);
                goto out;
        }
-       ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT,
-                         m->msg_control);
+       ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
        if (ret > (ssize_t)total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2092,6 +2106,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 out:
        tun_put(tun);
        return ret;
+
+out_put_tun:
+       tun_put(tun);
+out_free_skb:
+       if (skb)
+               kfree_skb(skb);
+       return ret;
 }
 
 static int tun_peek_len(struct socket *sock)
@@ -2839,6 +2860,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 
        sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 
+       memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
+
        return 0;
 }
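In the tun hunks above, m->msg_control may carry an sk_buff whose ownership transfers to the receive path, so every early return (unsupported flags, missing tun, empty iov_iter) now frees it to avoid a leak. A minimal sketch of that ownership rule, assuming a generic receive helper; demo_recv is invented for illustration:

#include <linux/skbuff.h>
#include <linux/errno.h>

/*
 * Sketch only: once the caller hands us @skb, we must either consume it on
 * the success path or kfree_skb() it on every error path before returning.
 */
static int demo_recv(struct sk_buff *skb, bool flags_ok, size_t want)
{
	int ret;

	if (!flags_ok) {
		ret = -EINVAL;
		goto out_free;
	}
	if (!want) {
		ret = 0;	/* nothing to copy, but the skb is still ours */
		goto out_free;
	}

	/* ... the normal receive path would consume skb here ... */
	return 0;

out_free:
	if (skb)
		kfree_skb(skb);
	return ret;
}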
 
index 94c7804..ec56ff2 100644 (file)
@@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
                buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
                dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
                dev->rx_qlen = 4;
+               dev->tx_qlen = 4;
        }
 
        ret = lan78xx_write_reg(dev, BURST_CAP, buf);
index c750cf7..728819f 100644 (file)
@@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
                net->hard_header_len = 0;
                net->addr_len        = 0;
                net->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+               set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
                netdev_dbg(net, "mode: raw IP\n");
        } else if (!net->header_ops) { /* don't bother if already set */
                ether_setup(net);
+               clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
                netdev_dbg(net, "mode: Ethernet\n");
        }
 
@@ -1098,6 +1100,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+       {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
        {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
@@ -1202,12 +1205,14 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9079, 10)},   /* Sierra Wireless EM74xx */
        {QMI_FIXED_INTF(0x1199, 0x907b, 8)},    /* Sierra Wireless EM74xx */
        {QMI_FIXED_INTF(0x1199, 0x907b, 10)},   /* Sierra Wireless EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9091, 8)},    /* Sierra Wireless EM7565 */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
        {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
+       {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
        {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},    /* Telewell TW-3G HSPA+ */
index d51d9ab..0657203 100644 (file)
@@ -606,6 +606,7 @@ enum rtl8152_flags {
        PHY_RESET,
        SCHEDULE_NAPI,
        GREEN_ETHERNET,
+       DELL_TB_RX_AGG_BUG,
 };
 
 /* Define these values to match your device */
@@ -1798,6 +1799,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
                dev_kfree_skb_any(skb);
 
                remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
+
+               if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+                       break;
        }
 
        if (!skb_queue_empty(&skb_head)) {
@@ -4133,6 +4137,9 @@ static void r8153_init(struct r8152 *tp)
        /* rx aggregation */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
        ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
+       if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+               ocp_data |= RX_AGG_DISABLE;
+
        ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 
        rtl_tally_reset(tp);
@@ -5207,6 +5214,12 @@ static int rtl8152_probe(struct usb_interface *intf,
                netdev->hw_features &= ~NETIF_F_RXCSUM;
        }
 
+       if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
+           udev->serial && !strcmp(udev->serial, "000001000000")) {
+               dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation\n");
+               set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
+       }
+
        netdev->ethtool_ops = &ops;
        netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
 
index 80348b6..8a22ff6 100644 (file)
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
 void usbnet_defer_kevent (struct usbnet *dev, int work)
 {
        set_bit (work, &dev->flags);
-       if (!schedule_work (&dev->kevent)) {
-               if (net_ratelimit())
-                       netdev_err(dev->net, "kevent %d may have been dropped\n", work);
-       } else {
+       if (!schedule_work (&dev->kevent))
+               netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
+       else
                netdev_dbg(dev->net, "kevent %d scheduled\n", work);
-       }
 }
 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
 
@@ -484,7 +482,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                return -ENOLINK;
        }
 
-       skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
+       if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
+               skb = __netdev_alloc_skb(dev->net, size, flags);
+       else
+               skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
        if (!skb) {
                netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
                usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
index 19a985e..559b215 100644 (file)
@@ -756,7 +756,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                int num_skb_frags;
 
                buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
-               if (unlikely(!ctx)) {
+               if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers out of %d missing\n",
                                 dev->name, num_buf,
                                 virtio16_to_cpu(vi->vdev,
index d1c7029..cf95290 100644 (file)
@@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
                                          rq->rx_ring[i].basePA);
                        rq->rx_ring[i].base = NULL;
                }
-               rq->buf_info[i] = NULL;
        }
 
        if (rq->data_ring.base) {
@@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
                        (rq->rx_ring[0].size + rq->rx_ring[1].size);
                dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
                                  rq->buf_info_pa);
+               rq->buf_info[0] = rq->buf_info[1] = NULL;
        }
 }
 
index feb1b2e..139c61c 100644 (file)
@@ -673,8 +673,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
                                  struct sock *sk,
                                  struct sk_buff *skb)
 {
-       /* don't divert multicast */
-       if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+       /* don't divert multicast or local broadcast */
+       if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
+           ipv4_is_lbcast(ip_hdr(skb)->daddr))
                return skb;
 
        if (qdisc_tx_is_default(vrf_dev))
index 7ac4870..c3e34e3 100644 (file)
@@ -874,8 +874,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 
 static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
                              const unsigned char *addr, union vxlan_addr ip,
-                             __be16 port, __be32 src_vni, u32 vni, u32 ifindex,
-                             u16 vid)
+                             __be16 port, __be32 src_vni, __be32 vni,
+                             u32 ifindex, u16 vid)
 {
        struct vxlan_fdb *f;
        struct vxlan_rdst *rd = NULL;
@@ -2155,6 +2155,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                ndst = &rt->dst;
+               if (skb_dst(skb)) {
+                       int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
+
+                       skb_dst_update_pmtu(skb, mtu);
+               }
+
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
                err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2190,6 +2196,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                                goto out_unlock;
                }
 
+               if (skb_dst(skb)) {
+                       int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
+
+                       skb_dst_update_pmtu(skb, mtu);
+               }
+
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
                skb_scrub_packet(skb, xnet);
@@ -3103,6 +3115,11 @@ static void vxlan_config_apply(struct net_device *dev,
 
                max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
                                           VXLAN_HEADROOM);
+               if (max_mtu < ETH_MIN_MTU)
+                       max_mtu = ETH_MIN_MTU;
+
+               if (!changelink && !conf->mtu)
+                       dev->mtu = max_mtu;
        }
 
        if (dev->mtu > max_mtu)
index 37b1e0d..90a4ad9 100644 (file)
@@ -494,18 +494,11 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
                             break;
                     }
 
-                    data = kmalloc(xc.len, GFP_KERNEL);
-                    if (!data) {
-                            ret = -ENOMEM;
+                    data = memdup_user(xc.data, xc.len);
+                    if (IS_ERR(data)) {
+                            ret = PTR_ERR(data);
                             break;
                     }
-                    
-                    if(copy_from_user(data, xc.data, xc.len))
-                    {
-                       kfree(data);
-                       ret = -ENOMEM;
-                       break;
-                    }
 
                     printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
 
index dfb26f0..1b05b5d 100644 (file)
@@ -1113,7 +1113,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
                if (!avp->assoc)
                        return false;
 
-               skb = ieee80211_nullfunc_get(sc->hw, vif);
+               skb = ieee80211_nullfunc_get(sc->hw, vif, false);
                if (!skb)
                        return false;
 
index f7d228b..987f125 100644 (file)
@@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
                }
        }
 
+       if (changed & IEEE80211_CONF_CHANGE_PS) {
+               list_for_each_entry(tmp, &wcn->vif_list, list) {
+                       vif = wcn36xx_priv_to_vif(tmp);
+                       if (hw->conf.flags & IEEE80211_CONF_PS) {
+                               if (vif->bss_conf.ps) /* ps allowed ? */
+                                       wcn36xx_pmc_enter_bmps_state(wcn, vif);
+                       } else {
+                               wcn36xx_pmc_exit_bmps_state(wcn, vif);
+                       }
+               }
+       }
+
        mutex_unlock(&wcn->conf_mutex);
 
        return 0;
@@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
                vif_priv->dtim_period = bss_conf->dtim_period;
        }
 
-       if (changed & BSS_CHANGED_PS) {
-               wcn36xx_dbg(WCN36XX_DBG_MAC,
-                           "mac bss PS set %d\n",
-                           bss_conf->ps);
-               if (bss_conf->ps) {
-                       wcn36xx_pmc_enter_bmps_state(wcn, vif);
-               } else {
-                       wcn36xx_pmc_exit_bmps_state(wcn, vif);
-               }
-       }
-
        if (changed & BSS_CHANGED_BSSID) {
                wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
                            bss_conf->bssid);
index 589fe5f..1976b80 100644 (file)
@@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
        struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
        if (WCN36XX_BMPS != vif_priv->pw_state) {
-               wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
-               return -EINVAL;
+               /* Unbalanced call or last BMPS enter failed */
+               wcn36xx_dbg(WCN36XX_DBG_PMC,
+                           "Not in BMPS mode, no need to exit\n");
+               return -EALREADY;
        }
        wcn36xx_smd_exit_bmps(wcn, vif);
        vif_priv->pw_state = WCN36XX_FULL_POWER;
index 6a59d06..9be0b05 100644 (file)
@@ -182,12 +182,9 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
 
        err = request_firmware(&clm, clm_name, dev);
        if (err) {
-               if (err == -ENOENT) {
-                       brcmf_dbg(INFO, "continue with CLM data currently present in firmware\n");
-                       return 0;
-               }
-               brcmf_err("request CLM blob file failed (%d)\n", err);
-               return err;
+               brcmf_info("no clm_blob available(err=%d), device may have limited channels available\n",
+                          err);
+               return 0;
        }
 
        chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
index 310c4e2..cdf9e41 100644 (file)
@@ -2070,7 +2070,7 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
        return head_pad;
 }
 
-/**
+/*
  * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
  * bus layer usage.
  */
@@ -4121,8 +4121,8 @@ release:
        sdio_release_host(sdiodev->func[1]);
 fail:
        brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
-       device_release_driver(dev);
        device_release_driver(&sdiodev->func[2]->dev);
+       device_release_driver(dev);
 }
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
index 87b4434..dfa111b 100644 (file)
@@ -68,6 +68,9 @@
  * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
  * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
  * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
+ * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using
+ *     monitor mode. Note this queue is the same as the queue for P2P device
+ *     but we can't have active monitor mode along with P2P device anyway.
  * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
  * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
  *     that we are never left without the possibility to connect to an AP.
@@ -87,6 +90,7 @@ enum iwl_mvm_dqa_txq {
        IWL_MVM_DQA_CMD_QUEUE = 0,
        IWL_MVM_DQA_AUX_QUEUE = 1,
        IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
+       IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,
        IWL_MVM_DQA_GCAST_QUEUE = 3,
        IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
        IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
index 9c889a3..223fb77 100644 (file)
@@ -209,8 +209,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
 
 static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
 {
-       iwl_fw_dbg_stop_recording(fwrt);
-
        fwrt->dump.conf = FW_DBG_INVALID;
 }
 
index ca0b553..921cab9 100644 (file)
 #define FH_RSCSR_FRAME_INVALID         0x55550000
 #define FH_RSCSR_FRAME_ALIGN           0x40
 #define FH_RSCSR_RPA_EN                        BIT(25)
+#define FH_RSCSR_RADA_EN               BIT(26)
 #define FH_RSCSR_RXQ_POS               16
 #define FH_RSCSR_RXQ_MASK              0x3F0000
 
@@ -128,7 +129,8 @@ struct iwl_rx_packet {
         * 31:    flag flush RB request
         * 30:    flag ignore TC (terminal counter) request
         * 29:    flag fast IRQ request
-        * 28-26: Reserved
+        * 28-27: Reserved
+        * 26:    RADA enabled
         * 25:    Offload enabled
         * 24:    RPF enabled
         * 23:    RSS enabled
index a2bf530..2f22e14 100644 (file)
@@ -787,7 +787,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
                                         u32 action)
 {
        struct iwl_mac_ctx_cmd cmd = {};
-       u32 tfd_queue_msk = 0;
+       u32 tfd_queue_msk = BIT(mvm->snif_queue);
        int ret;
 
        WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
index 4575595..55ab534 100644 (file)
@@ -972,6 +972,7 @@ struct iwl_mvm {
 
        /* Tx queues */
        u16 aux_queue;
+       u16 snif_queue;
        u16 probe_queue;
        u16 p2p_dev_queue;
 
@@ -1060,6 +1061,7 @@ struct iwl_mvm {
  * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
  * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
  * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
+ * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
  */
 enum iwl_mvm_status {
        IWL_MVM_STATUS_HW_RFKILL,
@@ -1071,6 +1073,7 @@ enum iwl_mvm_status {
        IWL_MVM_STATUS_ROC_AUX_RUNNING,
        IWL_MVM_STATUS_D3_RECONFIG,
        IWL_MVM_STATUS_FIRMWARE_RUNNING,
+       IWL_MVM_STATUS_NEED_FLUSH_P2P,
 };
 
 /* Keep track of completed init configuration */
index 7078b7e..45470b6 100644 (file)
@@ -624,6 +624,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
 
        mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+       mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
        mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
        mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
 
index 76dc583..3b8d443 100644 (file)
@@ -213,6 +213,7 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
                                        struct ieee80211_rx_status *rx_status)
 {
        int energy_a, energy_b, max_energy;
+       u32 rate_flags = le32_to_cpu(desc->rate_n_flags);
 
        energy_a = desc->energy_a;
        energy_a = energy_a ? -energy_a : S8_MIN;
@@ -224,7 +225,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
                        energy_a, energy_b, max_energy);
 
        rx_status->signal = max_energy;
-       rx_status->chains = 0; /* TODO: phy info */
+       rx_status->chains =
+               (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
        rx_status->chain_signal[0] = energy_a;
        rx_status->chain_signal[1] = energy_b;
        rx_status->chain_signal[2] = S8_MIN;
@@ -232,8 +234,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
 
 static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                             struct ieee80211_rx_status *stats,
-                            struct iwl_rx_mpdu_desc *desc, int queue,
-                            u8 *crypt_len)
+                            struct iwl_rx_mpdu_desc *desc, u32 pkt_flags,
+                            int queue, u8 *crypt_len)
 {
        u16 status = le16_to_cpu(desc->status);
 
@@ -253,6 +255,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                        return -1;
 
                stats->flag |= RX_FLAG_DECRYPTED;
+               if (pkt_flags & FH_RSCSR_RADA_EN)
+                       stats->flag |= RX_FLAG_MIC_STRIPPED;
                *crypt_len = IEEE80211_CCMP_HDR_LEN;
                return 0;
        case IWL_RX_MPDU_STATUS_SEC_TKIP:
@@ -270,6 +274,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
                                IWL_RX_MPDU_STATUS_SEC_WEP)
                        *crypt_len = IEEE80211_WEP_IV_LEN;
+
+               if (pkt_flags & FH_RSCSR_RADA_EN)
+                       stats->flag |= RX_FLAG_ICV_STRIPPED;
+
                return 0;
        case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
                if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
@@ -848,7 +856,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 
        rx_status = IEEE80211_SKB_RXCB(skb);
 
-       if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) {
+       if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc,
+                             le32_to_cpu(pkt->len_n_flags), queue,
+                             &crypt_len)) {
                kfree_skb(skb);
                return;
        }
index c19f984..1add561 100644 (file)
@@ -1709,29 +1709,29 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
        sta->sta_id = IWL_MVM_INVALID_STA;
 }
 
-static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
+                                         u8 sta_id, u8 fifo)
 {
        unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
                                        mvm->cfg->base_params->wd_timeout :
                                        IWL_WATCHDOG_DISABLED;
 
        if (iwl_mvm_has_new_tx_api(mvm)) {
-               int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
-                                                   mvm->aux_sta.sta_id,
-                                                   IWL_MAX_TID_COUNT,
-                                                   wdg_timeout);
-               mvm->aux_queue = queue;
+               int tvqm_queue =
+                       iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
+                                               IWL_MAX_TID_COUNT,
+                                               wdg_timeout);
+               *queue = tvqm_queue;
        } else {
                struct iwl_trans_txq_scd_cfg cfg = {
-                       .fifo = IWL_MVM_TX_FIFO_MCAST,
-                       .sta_id = mvm->aux_sta.sta_id,
+                       .fifo = fifo,
+                       .sta_id = sta_id,
                        .tid = IWL_MAX_TID_COUNT,
                        .aggregate = false,
                        .frame_limit = IWL_FRAME_LIMIT,
                };
 
-               iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
-                                  wdg_timeout);
+               iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
        }
 }
 
@@ -1750,7 +1750,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 
        /* Map Aux queue to fifo - needs to happen before adding Aux station */
        if (!iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_queue(mvm);
+               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
+                                             mvm->aux_sta.sta_id,
+                                             IWL_MVM_TX_FIFO_MCAST);
 
        ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
                                         MAC_INDEX_AUX, 0);
@@ -1764,7 +1766,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
         * to firmware so enable queue here - after the station was added
         */
        if (iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_queue(mvm);
+               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
+                                             mvm->aux_sta.sta_id,
+                                             IWL_MVM_TX_FIFO_MCAST);
 
        return 0;
 }
@@ -1772,10 +1776,31 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int ret;
 
        lockdep_assert_held(&mvm->mutex);
-       return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
+
+       /* Map snif queue to fifo - must happen before adding snif station */
+       if (!iwl_mvm_has_new_tx_api(mvm))
+               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
+                                             mvm->snif_sta.sta_id,
+                                             IWL_MVM_TX_FIFO_BE);
+
+       ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
                                         mvmvif->id, 0);
+       if (ret)
+               return ret;
+
+       /*
+        * For 22000 firmware and on we cannot add queue to a station unknown
+        * to firmware so enable queue here - after the station was added
+        */
+       if (iwl_mvm_has_new_tx_api(mvm))
+               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
+                                             mvm->snif_sta.sta_id,
+                                             IWL_MVM_TX_FIFO_BE);
+
+       return 0;
 }
 
 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1784,6 +1809,8 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        lockdep_assert_held(&mvm->mutex);
 
+       iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
+                           IWL_MAX_TID_COUNT, 0);
        ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
        if (ret)
                IWL_WARN(mvm, "Failed sending remove station\n");
index 4d03149..e25cda9 100644 (file)
@@ -132,6 +132,24 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
         * executed, and a new time event means a new command.
         */
        iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
+
+       /* Do the same for the P2P device queue (STA) */
+       if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
+               struct iwl_mvm_vif *mvmvif;
+
+               /*
+                * NB: access to this pointer would be racy, but the flush bit
+                * can only be set when we had a P2P-Device VIF, and we have a
+                * flush of this work in iwl_mvm_prepare_mac_removal() so it's
+                * not really racy.
+                */
+
+               if (!WARN_ON(!mvm->p2p_device_vif)) {
+                       mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
+                       iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
+                                         CMD_ASYNC);
+               }
+       }
 }
 
 static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
@@ -855,10 +873,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
 
        mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
 
-       if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
+       if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
-       else
+               set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+       } else {
                iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
+       }
 
        iwl_mvm_roc_finished(mvm);
 }
index 593b7f9..333bcb7 100644 (file)
@@ -657,7 +657,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                        if (ap_sta_id != IWL_MVM_INVALID_STA)
                                sta_id = ap_sta_id;
                } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
-                       queue = mvm->aux_queue;
+                       queue = mvm->snif_queue;
+                       sta_id = mvm->snif_sta.sta_id;
                }
        }
 
index d46115e..03ffd84 100644 (file)
@@ -1134,9 +1134,18 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
        unsigned int default_timeout =
                cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS))
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
+               /*
+                * We can't know when the station is asleep or awake, so we
+                * must disable the queue hang detection.
+                */
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
+                   vif && vif->type == NL80211_IFTYPE_AP)
+                       return IWL_WATCHDOG_DISABLED;
                return iwlmvm_mod_params.tfd_q_hang_detect ?
                        default_timeout : IWL_WATCHDOG_DISABLED;
+       }
 
        trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
        txq_timer = (void *)trigger->data;
@@ -1163,6 +1172,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
                return le32_to_cpu(txq_timer->p2p_go);
        case NL80211_IFTYPE_P2P_DEVICE:
                return le32_to_cpu(txq_timer->p2p_device);
+       case NL80211_IFTYPE_MONITOR:
+               return default_timeout;
        default:
                WARN_ON(1);
                return mvm->cfg->base_params->wd_timeout;
index f21fe59..ccd7c33 100644 (file)
@@ -553,6 +553,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
@@ -664,6 +665,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
        {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
        {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},
 
 #endif /* CONFIG_IWLMVM */
 
index d749abe..403e65c 100644 (file)
@@ -670,11 +670,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
        return index & (q->n_window - 1);
 }
 
-static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
                                     struct iwl_txq *txq, int idx)
 {
-       return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq,
-                                                                        idx);
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       if (trans->cfg->use_tfh)
+               idx = iwl_pcie_get_cmd_index(txq, idx);
+
+       return txq->tfds + trans_pcie->tfd_size * idx;
 }
 
 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
index c59f458..ac05fd1 100644 (file)
@@ -49,6 +49,7 @@
  *
  *****************************************************************************/
 #include "iwl-trans.h"
+#include "iwl-prph.h"
 #include "iwl-context-info.h"
 #include "internal.h"
 
@@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
 
        trans_pcie->is_down = true;
 
+       /* Stop dbgc before stopping device */
+       iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
+       udelay(100);
+       iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+
        /* tell the device to stop sending interrupts */
        iwl_disable_interrupts(trans);
 
index b7a5160..4541c86 100644 (file)
@@ -166,6 +166,7 @@ static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
                print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
                               4, buf, i, 0);
        }
+       goto out;
 
 err_read:
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
@@ -1226,6 +1227,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 
        trans_pcie->is_down = true;
 
+       /* Stop dbgc before stopping device */
+       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
+       } else {
+               iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
+               udelay(100);
+               iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+       }
+
        /* tell the device to stop sending interrupts */
        iwl_disable_interrupts(trans);
 
index 16b345f..6d0a907 100644 (file)
@@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
 
 static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
        /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
         * idx is bounded by n_window
         */
@@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
        lockdep_assert_held(&txq->lock);
 
        iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
-                               iwl_pcie_get_tfd(trans_pcie, txq, idx));
+                               iwl_pcie_get_tfd(trans, txq, idx));
 
        /* free SKB */
        if (txq->entries) {
@@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
                                            struct sk_buff *skb,
                                            struct iwl_cmd_meta *out_meta)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-       struct iwl_tfh_tfd *tfd =
-               iwl_pcie_get_tfd(trans_pcie, txq, idx);
+       struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
        dma_addr_t tb_phys;
        bool amsdu;
        int i, len, tb1_len, tb2_len, hdr_len;
@@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
        u8 group_id = iwl_cmd_groupid(cmd->id);
        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
-       struct iwl_tfh_tfd *tfd =
-               iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+       struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
 
        memset(tfd, 0, sizeof(*tfd));
 
index fed6d84..3f85713 100644 (file)
@@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i, num_tbs;
-       void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
+       void *tfd = iwl_pcie_get_tfd(trans, txq, index);
 
        /* Sanity check on number of chunks */
        num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
        }
 
        trace_iwlwifi_dev_tx(trans->dev, skb,
-                            iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+                            iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
                             trans_pcie->tfd_size,
                             &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
                             hdr_len);
@@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                IEEE80211_CCMP_HDR_LEN : 0;
 
        trace_iwlwifi_dev_tx(trans->dev, skb,
-                            iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+                            iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
                             trans_pcie->tfd_size,
                             &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
 
@@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
               IWL_FIRST_TB_SIZE);
 
-       tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+       tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
                                         iwl_pcie_tfd_get_num_tbs(trans, tfd));
index 10b075a..f6d4a50 100644 (file)
@@ -489,6 +489,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
 
 static spinlock_t hwsim_radio_lock;
 static LIST_HEAD(hwsim_radios);
+static struct workqueue_struct *hwsim_wq;
 static int hwsim_radio_idx;
 
 static struct platform_driver mac80211_hwsim_driver = {
@@ -684,6 +685,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
        hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
        hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                         IEEE80211_STYPE_NULLFUNC |
+                                        IEEE80211_FCTL_TODS |
                                         (ps ? IEEE80211_FCTL_PM : 0));
        hdr->duration_id = cpu_to_le16(0);
        memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
@@ -3119,6 +3121,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
        if (info->attrs[HWSIM_ATTR_CHANNELS])
                param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
 
+       if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
+               GENL_SET_ERR_MSG(info, "too many channels specified");
+               return -EINVAL;
+       }
+
        if (info->attrs[HWSIM_ATTR_NO_VIF])
                param.no_vif = true;
 
@@ -3215,7 +3222,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
                if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
                        continue;
 
-               skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+               skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
                if (!skb) {
                        res = -ENOMEM;
                        goto out_err;
@@ -3341,7 +3348,7 @@ static void remove_user_radios(u32 portid)
                if (entry->destroy_on_close && entry->portid == portid) {
                        list_del(&entry->list);
                        INIT_WORK(&entry->destroy_work, destroy_radio);
-                       schedule_work(&entry->destroy_work);
+                       queue_work(hwsim_wq, &entry->destroy_work);
                }
        }
        spin_unlock_bh(&hwsim_radio_lock);
@@ -3416,7 +3423,7 @@ static void __net_exit hwsim_exit_net(struct net *net)
 
                list_del(&data->list);
                INIT_WORK(&data->destroy_work, destroy_radio);
-               schedule_work(&data->destroy_work);
+               queue_work(hwsim_wq, &data->destroy_work);
        }
        spin_unlock_bh(&hwsim_radio_lock);
 }
@@ -3448,6 +3455,10 @@ static int __init init_mac80211_hwsim(void)
 
        spin_lock_init(&hwsim_radio_lock);
 
+       hwsim_wq = alloc_workqueue("hwsim_wq", WQ_MEM_RECLAIM, 0);
+       if (!hwsim_wq)
+               return -ENOMEM;
+
        err = register_pernet_device(&hwsim_net_ops);
        if (err)
                return err;
@@ -3586,8 +3597,11 @@ static void __exit exit_mac80211_hwsim(void)
        hwsim_exit_netlink();
 
        mac80211_hwsim_free();
+       flush_workqueue(hwsim_wq);
+
        unregister_netdev(hwsim_mon);
        platform_driver_unregister(&mac80211_hwsim_driver);
        unregister_pernet_device(&hwsim_net_ops);
+       destroy_workqueue(hwsim_wq);
 }
 module_exit(exit_mac80211_hwsim);
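The hwsim hunks above move radio teardown onto a dedicated WQ_MEM_RECLAIM workqueue (queued with queue_work() instead of schedule_work()) and flush it before the module exits. A minimal sketch of that dedicated-workqueue lifecycle as a generic module, with invented demo_* names:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/* teardown that must not run in the caller's atomic context goes here */
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);	/* own queue, not the system one */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_wq);	/* wait for any queued teardown */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");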
index 03687a8..38678e9 100644 (file)
@@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
 
                priv->bss_loss_state++;
 
-               skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+               skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
                WARN_ON(!skb);
                if (skb)
                        cw1200_tx(priv->hw, NULL, skb);
@@ -2265,7 +2265,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)
                .rate = 0xFF,
        };
 
-       frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+       frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
        if (!frame.skb)
                return -ENOMEM;
 
index 9915d83..6d02c66 100644 (file)
@@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
                size = sizeof(struct wl12xx_null_data_template);
                ptr = NULL;
        } else {
-               skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+               skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
                if (!skb)
                        goto out;
                size = skb->len;
index 2bfc12f..761cf85 100644 (file)
@@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                ptr = NULL;
        } else {
                skb = ieee80211_nullfunc_get(wl->hw,
-                                            wl12xx_wlvif_to_vif(wlvif));
+                                            wl12xx_wlvif_to_vif(wlvif),
+                                            false);
                if (!skb)
                        goto out;
                size = skb->len;
@@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
        struct sk_buff *skb = NULL;
        int ret = -ENOMEM;
 
-       skb = ieee80211_nullfunc_get(wl->hw, vif);
+       skb = ieee80211_nullfunc_get(wl->hw, vif, false);
        if (!skb)
                goto out;
 
index d6dff34..78ebe49 100644 (file)
@@ -186,7 +186,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
-               pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
+               pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
index 18c85e5..9bd7dde 100644 (file)
@@ -87,6 +87,8 @@ struct netfront_cb {
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
+static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+
 struct netfront_stats {
        u64                     packets;
        u64                     bytes;
@@ -1324,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 
        netif_carrier_off(netdev);
 
+       xenbus_switch_state(dev, XenbusStateInitialising);
        return netdev;
 
  exit:
@@ -2020,10 +2023,12 @@ static void netback_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateClosed:
+               wake_up_all(&module_unload_q);
                if (dev->state == XenbusStateClosed)
                        break;
                /* Missed the backend's CLOSING state -- fallthrough */
        case XenbusStateClosing:
+               wake_up_all(&module_unload_q);
                xenbus_frontend_closed(dev);
                break;
        }
@@ -2129,6 +2134,20 @@ static int xennet_remove(struct xenbus_device *dev)
 
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
+       if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
+               xenbus_switch_state(dev, XenbusStateClosing);
+               wait_event(module_unload_q,
+                          xenbus_read_driver_state(dev->otherend) ==
+                          XenbusStateClosing);
+
+               xenbus_switch_state(dev, XenbusStateClosed);
+               wait_event(module_unload_q,
+                          xenbus_read_driver_state(dev->otherend) ==
+                          XenbusStateClosed ||
+                          xenbus_read_driver_state(dev->otherend) ==
+                          XenbusStateUnknown);
+       }
+
        xennet_disconnect_backend(info);
 
        unregister_netdev(info->netdev);
index e949e33..c586bcd 100644 (file)
@@ -211,12 +211,12 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
        return ret;
 }
 
-static int btt_log_read_pair(struct arena_info *arena, u32 lane,
-                       struct log_entry *ent)
+static int btt_log_group_read(struct arena_info *arena, u32 lane,
+                       struct log_group *log)
 {
        return arena_read_bytes(arena,
-                       arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
-                       2 * LOG_ENT_SIZE, 0);
+                       arena->logoff + (lane * LOG_GRP_SIZE), log,
+                       LOG_GRP_SIZE, 0);
 }
 
 static struct dentry *debugfs_root;
@@ -256,6 +256,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
        debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
        debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
        debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
+       debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
+       debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
 }
 
 static void btt_debugfs_init(struct btt *btt)
@@ -274,6 +276,11 @@ static void btt_debugfs_init(struct btt *btt)
        }
 }
 
+static u32 log_seq(struct log_group *log, int log_idx)
+{
+       return le32_to_cpu(log->ent[log_idx].seq);
+}
+
 /*
  * This function accepts two log entries, and uses the
  * sequence number to find the 'older' entry.
@@ -283,8 +290,10 @@ static void btt_debugfs_init(struct btt *btt)
  *
  * TODO The logic feels a bit kludge-y. make it better..
  */
-static int btt_log_get_old(struct log_entry *ent)
+static int btt_log_get_old(struct arena_info *a, struct log_group *log)
 {
+       int idx0 = a->log_index[0];
+       int idx1 = a->log_index[1];
        int old;
 
        /*
@@ -292,23 +301,23 @@ static int btt_log_get_old(struct log_entry *ent)
         * the next time, the following logic works out to put this
         * (next) entry into [1]
         */
-       if (ent[0].seq == 0) {
-               ent[0].seq = cpu_to_le32(1);
+       if (log_seq(log, idx0) == 0) {
+               log->ent[idx0].seq = cpu_to_le32(1);
                return 0;
        }
 
-       if (ent[0].seq == ent[1].seq)
+       if (log_seq(log, idx0) == log_seq(log, idx1))
                return -EINVAL;
-       if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
+       if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
                return -EINVAL;
 
-       if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
-               if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
+       if (log_seq(log, idx0) < log_seq(log, idx1)) {
+               if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
                        old = 0;
                else
                        old = 1;
        } else {
-               if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
+               if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
                        old = 1;
                else
                        old = 0;
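btt_log_get_old() above picks the older of two journal slots purely from their sequence numbers, which cycle 1 -> 2 -> 3 -> 1 (0 means never written; equal values or a sum above 5 indicate corruption). A small standalone, hedged sketch of just that comparison, outside the kernel:

#include <stdint.h>
#include <stdio.h>

/*
 * Returns 0 or 1 for the older slot, -1 for a corrupt pair. The slot whose
 * successor (mod the 1..3 cycle) equals the other slot's seq is the older one.
 */
static int older_slot(uint32_t seq0, uint32_t seq1)
{
	if (seq0 == 0)			/* never written: report slot 0 as old */
		return 0;
	if (seq0 == seq1 || seq0 + seq1 > 5)
		return -1;		/* corrupt: equal or out of range */
	if (seq0 < seq1)
		return (seq1 - seq0 == 1) ? 0 : 1;
	return (seq0 - seq1 == 1) ? 1 : 0;
}

int main(void)
{
	/* (1,2): slot 0 older; (1,3): slot 1 older (1 follows 3 after the wrap); (2,2): corrupt */
	printf("%d %d %d\n", older_slot(1, 2), older_slot(1, 3), older_slot(2, 2));
	return 0;
}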
@@ -328,17 +337,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
 {
        int ret;
        int old_ent, ret_ent;
-       struct log_entry log[2];
+       struct log_group log;
 
-       ret = btt_log_read_pair(arena, lane, log);
+       ret = btt_log_group_read(arena, lane, &log);
        if (ret)
                return -EIO;
 
-       old_ent = btt_log_get_old(log);
+       old_ent = btt_log_get_old(arena, &log);
        if (old_ent < 0 || old_ent > 1) {
                dev_err(to_dev(arena),
                                "log corruption (%d): lane %d seq [%d, %d]\n",
-                       old_ent, lane, log[0].seq, log[1].seq);
+                               old_ent, lane, log.ent[arena->log_index[0]].seq,
+                               log.ent[arena->log_index[1]].seq);
                /* TODO set error state? */
                return -EIO;
        }
@@ -346,7 +356,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
        ret_ent = (old_flag ? old_ent : (1 - old_ent));
 
        if (ent != NULL)
-               memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);
+               memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
 
        return ret_ent;
 }
@@ -360,17 +370,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
                        u32 sub, struct log_entry *ent, unsigned long flags)
 {
        int ret;
-       /*
-        * Ignore the padding in log_entry for calculating log_half.
-        * The entry is 'committed' when we write the sequence number,
-        * and we want to ensure that that is the last thing written.
-        * We don't bother writing the padding as that would be extra
-        * media wear and write amplification
-        */
-       unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
-       u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
+       u32 group_slot = arena->log_index[sub];
+       unsigned int log_half = LOG_ENT_SIZE / 2;
        void *src = ent;
+       u64 ns_off;
 
+       ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
+               (group_slot * LOG_ENT_SIZE);
        /* split the 16B write into atomic, durable halves */
        ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
        if (ret)
@@ -453,7 +459,7 @@ static int btt_log_init(struct arena_info *arena)
 {
        size_t logsize = arena->info2off - arena->logoff;
        size_t chunk_size = SZ_4K, offset = 0;
-       struct log_entry log;
+       struct log_entry ent;
        void *zerobuf;
        int ret;
        u32 i;
@@ -485,11 +491,11 @@ static int btt_log_init(struct arena_info *arena)
        }
 
        for (i = 0; i < arena->nfree; i++) {
-               log.lba = cpu_to_le32(i);
-               log.old_map = cpu_to_le32(arena->external_nlba + i);
-               log.new_map = cpu_to_le32(arena->external_nlba + i);
-               log.seq = cpu_to_le32(LOG_SEQ_INIT);
-               ret = __btt_log_write(arena, i, 0, &log, 0);
+               ent.lba = cpu_to_le32(i);
+               ent.old_map = cpu_to_le32(arena->external_nlba + i);
+               ent.new_map = cpu_to_le32(arena->external_nlba + i);
+               ent.seq = cpu_to_le32(LOG_SEQ_INIT);
+               ret = __btt_log_write(arena, i, 0, &ent, 0);
                if (ret)
                        goto free;
        }
@@ -594,6 +600,123 @@ static int btt_freelist_init(struct arena_info *arena)
        return 0;
 }
 
+static bool ent_is_padding(struct log_entry *ent)
+{
+       return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
+               && (ent->seq == 0);
+}
+
+/*
+ * Detecting valid log indices: We read a log group (see the comments in btt.h
+ * for a description of a 'log_group' and its 'slots'), and iterate over its
+ * four slots. We expect that a padding slot will be all-zeroes, and use this
+ * to detect a padding slot vs. an actual entry.
+ *
+ * If a log_group is in the initial state, i.e. hasn't been used since the
+ * creation of this BTT layout, it will have three of the four slots with
+ * zeroes. We skip over these log_groups for the detection of log_index. If
+ * all log_groups are in the initial state (i.e. the BTT has never been
+ * written to), it is safe to assume the 'new format' of log entries in slots
+ * (0, 1).
+ */
+static int log_set_indices(struct arena_info *arena)
+{
+       bool idx_set = false, initial_state = true;
+       int ret, log_index[2] = {-1, -1};
+       u32 i, j, next_idx = 0;
+       struct log_group log;
+       u32 pad_count = 0;
+
+       for (i = 0; i < arena->nfree; i++) {
+               ret = btt_log_group_read(arena, i, &log);
+               if (ret < 0)
+                       return ret;
+
+               for (j = 0; j < 4; j++) {
+                       if (!idx_set) {
+                               if (ent_is_padding(&log.ent[j])) {
+                                       pad_count++;
+                                       continue;
+                               } else {
+                                       /* Skip if index has been recorded */
+                                       if ((next_idx == 1) &&
+                                               (j == log_index[0]))
+                                               continue;
+                                       /* valid entry, record index */
+                                       log_index[next_idx] = j;
+                                       next_idx++;
+                               }
+                               if (next_idx == 2) {
+                                       /* two valid entries found */
+                                       idx_set = true;
+                               } else if (next_idx > 2) {
+                                       /* too many valid indices */
+                                       return -ENXIO;
+                               }
+                       } else {
+                               /*
+                                * once the indices have been set, just verify
+                                * that all subsequent log groups are either in
+                                * their initial state or follow the same
+                                * indices.
+                                */
+                               if (j == log_index[0]) {
+                                       /* entry must be 'valid' */
+                                       if (ent_is_padding(&log.ent[j]))
+                                               return -ENXIO;
+                               } else if (j == log_index[1]) {
+                                       ;
+                                       /*
+                                        * log_index[1] can be padding if the
+                                        * lane never got used and it is still
+                                        * in the initial state (three 'padding'
+                                        * entries)
+                                        */
+                               } else {
+                                       /* entry must be invalid (padding) */
+                                       if (!ent_is_padding(&log.ent[j]))
+                                               return -ENXIO;
+                               }
+                       }
+               }
+               /*
+                * If any of the log_groups has more than one valid,
+                * non-padding entry, then we are no longer in the
+                * initial_state
+                */
+               if (pad_count < 3)
+                       initial_state = false;
+               pad_count = 0;
+       }
+
+       if (!initial_state && !idx_set)
+               return -ENXIO;
+
+       /*
+        * If all the entries in the log were in the initial state,
+        * assume the new padding scheme
+        */
+       if (initial_state)
+               log_index[1] = 1;
+
+       /*
+        * Only allow the known permutations of log/padding indices,
+        * i.e. (0, 1) and (0, 2)
+        */
+       if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
+               ; /* known index possibilities */
+       else {
+               dev_err(to_dev(arena), "Found an unknown padding scheme\n");
+               return -ENXIO;
+       }
+
+       arena->log_index[0] = log_index[0];
+       arena->log_index[1] = log_index[1];
+       dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
+       dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
+       return 0;
+}
+
 static int btt_rtt_init(struct arena_info *arena)
 {
        arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
@@ -650,8 +773,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
        available -= 2 * BTT_PG_SIZE;
 
        /* The log takes a fixed amount of space based on nfree */
-       logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
-                               BTT_PG_SIZE);
+       logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
        available -= logsize;
 
        /* Calculate optimal split between map and data area */
@@ -668,6 +790,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
        arena->mapoff = arena->dataoff + datasize;
        arena->logoff = arena->mapoff + mapsize;
        arena->info2off = arena->logoff + logsize;
+
+       /* Default log indices are (0,1) */
+       arena->log_index[0] = 0;
+       arena->log_index[1] = 1;
        return arena;
 }
 
@@ -758,6 +884,13 @@ static int discover_arenas(struct btt *btt)
                arena->external_lba_start = cur_nlba;
                parse_arena_meta(arena, super, cur_off);
 
+               ret = log_set_indices(arena);
+               if (ret) {
+                       dev_err(to_dev(arena),
+                               "Unable to deduce log/padding indices\n");
+                       goto out;
+               }
+
                mutex_init(&arena->err_lock);
                ret = btt_freelist_init(arena);
                if (ret)
index 578c205..db3cb6d 100644 (file)
@@ -27,6 +27,7 @@
 #define MAP_ERR_MASK (1 << MAP_ERR_SHIFT)
 #define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT)))
 #define MAP_ENT_NORMAL 0xC0000000
+#define LOG_GRP_SIZE sizeof(struct log_group)
 #define LOG_ENT_SIZE sizeof(struct log_entry)
 #define ARENA_MIN_SIZE (1UL << 24)     /* 16 MB */
 #define ARENA_MAX_SIZE (1ULL << 39)    /* 512 GB */
@@ -50,12 +51,52 @@ enum btt_init_state {
        INIT_READY
 };
 
+/*
+ * A log group represents one log 'lane', and consists of four log entries.
+ * Two of the four entries are valid entries, and the remaining two are
+ * padding. Due to an old bug in the padding location, we need to perform a
+ * test to determine the padding scheme being used, and use that scheme
+ * thereafter.
+ *
+ * In kernels prior to 4.15, a 'log group' would have actual log entries at
+ * indices (0, 2) and padding at indices (1, 3), whereas the correct/updated
+ * format has log entries at indices (0, 1) and padding at indices (2, 3).
+ *
+ * Old (pre 4.15) format:
+ * +-----------------+-----------------+
+ * |      ent[0]     |      ent[1]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq |       pad       |
+ * +-----------------------------------+
+ * |      ent[2]     |      ent[3]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq |       pad       |
+ * +-----------------+-----------------+
+ *
+ * New format:
+ * +-----------------+-----------------+
+ * |      ent[0]     |      ent[1]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq | lba/old/new/seq |
+ * +-----------------------------------+
+ * |      ent[2]     |      ent[3]     |
+ * |       16B       |       16B       |
+ * |       pad       |       pad       |
+ * +-----------------+-----------------+
+ *
+ * We detect during start-up which format is in use, and set
+ * arena->log_index[0] and arena->log_index[1] with the detected format.
+ */
+
 struct log_entry {
        __le32 lba;
        __le32 old_map;
        __le32 new_map;
        __le32 seq;
-       __le64 padding[2];
+};
+
+struct log_group {
+       struct log_entry ent[4];
 };
 
 struct btt_sb {
@@ -125,6 +166,8 @@ struct aligned_lock {
  * @list:              List head for list of arenas
  * @debugfs_dir:       Debugfs dentry
  * @flags:             Arena flags - may signify error states.
+ * @err_lock:          Mutex for synchronizing error clearing.
+ * @log_index:         Indices of the valid log entries in a log_group
  *
  * arena_info is a per-arena handle. Once an arena is narrowed down for an
  * IO, this struct is passed around for the duration of the IO.
@@ -157,6 +200,7 @@ struct arena_info {
        /* Arena flags */
        u32 flags;
        struct mutex err_lock;
+       int log_index[2];
 };
 
 /**
@@ -176,6 +220,7 @@ struct arena_info {
  * @init_lock:         Mutex used for the BTT initialization
  * @init_state:                Flag describing the initialization state for the BTT
  * @num_arenas:                Number of arenas in the BTT instance
+ * @phys_bb:           Pointer to the namespace's badblocks structure
  */
 struct btt {
        struct gendisk *btt_disk;
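To make the reworked layout concrete, here is a minimal standalone sketch (not part of the patch; the logoff and lane values are invented for illustration) of how __btt_log_write() now derives a slot's media offset from arena->logoff, the lane number, and the detected log_index[]:

#include <stdint.h>
#include <stdio.h>

#define LOG_ENT_SIZE    16u     /* sizeof(struct log_entry) after the rework */
#define LOG_GRP_SIZE    64u     /* sizeof(struct log_group): four 16B slots */

/* mirrors __btt_log_write(): logoff + lane * group size + slot * entry size */
static uint64_t log_slot_offset(uint64_t arena_logoff, uint32_t lane,
                                unsigned int slot)
{
        return arena_logoff + (uint64_t)lane * LOG_GRP_SIZE +
               (uint64_t)slot * LOG_ENT_SIZE;
}

int main(void)
{
        uint64_t logoff = 0x100000;             /* hypothetical arena->logoff */
        unsigned int log_index[2] = { 0, 1 };   /* detected 'new format' slots */

        printf("lane 5, sub 1 -> media offset %#llx\n",
               (unsigned long long)log_slot_offset(logoff, 5, log_index[1]));
        return 0;
}

With the old scheme log_set_indices() would detect (0, 2) instead, and the same formula then addresses the pre-4.15 layout without any special casing.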
index 65cc171..2adada1 100644 (file)
@@ -364,9 +364,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
        u64 checksum, offset;
-       unsigned long align;
        enum nd_pfn_mode mode;
        struct nd_namespace_io *nsio;
+       unsigned long align, start_pad;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);
@@ -410,6 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 
        align = le32_to_cpu(pfn_sb->align);
        offset = le64_to_cpu(pfn_sb->dataoff);
+       start_pad = le32_to_cpu(pfn_sb->start_pad);
        if (align == 0)
                align = 1UL << ilog2(offset);
        mode = le32_to_cpu(pfn_sb->mode);
@@ -468,7 +469,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
                return -EBUSY;
        }
 
-       if ((align && !IS_ALIGNED(offset, align))
+       if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
                        || !IS_ALIGNED(offset, PAGE_SIZE)) {
                dev_err(&nd_pfn->dev,
                                "bad offset: %#llx dax disabled align: %#lx\n",
@@ -582,6 +583,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
        return altmap;
 }
 
+static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
+{
+       return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
+                       ALIGN_DOWN(phys, nd_pfn->align));
+}
+
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
        u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
@@ -637,13 +644,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        start = nsio->res.start;
        size = PHYS_SECTION_ALIGN_UP(start + size) - start;
        if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-                               IORES_DESC_NONE) == REGION_MIXED) {
+                               IORES_DESC_NONE) == REGION_MIXED
+                       || !IS_ALIGNED(start + resource_size(&nsio->res),
+                               nd_pfn->align)) {
                size = resource_size(&nsio->res);
-               end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+               end_trunc = start + size - phys_pmem_align_down(nd_pfn,
+                               start + size);
        }
 
        if (start_pad + end_trunc)
-               dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+               dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
                                dev_name(&ndns->dev), start_pad + end_trunc);
 
        /*
index 25da74d..839650e 100644 (file)
@@ -1287,7 +1287,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
        BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
                        NVME_DSM_MAX_RANGES);
 
-       queue->limits.discard_alignment = size;
+       queue->limits.discard_alignment = 0;
        queue->limits.discard_granularity = size;
 
        blk_queue_max_discard_sectors(queue, UINT_MAX);
@@ -1335,6 +1335,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
                struct nvme_ns *ns, struct nvme_id_ns *id)
 {
        sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+       unsigned short bs = 1 << ns->lba_shift;
        unsigned stream_alignment = 0;
 
        if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
@@ -1343,7 +1344,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
        blk_mq_freeze_queue(disk->queue);
        blk_integrity_unregister(disk);
 
-       blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift);
+       blk_queue_logical_block_size(disk->queue, bs);
+       blk_queue_physical_block_size(disk->queue, bs);
+       blk_queue_io_min(disk->queue, bs);
+
        if (ns->ms && !ns->ext &&
            (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
                nvme_init_integrity(disk, ns->ms, ns->pi_type);
@@ -1449,19 +1453,19 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
        int srcu_idx, ret;
        u8 data[16] = { 0, };
 
+       ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+       if (unlikely(!ns))
+               return -EWOULDBLOCK;
+
        put_unaligned_le64(key, &data[0]);
        put_unaligned_le64(sa_key, &data[8]);
 
        memset(&c, 0, sizeof(c));
        c.common.opcode = op;
-       c.common.nsid = cpu_to_le32(head->ns_id);
+       c.common.nsid = cpu_to_le32(ns->head->ns_id);
        c.common.cdw10[0] = cpu_to_le32(cdw10);
 
-       ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
-       if (unlikely(!ns))
-               ret = -EWOULDBLOCK;
-       else
-               ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+       ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
        nvme_put_ns_from_disk(head, srcu_idx);
        return ret;
 }
@@ -1705,7 +1709,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
-       if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+       if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+           is_power_of_2(ctrl->max_hw_sectors))
                blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
        blk_queue_virt_boundary(q, ctrl->page_size - 1);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
@@ -2869,7 +2874,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
        nvme_set_queue_limits(ctrl, ns->queue);
-       nvme_setup_streams_ns(ctrl, ns);
 
        id = nvme_identify_ns(ctrl, nsid);
        if (!id)
@@ -2880,6 +2884,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        if (nvme_init_ns_head(ns, nsid, id, &new))
                goto out_free_id;
+       nvme_setup_streams_ns(ctrl, ns);
        
 #ifdef CONFIG_NVME_MULTIPATH
        /*
@@ -2961,14 +2966,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-       struct nvme_ns_head *head = ns->head;
-
        if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
                return;
 
        if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-               if (blk_get_integrity(ns->disk))
-                       blk_integrity_unregister(ns->disk);
                nvme_mpath_remove_disk_links(ns);
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvme_ns_id_attr_group);
@@ -2976,19 +2977,21 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                        nvme_nvm_unregister_sysfs(ns);
                del_gendisk(ns->disk);
                blk_cleanup_queue(ns->queue);
+               if (blk_get_integrity(ns->disk))
+                       blk_integrity_unregister(ns->disk);
        }
 
        mutex_lock(&ns->ctrl->subsys->lock);
        nvme_mpath_clear_current_path(ns);
-       if (head)
-               list_del_rcu(&ns->siblings);
+       list_del_rcu(&ns->siblings);
        mutex_unlock(&ns->ctrl->subsys->lock);
 
        mutex_lock(&ns->ctrl->namespaces_mutex);
        list_del_init(&ns->list);
        mutex_unlock(&ns->ctrl->namespaces_mutex);
 
-       synchronize_srcu(&head->srcu);
+       synchronize_srcu(&ns->head->srcu);
+       nvme_mpath_check_last_path(ns);
        nvme_put_ns(ns);
 }
 
index 76b4fe6..894c2cc 100644 (file)
@@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void)
                return NULL;
 
        kref_init(&host->ref);
+       uuid_gen(&host->id);
        snprintf(host->nqn, NVMF_NQN_SIZE,
                "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
 
index 42232e7..9ba6149 100644 (file)
@@ -156,4 +156,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 
+static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
+               struct request *rq)
+{
+       struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+       /*
+        * We cannot accept any other command until the connect command has
+        * completed, so only allow connect to pass.
+        */
+       if (!blk_rq_is_passthrough(rq) ||
+           cmd->common.opcode != nvme_fabrics_command ||
+           cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+               /*
+                * Reconnecting state means transport disruption, which can take
+                * a long time and might even fail permanently; fail fast to
+                * give upper layers a chance to fail over.
+                * Deleting state means that the ctrl will never accept commands
+                * again; fail it permanently.
+                */
+               if (ctrl->state == NVME_CTRL_RECONNECTING ||
+                   ctrl->state == NVME_CTRL_DELETING) {
+                       nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+                       return BLK_STS_IOERR;
+               }
+               return BLK_STS_RESOURCE; /* try again later */
+       }
+
+       return BLK_STS_OK;
+}
+
 #endif /* _NVME_FABRICS_H */
index 7ab0be5..794e66e 100644 (file)
@@ -31,7 +31,8 @@
 
 
 enum nvme_fc_queue_flags {
-       NVME_FC_Q_CONNECTED = (1 << 0),
+       NVME_FC_Q_CONNECTED = 0,
+       NVME_FC_Q_LIVE,
 };
 
 #define NVMEFC_QUEUE_DELAY     3               /* ms units */
@@ -1927,6 +1928,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
        if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
                return;
 
+       clear_bit(NVME_FC_Q_LIVE, &queue->flags);
        /*
         * Current implementation never disconnects a single queue.
         * It always terminates a whole association. So there is never
@@ -1934,7 +1936,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
         */
 
        queue->connection_id = 0;
-       clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
 }
 
 static void
@@ -2013,6 +2014,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        break;
+
+               set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
        }
 
        return ret;
@@ -2320,6 +2323,14 @@ busy:
        return BLK_STS_RESOURCE;
 }
 
+static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
+               struct request *rq)
+{
+       if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
+               return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+       return BLK_STS_OK;
+}
+
 static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
@@ -2335,6 +2346,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
        u32 data_len;
        blk_status_t ret;
 
+       ret = nvme_fc_is_ready(queue, rq);
+       if (unlikely(ret))
+               return ret;
+
        ret = nvme_setup_cmd(ns, rq, sqe);
        if (ret)
                return ret;
@@ -2727,6 +2742,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        if (ret)
                goto out_disconnect_admin_queue;
 
+       set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
        /*
         * Check controller capabilities
         *
@@ -3204,7 +3221,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
                /* initiate nvme ctrl ref counting teardown */
                nvme_uninit_ctrl(&ctrl->ctrl);
-               nvme_put_ctrl(&ctrl->ctrl);
 
                /* Remove core ctrl ref. */
                nvme_put_ctrl(&ctrl->ctrl);
index 78d9215..1218a9f 100644 (file)
@@ -131,7 +131,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
                bio->bi_opf |= REQ_NVME_MPATH;
                ret = direct_make_request(bio);
        } else if (!list_empty_careful(&head->list)) {
-               dev_warn_ratelimited(dev, "no path available - requeing I/O\n");
+               dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
 
                spin_lock_irq(&head->requeue_lock);
                bio_list_add(&head->requeue_list, bio);
index c0873a6..a00eabd 100644 (file)
@@ -114,7 +114,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
  * found empirically.
  */
-#define NVME_QUIRK_DELAY_AMOUNT                2000
+#define NVME_QUIRK_DELAY_AMOUNT                2300
 
 enum nvme_ctrl_state {
        NVME_CTRL_NEW,
@@ -417,6 +417,15 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
                rcu_assign_pointer(head->current_path, NULL);
 }
 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+       struct nvme_ns_head *head = ns->head;
+
+       if (head->disk && list_empty(&head->list))
+               kblockd_schedule_work(&head->requeue_work);
+}
+
 #else
 static inline void nvme_failover_req(struct request *req)
 {
@@ -448,6 +457,9 @@ static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
 }
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 #ifdef CONFIG_NVM
index a11cfd4..4276ebf 100644 (file)
@@ -448,12 +448,34 @@ static void **nvme_pci_iod_list(struct request *req)
        return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       int nseg = blk_rq_nr_phys_segments(req);
+       unsigned int avg_seg_size;
+
+       if (nseg == 0)
+               return false;
+
+       avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
+
+       if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+               return false;
+       if (!iod->nvmeq->qid)
+               return false;
+       if (!sgl_threshold || avg_seg_size < sgl_threshold)
+               return false;
+       return true;
+}
+
 static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
        int nseg = blk_rq_nr_phys_segments(rq);
        unsigned int size = blk_rq_payload_bytes(rq);
 
+       iod->use_sgl = nvme_pci_use_sgls(dev, rq);
+
        if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
                size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
                                iod->use_sgl);
@@ -604,8 +626,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
        dma_addr_t prp_dma;
        int nprps, i;
 
-       iod->use_sgl = false;
-
        length -= (page_size - offset);
        if (length <= 0) {
                iod->first_dma = 0;
@@ -705,22 +725,19 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
 }
 
 static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
-               struct request *req, struct nvme_rw_command *cmd)
+               struct request *req, struct nvme_rw_command *cmd, int entries)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-       int length = blk_rq_payload_bytes(req);
        struct dma_pool *pool;
        struct nvme_sgl_desc *sg_list;
        struct scatterlist *sg = iod->sg;
-       int entries = iod->nents, i = 0;
        dma_addr_t sgl_dma;
-
-       iod->use_sgl = true;
+       int i = 0;
 
        /* setting the transfer type as SGL */
        cmd->flags = NVME_CMD_SGL_METABUF;
 
-       if (length == sg_dma_len(sg)) {
+       if (entries == 1) {
                nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
                return BLK_STS_OK;
        }
@@ -760,33 +777,12 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
                }
 
                nvme_pci_sgl_set_data(&sg_list[i++], sg);
-
-               length -= sg_dma_len(sg);
                sg = sg_next(sg);
-               entries--;
-       } while (length > 0);
+       } while (--entries > 0);
 
-       WARN_ON(entries > 0);
        return BLK_STS_OK;
 }
 
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
-{
-       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-       unsigned int avg_seg_size;
-
-       avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
-                       blk_rq_nr_phys_segments(req));
-
-       if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
-               return false;
-       if (!iod->nvmeq->qid)
-               return false;
-       if (!sgl_threshold || avg_seg_size < sgl_threshold)
-               return false;
-       return true;
-}
-
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                struct nvme_command *cmnd)
 {
@@ -795,6 +791,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
        enum dma_data_direction dma_dir = rq_data_dir(req) ?
                        DMA_TO_DEVICE : DMA_FROM_DEVICE;
        blk_status_t ret = BLK_STS_IOERR;
+       int nr_mapped;
 
        sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
        iod->nents = blk_rq_map_sg(q, req, iod->sg);
@@ -802,12 +799,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                goto out;
 
        ret = BLK_STS_RESOURCE;
-       if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
-                               DMA_ATTR_NO_WARN))
+       nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
+                       DMA_ATTR_NO_WARN);
+       if (!nr_mapped)
                goto out;
 
-       if (nvme_pci_use_sgls(dev, req))
-               ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
+       if (iod->use_sgl)
+               ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
        else
                ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
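Worth noting from the hunks above: iod->use_sgl is now decided once in nvme_init_iod() via nvme_pci_use_sgls(), before mapping, and nvme_map_data() simply honours it. A rough userspace model of that predicate follows (the SGL support bits and the sgl_threshold module parameter are passed in as plain arguments; the values in main() are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Rough model of nvme_pci_use_sgls(): the decision only, not kernel code. */
static bool use_sgls(unsigned int ctrl_sgls, int qid, unsigned int nseg,
                     unsigned int payload_bytes, unsigned int sgl_threshold)
{
        unsigned int avg_seg_size;

        if (nseg == 0)
                return false;

        avg_seg_size = (payload_bytes + nseg - 1) / nseg; /* DIV_ROUND_UP */

        if (!(ctrl_sgls & ((1 << 0) | (1 << 1))))
                return false;           /* controller does not support SGLs */
        if (!qid)
                return false;           /* never on the admin queue */
        if (!sgl_threshold || avg_seg_size < sgl_threshold)
                return false;           /* small segments stay on PRPs */
        return true;
}

int main(void)
{
        /* hypothetical I/O: 8 segments, 256 KiB payload, 32 KiB threshold */
        printf("use SGL: %d\n", use_sgls(0x1, 1, 8, 256 * 1024, 32 * 1024));
        return 0;
}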
 
@@ -1759,6 +1757,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
                        dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
                        dev->host_mem_descs, dev->host_mem_descs_dma);
        dev->host_mem_descs = NULL;
+       dev->nr_host_mem_descs = 0;
 }
 
 static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
@@ -1787,7 +1786,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
        if (!bufs)
                goto out_free_descs;
 
-       for (size = 0; size < preferred; size += len) {
+       for (size = 0; size < preferred && i < max_entries; size += len) {
                dma_addr_t dma_addr;
 
                len = min_t(u64, chunk_size, preferred - size);
@@ -2428,7 +2427,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
        return -ENODEV;
 }
 
-static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 {
        if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
                /*
@@ -2443,6 +2442,14 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
                    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
                     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
                        return NVME_QUIRK_NO_DEEPEST_PS;
+       } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
+               /*
+                * Samsung SSD 960 EVO drops off the PCIe bus after system
+                * suspend on a Ryzen board, ASUS PRIME B350M-A.
+                */
+               if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
+                   dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
+                       return NVME_QUIRK_NO_APST;
        }
 
        return 0;
@@ -2482,7 +2489,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto unmap;
 
-       quirks |= check_dell_samsung_bug(pdev);
+       quirks |= check_vendor_combination_bug(pdev);
 
        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
                        quirks);
@@ -2665,6 +2672,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
        { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+       { PCI_DEVICE(0x1c58, 0x0023),   /* WDC SN200 adapter */
+               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x1c5f, 0x0540),   /* Memblaze Pblaze4 adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
index 4f9bf2f..2a0bba7 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <rdma/mr_pool.h>
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/atomic.h>
@@ -59,6 +60,9 @@ struct nvme_rdma_request {
        struct nvme_request     req;
        struct ib_mr            *mr;
        struct nvme_rdma_qe     sqe;
+       union nvme_result       result;
+       __le16                  status;
+       refcount_t              ref;
        struct ib_sge           sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
        u32                     num_sge;
        int                     nents;
@@ -73,11 +77,11 @@ struct nvme_rdma_request {
 enum nvme_rdma_queue_flags {
        NVME_RDMA_Q_ALLOCATED           = 0,
        NVME_RDMA_Q_LIVE                = 1,
+       NVME_RDMA_Q_TR_READY            = 2,
 };
 
 struct nvme_rdma_queue {
        struct nvme_rdma_qe     *rsp_ring;
-       atomic_t                sig_count;
        int                     queue_size;
        size_t                  cmnd_capsule_len;
        struct nvme_rdma_ctrl   *ctrl;
@@ -258,32 +262,6 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
        return ret;
 }
 
-static int nvme_rdma_reinit_request(void *data, struct request *rq)
-{
-       struct nvme_rdma_ctrl *ctrl = data;
-       struct nvme_rdma_device *dev = ctrl->device;
-       struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-       int ret = 0;
-
-       if (WARN_ON_ONCE(!req->mr))
-               return 0;
-
-       ib_dereg_mr(req->mr);
-
-       req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-                       ctrl->max_fr_pages);
-       if (IS_ERR(req->mr)) {
-               ret = PTR_ERR(req->mr);
-               req->mr = NULL;
-               goto out;
-       }
-
-       req->mr->need_inval = false;
-
-out:
-       return ret;
-}
-
 static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
                struct request *rq, unsigned int hctx_idx)
 {
@@ -293,9 +271,6 @@ static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
        struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
        struct nvme_rdma_device *dev = queue->device;
 
-       if (req->mr)
-               ib_dereg_mr(req->mr);
-
        nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
                        DMA_TO_DEVICE);
 }
@@ -317,21 +292,9 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
        if (ret)
                return ret;
 
-       req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-                       ctrl->max_fr_pages);
-       if (IS_ERR(req->mr)) {
-               ret = PTR_ERR(req->mr);
-               goto out_free_qe;
-       }
-
        req->queue = queue;
 
        return 0;
-
-out_free_qe:
-       nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
-                       DMA_TO_DEVICE);
-       return -ENOMEM;
 }
 
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -428,10 +391,23 @@ out_err:
 
 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 {
-       struct nvme_rdma_device *dev = queue->device;
-       struct ib_device *ibdev = dev->dev;
+       struct nvme_rdma_device *dev;
+       struct ib_device *ibdev;
 
-       rdma_destroy_qp(queue->cm_id);
+       if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
+               return;
+
+       dev = queue->device;
+       ibdev = dev->dev;
+
+       ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
+
+       /*
+        * The cm_id object might have been destroyed during RDMA connection
+        * establishment error flow to avoid getting other cma events; thus
+        * the destruction of the QP shouldn't use the rdma_cm API.
+        */
+       ib_destroy_qp(queue->qp);
        ib_free_cq(queue->ib_cq);
 
        nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
@@ -440,6 +416,12 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
        nvme_rdma_dev_put(dev);
 }
 
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+{
+       return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+                    ibdev->attrs.max_fast_reg_page_list_len);
+}
+
 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 {
        struct ib_device *ibdev;
@@ -482,8 +464,24 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
                goto out_destroy_qp;
        }
 
+       ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
+                             queue->queue_size,
+                             IB_MR_TYPE_MEM_REG,
+                             nvme_rdma_get_max_fr_pages(ibdev));
+       if (ret) {
+               dev_err(queue->ctrl->ctrl.device,
+                       "failed to initialize MR pool sized %d for QID %d\n",
+                       queue->queue_size, idx);
+               goto out_destroy_ring;
+       }
+
+       set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
+
        return 0;
 
+out_destroy_ring:
+       nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+                           sizeof(struct nvme_completion), DMA_FROM_DEVICE);
 out_destroy_qp:
        rdma_destroy_qp(queue->cm_id);
 out_destroy_ib_cq:
@@ -510,7 +508,6 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
                queue->cmnd_capsule_len = sizeof(struct nvme_command);
 
        queue->queue_size = queue_size;
-       atomic_set(&queue->sig_count, 0);
 
        queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
                        RDMA_PS_TCP, IB_QPT_RC);
@@ -546,6 +543,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
 
 out_destroy_cm_id:
        rdma_destroy_id(queue->cm_id);
+       nvme_rdma_destroy_queue_ib(queue);
        return ret;
 }
 
@@ -756,8 +754,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
        ctrl->device = ctrl->queues[0].device;
 
-       ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
-               ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+       ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
 
        if (new) {
                ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
@@ -771,10 +768,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
                        error = PTR_ERR(ctrl->ctrl.admin_q);
                        goto out_free_tagset;
                }
-       } else {
-               error = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
-               if (error)
-                       goto out_free_queue;
        }
 
        error = nvme_rdma_start_queue(ctrl, 0);
@@ -854,10 +847,6 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
                        goto out_free_tag_set;
                }
        } else {
-               ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
-               if (ret)
-                       goto out_free_io_queues;
-
                blk_mq_update_nr_hw_queues(&ctrl->tag_set,
                        ctrl->ctrl.queue_count - 1);
        }
@@ -985,12 +974,18 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        nvme_start_queues(&ctrl->ctrl);
 
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+               /* state change failure should never happen */
+               WARN_ON_ONCE(1);
+               return;
+       }
+
        nvme_rdma_reconnect_or_remove(ctrl);
 }
 
 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
 {
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
                return;
 
        queue_work(nvme_wq, &ctrl->err_work);
@@ -1018,8 +1013,18 @@ static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
 
 static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-       if (unlikely(wc->status != IB_WC_SUCCESS))
+       struct nvme_rdma_request *req =
+               container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
+       struct request *rq = blk_mq_rq_from_pdu(req);
+
+       if (unlikely(wc->status != IB_WC_SUCCESS)) {
                nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
+               return;
+       }
+
+       if (refcount_dec_and_test(&req->ref))
+               nvme_end_request(rq, req->status, req->result);
+
 }
 
 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
@@ -1030,7 +1035,7 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
                .opcode             = IB_WR_LOCAL_INV,
                .next               = NULL,
                .num_sge            = 0,
-               .send_flags         = 0,
+               .send_flags         = IB_SEND_SIGNALED,
                .ex.invalidate_rkey = req->mr->rkey,
        };
 
@@ -1044,22 +1049,15 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
                struct request *rq)
 {
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-       struct nvme_rdma_ctrl *ctrl = queue->ctrl;
        struct nvme_rdma_device *dev = queue->device;
        struct ib_device *ibdev = dev->dev;
-       int res;
 
        if (!blk_rq_bytes(rq))
                return;
 
-       if (req->mr->need_inval && test_bit(NVME_RDMA_Q_LIVE, &req->queue->flags)) {
-               res = nvme_rdma_inv_rkey(queue, req);
-               if (unlikely(res < 0)) {
-                       dev_err(ctrl->ctrl.device,
-                               "Queueing INV WR for rkey %#x failed (%d)\n",
-                               req->mr->rkey, res);
-                       nvme_rdma_error_recovery(queue->ctrl);
-               }
+       if (req->mr) {
+               ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+               req->mr = NULL;
        }
 
        ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
@@ -1118,12 +1116,18 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
        struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
        int nr;
 
+       req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
+       if (WARN_ON_ONCE(!req->mr))
+               return -EAGAIN;
+
        /*
         * Align the MR to a 4K page size to match the ctrl page size and
         * the block virtual boundary.
         */
        nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
        if (unlikely(nr < count)) {
+               ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+               req->mr = NULL;
                if (nr < 0)
                        return nr;
                return -EINVAL;
@@ -1142,8 +1146,6 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
                             IB_ACCESS_REMOTE_READ |
                             IB_ACCESS_REMOTE_WRITE;
 
-       req->mr->need_inval = true;
-
        sg->addr = cpu_to_le64(req->mr->iova);
        put_unaligned_le24(req->mr->length, sg->length);
        put_unaligned_le32(req->mr->rkey, sg->key);
@@ -1163,7 +1165,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
        req->num_sge = 1;
        req->inline_data = false;
-       req->mr->need_inval = false;
+       refcount_set(&req->ref, 2); /* send and recv completions */
 
        c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1200,25 +1202,24 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-       if (unlikely(wc->status != IB_WC_SUCCESS))
-               nvme_rdma_wr_error(cq, wc, "SEND");
-}
+       struct nvme_rdma_qe *qe =
+               container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+       struct nvme_rdma_request *req =
+               container_of(qe, struct nvme_rdma_request, sqe);
+       struct request *rq = blk_mq_rq_from_pdu(req);
 
-/*
- * We want to signal completion at least every queue depth/2.  This returns the
- * largest power of two that is not above half of (queue size + 1) to optimize
- * (avoid divisions).
- */
-static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
-{
-       int limit = 1 << ilog2((queue->queue_size + 1) / 2);
+       if (unlikely(wc->status != IB_WC_SUCCESS)) {
+               nvme_rdma_wr_error(cq, wc, "SEND");
+               return;
+       }
 
-       return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
+       if (refcount_dec_and_test(&req->ref))
+               nvme_end_request(rq, req->status, req->result);
 }
 
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
                struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
-               struct ib_send_wr *first, bool flush)
+               struct ib_send_wr *first)
 {
        struct ib_send_wr wr, *bad_wr;
        int ret;
@@ -1227,31 +1228,12 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
        sge->length = sizeof(struct nvme_command),
        sge->lkey   = queue->device->pd->local_dma_lkey;
 
-       qe->cqe.done = nvme_rdma_send_done;
-
        wr.next       = NULL;
        wr.wr_cqe     = &qe->cqe;
        wr.sg_list    = sge;
        wr.num_sge    = num_sge;
        wr.opcode     = IB_WR_SEND;
-       wr.send_flags = 0;
-
-       /*
-        * Unsignalled send completions are another giant desaster in the
-        * IB Verbs spec:  If we don't regularly post signalled sends
-        * the send queue will fill up and only a QP reset will rescue us.
-        * Would have been way to obvious to handle this in hardware or
-        * at least the RDMA stack..
-        *
-        * Always signal the flushes. The magic request used for the flush
-        * sequencer is not allocated in our driver's tagset and it's
-        * triggered to be freed by blk_cleanup_queue(). So we need to
-        * always mark it as signaled to ensure that the "wr_cqe", which is
-        * embedded in request's payload, is not freed when __ib_process_cq()
-        * calls wr_cqe->done().
-        */
-       if (nvme_rdma_queue_sig_limit(queue) || flush)
-               wr.send_flags |= IB_SEND_SIGNALED;
+       wr.send_flags = IB_SEND_SIGNALED;
 
        if (first)
                first->next = &wr;
@@ -1301,6 +1283,12 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
        return queue->ctrl->tag_set.tags[queue_idx - 1];
 }
 
+static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       if (unlikely(wc->status != IB_WC_SUCCESS))
+               nvme_rdma_wr_error(cq, wc, "ASYNC");
+}
+
 static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 {
        struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
@@ -1319,10 +1307,12 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
        cmd->common.flags |= NVME_CMD_SGL_METABUF;
        nvme_rdma_set_sg_null(cmd);
 
+       sqe->cqe.done = nvme_rdma_async_done;
+
        ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
                        DMA_TO_DEVICE);
 
-       ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
+       ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
        WARN_ON_ONCE(ret);
 }
 
@@ -1343,14 +1333,34 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
        }
        req = blk_mq_rq_to_pdu(rq);
 
-       if (rq->tag == tag)
-               ret = 1;
+       req->status = cqe->status;
+       req->result = cqe->result;
 
-       if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
-           wc->ex.invalidate_rkey == req->mr->rkey)
-               req->mr->need_inval = false;
+       if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
+               if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
+                       dev_err(queue->ctrl->ctrl.device,
+                               "Bogus remote invalidation for rkey %#x\n",
+                               req->mr->rkey);
+                       nvme_rdma_error_recovery(queue->ctrl);
+               }
+       } else if (req->mr) {
+               ret = nvme_rdma_inv_rkey(queue, req);
+               if (unlikely(ret < 0)) {
+                       dev_err(queue->ctrl->ctrl.device,
+                               "Queueing INV WR for rkey %#x failed (%d)\n",
+                               req->mr->rkey, ret);
+                       nvme_rdma_error_recovery(queue->ctrl);
+               }
+               /* the local invalidation completion will end the request */
+               return 0;
+       }
+
+       if (refcount_dec_and_test(&req->ref)) {
+               if (rq->tag == tag)
+                       ret = 1;
+               nvme_end_request(rq, req->status, req->result);
+       }
 
-       nvme_end_request(rq, cqe->status, cqe->result);
        return ret;
 }
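The hunk above is the receive side of a pattern that runs through the rdma.c changes: each request now starts with refcount_set(&req->ref, 2), every SEND is posted signalled, and whichever of the SEND completion and the response (or local-invalidate) completion drops the last reference ends the request. A tiny standalone model of that accounting (userspace C11 atomics standing in for refcount_t; names are invented for illustration):

#include <stdatomic.h>
#include <stdio.h>

struct fake_request {
        atomic_int ref;         /* stands in for nvme_rdma_request.ref */
};

/* mirrors refcount_dec_and_test() followed by nvme_end_request() */
static void completion_fired(struct fake_request *req, const char *who)
{
        if (atomic_fetch_sub(&req->ref, 1) == 1)
                printf("%s completion was last: end the request\n", who);
        else
                printf("%s completion done, waiting for the other one\n", who);
}

int main(void)
{
        struct fake_request req;

        atomic_init(&req.ref, 2);          /* send and recv completions */
        completion_fired(&req, "SEND");    /* cf. nvme_rdma_send_done() */
        completion_fired(&req, "RECV");    /* cf. nvme_rdma_process_nvme_rsp() */
        return 0;
}

This is what allows the old per-queue signalling heuristic (nvme_rdma_queue_sig_limit) to go away: the wr_cqe embedded in the request cannot be freed until both completions have fired.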
 
@@ -1591,31 +1601,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
  * We cannot accept any other command until the Connect command has completed.
  */
 static inline blk_status_t
-nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
-{
-       if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
-               struct nvme_command *cmd = nvme_req(rq)->cmd;
-
-               if (!blk_rq_is_passthrough(rq) ||
-                   cmd->common.opcode != nvme_fabrics_command ||
-                   cmd->fabrics.fctype != nvme_fabrics_type_connect) {
-                       /*
-                        * reconnecting state means transport disruption, which
-                        * can take a long time and even might fail permanently,
-                        * fail fast to give upper layers a chance to failover.
-                        * deleting state means that the ctrl will never accept
-                        * commands again, fail it permanently.
-                        */
-                       if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
-                           queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
-                               nvme_req(rq)->status = NVME_SC_ABORT_REQ;
-                               return BLK_STS_IOERR;
-                       }
-                       return BLK_STS_RESOURCE; /* try again later */
-               }
-       }
-
-       return 0;
+nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
+{
+       if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
+               return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+       return BLK_STS_OK;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1627,14 +1617,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_rdma_qe *sqe = &req->sqe;
        struct nvme_command *c = sqe->data;
-       bool flush = false;
        struct ib_device *dev;
        blk_status_t ret;
        int err;
 
        WARN_ON_ONCE(rq->tag < 0);
 
-       ret = nvme_rdma_queue_is_ready(queue, rq);
+       ret = nvme_rdma_is_ready(queue, rq);
        if (unlikely(ret))
                return ret;
 
@@ -1656,13 +1645,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
                goto err;
        }
 
+       sqe->cqe.done = nvme_rdma_send_done;
+
        ib_dma_sync_single_for_device(dev, sqe->dma,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
 
-       if (req_op(rq) == REQ_OP_FLUSH)
-               flush = true;
        err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-                       req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
+                       req->mr ? &req->reg_wr.wr : NULL);
        if (unlikely(err)) {
                nvme_rdma_unmap_data(queue, rq);
                goto err;
@@ -1770,6 +1759,12 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
        nvme_stop_ctrl(&ctrl->ctrl);
        nvme_rdma_shutdown_ctrl(ctrl, false);
 
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+               /* state change failure should never happen */
+               WARN_ON_ONCE(1);
+               return;
+       }
+
        ret = nvme_rdma_configure_admin_queue(ctrl, false);
        if (ret)
                goto out_fail;
@@ -1810,7 +1805,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
        .submit_async_event     = nvme_rdma_submit_async_event,
        .delete_ctrl            = nvme_rdma_delete_ctrl,
        .get_address            = nvmf_get_address,
-       .reinit_request         = nvme_rdma_reinit_request,
 };
 
 static inline bool
index 664d301..5fd8603 100644 (file)
@@ -533,15 +533,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 
        tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 
+       /* release the queue lookup reference on the completed IO */
+       nvmet_fc_tgt_q_put(queue);
+
        spin_lock_irqsave(&queue->qlock, flags);
        deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
                                struct nvmet_fc_defer_fcp_req, req_list);
        if (!deferfcp) {
                list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
                spin_unlock_irqrestore(&queue->qlock, flags);
-
-               /* Release reference taken at queue lookup and fod allocation */
-               nvmet_fc_tgt_q_put(queue);
                return;
        }
 
@@ -760,6 +760,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
                tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
                                deferfcp->fcp_req);
 
+               /* release the queue lookup reference */
+               nvmet_fc_tgt_q_put(queue);
+
                kfree(deferfcp);
 
                spin_lock_irqsave(&queue->qlock, flags);
index 7b75d9d..6a018a0 100644 (file)
@@ -1085,7 +1085,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
 {
        struct fcloop_nport *nport = NULL, *tmpport;
-       struct fcloop_tport *tport;
+       struct fcloop_tport *tport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;
index 96d3904..1e21b28 100644 (file)
@@ -52,10 +52,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
        return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
 }
 
+enum nvme_loop_queue_flags {
+       NVME_LOOP_Q_LIVE        = 0,
+};
+
 struct nvme_loop_queue {
        struct nvmet_cq         nvme_cq;
        struct nvmet_sq         nvme_sq;
        struct nvme_loop_ctrl   *ctrl;
+       unsigned long           flags;
 };
 
 static struct nvmet_port *nvmet_loop_port;
@@ -144,6 +149,14 @@ nvme_loop_timeout(struct request *rq, bool reserved)
        return BLK_EH_HANDLED;
 }
 
+static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
+               struct request *rq)
+{
+       if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
+               return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+       return BLK_STS_OK;
+}
+
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
@@ -153,6 +166,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret;
 
+       ret = nvme_loop_is_ready(queue, req);
+       if (unlikely(ret))
+               return ret;
+
        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
                return ret;
@@ -267,6 +284,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+       clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
@@ -297,8 +315,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
 {
        int i;
 
-       for (i = 1; i < ctrl->ctrl.queue_count; i++)
+       for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+               clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+       }
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -338,6 +358,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        return ret;
+               set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
        }
 
        return 0;
@@ -380,6 +401,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        if (error)
                goto out_cleanup_queue;
 
+       set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+
        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
        if (error) {
                dev_err(ctrl->ctrl.device,
index a346b49..41d3a3c 100644 (file)
@@ -156,8 +156,8 @@ static int meson_mx_efuse_read(void *context, unsigned int offset,
                                 MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
                                 MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
 
-       for (i = offset; i < offset + bytes; i += efuse->config.word_size) {
-               addr = i / efuse->config.word_size;
+       for (i = 0; i < bytes; i += efuse->config.word_size) {
+               addr = (offset + i) / efuse->config.word_size;
 
                err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
                if (err)
index c454941..ab988d8 100644 (file)
@@ -695,7 +695,7 @@ int __of_changeset_apply_entries(struct of_changeset *ocs, int *ret_revert)
 /*
  * Returns 0 on success, a negative error value in case of an error.
  *
- * If multiple changset entry notification errors occur then only the
+ * If multiple changeset entry notification errors occur then only the
  * final notification error is reported.
  */
 int __of_changeset_apply_notify(struct of_changeset *ocs)
@@ -795,7 +795,7 @@ int __of_changeset_revert_entries(struct of_changeset *ocs, int *ret_apply)
 }
 
 /*
- * If multiple changset entry notification errors occur then only the
+ * If multiple changeset entry notification errors occur then only the
  * final notification error is reported.
  */
 int __of_changeset_revert_notify(struct of_changeset *ocs)
index 9825858..a327be1 100644 (file)
@@ -81,6 +81,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
         * can be looked up later */
        of_node_get(child);
        phy->mdio.dev.of_node = child;
+       phy->mdio.dev.fwnode = of_fwnode_handle(child);
 
        /* All data is now stored in the phy struct;
         * register it */
@@ -111,6 +112,7 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
         */
        of_node_get(child);
        mdiodev->dev.of_node = child;
+       mdiodev->dev.fwnode = of_fwnode_handle(child);
 
        /* All data is now stored in the mdiodev struct; register it. */
        rc = mdio_device_register(mdiodev);
@@ -206,6 +208,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
        mdio->phy_mask = ~0;
 
        mdio->dev.of_node = np;
+       mdio->dev.fwnode = of_fwnode_handle(np);
 
        /* Get bus level PHY reset GPIO details */
        mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
@@ -228,7 +231,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                        rc = of_mdiobus_register_phy(mdio, child, addr);
                else
                        rc = of_mdiobus_register_device(mdio, child, addr);
-               if (rc)
+
+               if (rc == -ENODEV)
+                       dev_err(&mdio->dev,
+                               "MDIO device at address %d is missing.\n",
+                               addr);
+               else if (rc)
                        goto unregister;
        }
 
@@ -252,7 +260,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 
                        if (of_mdiobus_child_is_phy(child)) {
                                rc = of_mdiobus_register_phy(mdio, child, addr);
-                               if (rc)
+                               if (rc && rc != -ENODEV)
                                        goto unregister;
                        }
                }
index c150abb..3981b7d 100644 (file)
@@ -522,7 +522,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
        struct device_node *node, *overlay_node;
        struct fragment *fragment;
        struct fragment *fragments;
-       int cnt, ret;
+       int cnt, id, ret;
 
        /*
         * Warn for some issues.  Can not return -EINVAL for these until
@@ -543,9 +543,9 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
 
        of_changeset_init(&ovcs->cset);
 
-       ovcs->id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL);
-       if (ovcs->id <= 0)
-               return ovcs->id;
+       id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL);
+       if (id <= 0)
+               return id;
 
        cnt = 0;
 
@@ -572,18 +572,20 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
 
        cnt = 0;
        for_each_child_of_node(tree, node) {
+               overlay_node = of_get_child_by_name(node, "__overlay__");
+               if (!overlay_node)
+                       continue;
+
                fragment = &fragments[cnt];
-               fragment->overlay = of_get_child_by_name(node, "__overlay__");
-               if (fragment->overlay) {
-                       fragment->target = find_target_node(node);
-                       if (!fragment->target) {
-                               of_node_put(fragment->overlay);
-                               ret = -EINVAL;
-                               goto err_free_fragments;
-                       } else {
-                               cnt++;
-                       }
+               fragment->overlay = overlay_node;
+               fragment->target = find_target_node(node);
+               if (!fragment->target) {
+                       of_node_put(fragment->overlay);
+                       ret = -EINVAL;
+                       goto err_free_fragments;
                }
+
+               cnt++;
        }
 
        /*
@@ -611,6 +613,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
                goto err_free_fragments;
        }
 
+       ovcs->id = id;
        ovcs->count = cnt;
        ovcs->fragments = fragments;
 
@@ -619,7 +622,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
 err_free_fragments:
        kfree(fragments);
 err_free_idr:
-       idr_remove(&ovcs_idr, ovcs->id);
+       idr_remove(&ovcs_idr, id);
 
        pr_err("%s() failed, ret = %d\n", __func__, ret);
 
@@ -630,9 +633,8 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
 {
        int i;
 
-       if (!ovcs->cset.entries.next)
-               return;
-       of_changeset_destroy(&ovcs->cset);
+       if (ovcs->cset.entries.next)
+               of_changeset_destroy(&ovcs->cset);
 
        if (ovcs->id)
                idr_remove(&ovcs_idr, ovcs->id);
@@ -660,14 +662,14 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
  * A non-zero return value will not have created the changeset if error is from:
  *   - parameter checks
  *   - building the changeset
- *   - overlay changset pre-apply notifier
+ *   - overlay changeset pre-apply notifier
  *
  * If an error is returned by an overlay changeset pre-apply notifier
  * then no further overlay changeset pre-apply notifier will be called.
  *
  * A non-zero return value will have created the changeset if error is from:
  *   - overlay changeset entry notifier
- *   - overlay changset post-apply notifier
+ *   - overlay changeset post-apply notifier
  *
  * If an error is returned by an overlay changeset post-apply notifier
  * then no further overlay changeset post-apply notifier will be called.
@@ -706,12 +708,11 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
        }
 
        of_overlay_mutex_lock();
+       mutex_lock(&of_mutex);
 
        ret = of_resolve_phandles(tree);
        if (ret)
-               goto err_overlay_unlock;
-
-       mutex_lock(&of_mutex);
+               goto err_free_overlay_changeset;
 
        ret = init_overlay_changeset(ovcs, tree);
        if (ret)
@@ -736,14 +737,13 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
                        devicetree_state_flags |= DTSF_APPLY_FAIL;
                }
                goto err_free_overlay_changeset;
-       } else {
-               ret = __of_changeset_apply_notify(&ovcs->cset);
-               if (ret)
-                       pr_err("overlay changeset entry notify error %d\n",
-                              ret);
-               /* fall through */
        }
 
+       ret = __of_changeset_apply_notify(&ovcs->cset);
+       if (ret)
+               pr_err("overlay changeset entry notify error %d\n", ret);
+       /* notify failure is not fatal, continue */
+
        list_add_tail(&ovcs->ovcs_list, &ovcs_list);
        *ovcs_id = ovcs->id;
 
@@ -755,18 +755,14 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
                        ret = ret_tmp;
        }
 
-       mutex_unlock(&of_mutex);
-       of_overlay_mutex_unlock();
-
-       goto out;
-
-err_overlay_unlock:
-       of_overlay_mutex_unlock();
+       goto out_unlock;
 
 err_free_overlay_changeset:
        free_overlay_changeset(ovcs);
 
+out_unlock:
        mutex_unlock(&of_mutex);
+       of_overlay_mutex_unlock();
 
 out:
        pr_debug("%s() err=%d\n", __func__, ret);
@@ -871,7 +867,7 @@ static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
  *
  * A non-zero return value will not revert the changeset if error is from:
  *   - parameter checks
- *   - overlay changset pre-remove notifier
+ *   - overlay changeset pre-remove notifier
  *   - overlay changeset entry revert
  *
  * If an error is returned by an overlay changeset pre-remove notifier
@@ -882,7 +878,7 @@ static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
  *
  * A non-zero return value will revert the changeset if error is from:
  *   - overlay changeset entry notifier
- *   - overlay changset post-remove notifier
+ *   - overlay changeset post-remove notifier
  *
  * If an error is returned by an overlay changeset post-remove notifier
  * then no further overlay changeset post-remove notifier will be called.
@@ -931,15 +927,13 @@ int of_overlay_remove(int *ovcs_id)
                if (ret_apply)
                        devicetree_state_flags |= DTSF_REVERT_FAIL;
                goto out_unlock;
-       } else {
-               ret = __of_changeset_revert_notify(&ovcs->cset);
-               if (ret) {
-                       pr_err("overlay changeset entry notify error %d\n",
-                              ret);
-                       /* fall through - changeset was reverted */
-               }
        }
 
+       ret = __of_changeset_revert_notify(&ovcs->cset);
+       if (ret)
+               pr_err("overlay changeset entry notify error %d\n", ret);
+       /* notify failure is not fatal, continue */
+
        *ovcs_id = 0;
 
        ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_REMOVE);
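Part of the overlay rework above keeps the freshly allocated changeset id in a local variable and assigns ovcs->id only after the whole init path has succeeded, so the error path frees exactly the id it allocated. A small self-contained sketch of that publish-on-success pattern (made-up allocator, not the kernel's idr API):

#include <stdio.h>

struct changeset { int id; };

static int fake_alloc_id(void)   { static int next = 1; return next++; }
static void fake_free_id(int id) { (void)id; }

static int init_changeset(struct changeset *cs, int simulate_failure)
{
        int id = fake_alloc_id();       /* keep in a local until we succeed */

        if (simulate_failure) {
                fake_free_id(id);       /* error path never sees a stale cs->id */
                return -1;
        }

        cs->id = id;                    /* publish only once everything worked */
        return 0;
}

int main(void)
{
        struct changeset cs = { 0 };

        printf("fail:    ret=%d id=%d\n", init_changeset(&cs, 1), cs.id);
        printf("success: ret=%d id=%d\n", init_changeset(&cs, 0), cs.id);
        return 0;
}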
index e568b1e..0f8052f 100644 (file)
@@ -2165,7 +2165,6 @@ static int __init overlay_data_add(int onum)
        ret = of_overlay_apply(info->np_overlay, &info->overlay_id);
        if (ret < 0) {
                pr_err("of_overlay_apply() (ret=%d), %d\n", ret, onum);
-               of_overlay_mutex_unlock();
                goto out_free_np_overlay;
        }
 
index e70ceb4..6ce6aef 100644 (file)
@@ -2,3 +2,4 @@ ccflags-$(CONFIG_DEBUG_DRIVER)  := -DDEBUG
 obj-y                          += core.o cpu.o
 obj-$(CONFIG_OF)               += of.o
 obj-$(CONFIG_DEBUG_FS)         += debugfs.o
+obj-$(CONFIG_ARM_TI_CPUFREQ)   += ti-opp-supply.o
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
new file mode 100644 (file)
index 0000000..370eff3
--- /dev/null
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *     Nishanth Menon <nm@ti.com>
+ *     Dave Gerlach <d-gerlach@ti.com>
+ *
+ * TI OPP supply driver that overrides the regulator control of the generic
+ * OPP core to handle devices with an ABB regulator and/or SmartReflex
+ * Class0.
+ */
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * struct ti_opp_supply_optimum_voltage_table - optimized voltage table
+ * @reference_uv:      reference voltage (usually the nominal voltage)
+ * @optimized_uv:      Optimized voltage from efuse
+ */
+struct ti_opp_supply_optimum_voltage_table {
+       unsigned int reference_uv;
+       unsigned int optimized_uv;
+};
+
+/**
+ * struct ti_opp_supply_data - OMAP specific opp supply data
+ * @vdd_table: Optimized voltage mapping table
+ * @num_vdd_table: number of entries in vdd_table
+ * @vdd_absolute_max_voltage_uv: absolute maximum voltage in uV for the supply
+ */
+struct ti_opp_supply_data {
+       struct ti_opp_supply_optimum_voltage_table *vdd_table;
+       u32 num_vdd_table;
+       u32 vdd_absolute_max_voltage_uv;
+};
+
+static struct ti_opp_supply_data opp_data;
+
+/**
+ * struct ti_opp_supply_of_data - device tree match data
+ * @flags:     specific type of opp supply
+ * @efuse_voltage_mask: mask required for efuse register representing voltage
+ * @efuse_voltage_uv: Are the efuse entries in micro-volts? If not, assume
+ *             milli-volts.
+ */
+struct ti_opp_supply_of_data {
+#define OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE   BIT(1)
+#define OPPDM_HAS_NO_ABB                       BIT(2)
+       const u8 flags;
+       const u32 efuse_voltage_mask;
+       const bool efuse_voltage_uv;
+};
+
+/**
+ * _store_optimized_voltages() - store optimized voltages
+ * @dev:       ti opp supply device for which we need to store info
+ * @data:      data specific to the device
+ *
+ * Picks up efuse-based optimized voltages for VDD, unique per device, and
+ * stores them in an internal data structure for use during transitions.
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int _store_optimized_voltages(struct device *dev,
+                                    struct ti_opp_supply_data *data)
+{
+       void __iomem *base;
+       struct property *prop;
+       struct resource *res;
+       const __be32 *val;
+       int proplen, i;
+       int ret = 0;
+       struct ti_opp_supply_optimum_voltage_table *table;
+       const struct ti_opp_supply_of_data *of_data = dev_get_drvdata(dev);
+
+       /* pick up Efuse based voltages */
+       res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev, "Unable to get IO resource\n");
+               ret = -ENODEV;
+               goto out_map;
+       }
+
+       base = ioremap_nocache(res->start, resource_size(res));
+       if (!base) {
+               dev_err(dev, "Unable to map Efuse registers\n");
+               ret = -ENOMEM;
+               goto out_map;
+       }
+
+       /* Fetch efuse-settings. */
+       prop = of_find_property(dev->of_node, "ti,efuse-settings", NULL);
+       if (!prop) {
+               dev_err(dev, "No 'ti,efuse-settings' property found\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       proplen = prop->length / sizeof(int);
+       data->num_vdd_table = proplen / 2;
+       /* Check for corrupted OPP entries in DT */
+       if (data->num_vdd_table * 2 * sizeof(int) != prop->length) {
+               dev_err(dev, "Invalid 'ti,efuse-settings'\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = of_property_read_u32(dev->of_node, "ti,absolute-max-voltage-uv",
+                                  &data->vdd_absolute_max_voltage_uv);
+       if (ret) {
+               dev_err(dev, "ti,absolute-max-voltage-uv is missing\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       table = kzalloc(sizeof(*data->vdd_table) *
+                                 data->num_vdd_table, GFP_KERNEL);
+       if (!table) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       data->vdd_table = table;
+
+       val = prop->value;
+       for (i = 0; i < data->num_vdd_table; i++, table++) {
+               u32 efuse_offset;
+               u32 tmp;
+
+               table->reference_uv = be32_to_cpup(val++);
+               efuse_offset = be32_to_cpup(val++);
+
+               tmp = readl(base + efuse_offset);
+               tmp &= of_data->efuse_voltage_mask;
+               tmp >>= __ffs(of_data->efuse_voltage_mask);
+
+               table->optimized_uv = of_data->efuse_voltage_uv ? tmp :
+                                       tmp * 1000;
+
+               dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d vset=%d\n",
+                       i, efuse_offset, table->reference_uv,
+                       table->optimized_uv);
+
+               /*
+                * Some older samples might not have an optimized efuse value.
+                * Use the reference voltage for those and just log a debug
+                * message for them.
+                */
+               if (!table->optimized_uv) {
+                       dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d:vset0\n",
+                               i, efuse_offset, table->reference_uv);
+                       table->optimized_uv = table->reference_uv;
+               }
+       }
+out:
+       iounmap(base);
+out_map:
+       return ret;
+}
+
+/**
+ * _free_optimized_voltages() - free resources for optimized voltages
+ * @dev:       device for which we need to free info
+ * @data:      data specific to the device
+ */
+static void _free_optimized_voltages(struct device *dev,
+                                    struct ti_opp_supply_data *data)
+{
+       kfree(data->vdd_table);
+       data->vdd_table = NULL;
+       data->num_vdd_table = 0;
+}
+
+/**
+ * _get_optimal_vdd_voltage() - Finds optimal voltage for the supply
+ * @dev:       device for which we need to find info
+ * @data:      data specific to the device
+ * @reference_uv:      reference voltage (OPP voltage) for which we need value
+ *
+ * Return: the optimized voltage if a match is found; otherwise reference_uv,
+ * which is also returned when no optimization is needed.
+ */
+static int _get_optimal_vdd_voltage(struct device *dev,
+                                   struct ti_opp_supply_data *data,
+                                   int reference_uv)
+{
+       int i;
+       struct ti_opp_supply_optimum_voltage_table *table;
+
+       if (!data->num_vdd_table)
+               return reference_uv;
+
+       table = data->vdd_table;
+       if (!table)
+               return -EINVAL;
+
+       /* Find an exact match - this list is usually very small */
+       for (i = 0; i < data->num_vdd_table; i++, table++)
+               if (table->reference_uv == reference_uv)
+                       return table->optimized_uv;
+
+       /* If things are screwed up, we'd make a mess on console; ratelimit */
+       dev_err_ratelimited(dev, "%s: Failed optimized voltage match for %d\n",
+                           __func__, reference_uv);
+       return reference_uv;
+}
+
+static int _opp_set_voltage(struct device *dev,
+                           struct dev_pm_opp_supply *supply,
+                           int new_target_uv, struct regulator *reg,
+                           char *reg_name)
+{
+       int ret;
+       unsigned long vdd_uv, uv_max;
+
+       if (new_target_uv)
+               vdd_uv = new_target_uv;
+       else
+               vdd_uv = supply->u_volt;
+
+       /*
+        * If we do have an absolute max voltage specified, then we should
+        * use that voltage instead to allow for cases where the voltage rails
+        * are ganged (for example, if we set the max for an OPP as 1.12 V and
+        * the absolute max is 1.5 V, another rail cannot reach 1.25 V if the
+        * regulator is constrained to a max of 1.12 V, even though it can
+        * function at 1.25 V).
+        */
+       if (opp_data.vdd_absolute_max_voltage_uv)
+               uv_max = opp_data.vdd_absolute_max_voltage_uv;
+       else
+               uv_max = supply->u_volt_max;
+
+       if (vdd_uv > uv_max ||
+           vdd_uv < supply->u_volt_min ||
+           supply->u_volt_min > uv_max) {
+               dev_warn(dev,
+                        "Invalid range voltages [Min:%lu target:%lu Max:%lu]\n",
+                        supply->u_volt_min, vdd_uv, uv_max);
+               return -EINVAL;
+       }
+
+       dev_dbg(dev, "%s scaling to %luuV[min %luuV max %luuV]\n", reg_name,
+               vdd_uv, supply->u_volt_min,
+               uv_max);
+
+       ret = regulator_set_voltage_triplet(reg,
+                                           supply->u_volt_min,
+                                           vdd_uv,
+                                           uv_max);
+       if (ret) {
+               dev_err(dev, "%s failed for %luuV[min %luuV max %luuV]\n",
+                       reg_name, vdd_uv, supply->u_volt_min,
+                       uv_max);
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * ti_opp_supply_set_opp() - do the opp supply transition
+ * @data:      information on regulators and new and old opps provided by
+ *             opp core to use in transition
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
+{
+       struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
+       struct dev_pm_opp_supply *old_supply_vbb = &data->old_opp.supplies[1];
+       struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
+       struct dev_pm_opp_supply *new_supply_vbb = &data->new_opp.supplies[1];
+       struct device *dev = data->dev;
+       unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
+       struct clk *clk = data->clk;
+       struct regulator *vdd_reg = data->regulators[0];
+       struct regulator *vbb_reg = data->regulators[1];
+       int vdd_uv;
+       int ret;
+
+       vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
+                                         new_supply_vbb->u_volt);
+
+       /* Scaling up? Scale voltage before frequency */
+       if (freq > old_freq) {
+               ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+                                      "vdd");
+               if (ret)
+                       goto restore_voltage;
+
+               ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+               if (ret)
+                       goto restore_voltage;
+       }
+
+       /* Change frequency */
+       dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+               __func__, old_freq, freq);
+
+       ret = clk_set_rate(clk, freq);
+       if (ret) {
+               dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+                       ret);
+               goto restore_voltage;
+       }
+
+       /* Scaling down? Scale voltage after frequency */
+       if (freq < old_freq) {
+               ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+               if (ret)
+                       goto restore_freq;
+
+               ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+                                      "vdd");
+               if (ret)
+                       goto restore_freq;
+       }
+
+       return 0;
+
+restore_freq:
+       ret = clk_set_rate(clk, old_freq);
+       if (ret)
+               dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+                       __func__, old_freq);
+restore_voltage:
+       /* This shouldn't harm even if the voltages weren't updated earlier */
+       if (old_supply_vdd->u_volt) {
+               ret = _opp_set_voltage(dev, old_supply_vbb, 0, vbb_reg, "vbb");
+               if (ret)
+                       return ret;
+
+               ret = _opp_set_voltage(dev, old_supply_vdd, 0, vdd_reg,
+                                      "vdd");
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+static const struct ti_opp_supply_of_data omap_generic_of_data = {
+};
+
+static const struct ti_opp_supply_of_data omap_omap5_of_data = {
+       .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE,
+       .efuse_voltage_mask = 0xFFF,
+       .efuse_voltage_uv = false,
+};
+
+static const struct ti_opp_supply_of_data omap_omap5core_of_data = {
+       .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE | OPPDM_HAS_NO_ABB,
+       .efuse_voltage_mask = 0xFFF,
+       .efuse_voltage_uv = false,
+};
+
+static const struct of_device_id ti_opp_supply_of_match[] = {
+       {.compatible = "ti,omap-opp-supply", .data = &omap_generic_of_data},
+       {.compatible = "ti,omap5-opp-supply", .data = &omap_omap5_of_data},
+       {.compatible = "ti,omap5-core-opp-supply",
+        .data = &omap_omap5core_of_data},
+       {},
+};
+MODULE_DEVICE_TABLE(of, ti_opp_supply_of_match);
+
+static int ti_opp_supply_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device *cpu_dev = get_cpu_device(0);
+       const struct of_device_id *match;
+       const struct ti_opp_supply_of_data *of_data;
+       int ret = 0;
+
+       match = of_match_device(ti_opp_supply_of_match, dev);
+       if (!match) {
+               /* We do not expect this to happen */
+               dev_err(dev, "%s: Unable to match device\n", __func__);
+               return -ENODEV;
+       }
+       if (!match->data) {
+               /* Again, unlikely, but mistakes do happen */
+               dev_err(dev, "%s: Bad data in match\n", __func__);
+               return -EINVAL;
+       }
+       of_data = match->data;
+
+       dev_set_drvdata(dev, (void *)of_data);
+
+       /* If we need optimized voltage */
+       if (of_data->flags & OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE) {
+               ret = _store_optimized_voltages(dev, &opp_data);
+               if (ret)
+                       return ret;
+       }
+
+       ret = PTR_ERR_OR_ZERO(dev_pm_opp_register_set_opp_helper(cpu_dev,
+                                                                ti_opp_supply_set_opp));
+       if (ret)
+               _free_optimized_voltages(dev, &opp_data);
+
+       return ret;
+}
+
+static struct platform_driver ti_opp_supply_driver = {
+       .probe = ti_opp_supply_probe,
+       .driver = {
+                  .name = "ti_opp_supply",
+                  .owner = THIS_MODULE,
+                  .of_match_table = of_match_ptr(ti_opp_supply_of_match),
+                  },
+};
+module_platform_driver(ti_opp_supply_driver);
+
+MODULE_DESCRIPTION("Texas Instruments OMAP OPP Supply driver");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
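For reference, _get_optimal_vdd_voltage() in the new driver does a linear walk of the small reference-to-optimized table built from the 'ti,efuse-settings' pairs and falls back to the reference voltage when nothing matches. A standalone sketch of that lookup with invented table values:

#include <stdio.h>

/* Mirrors ti_opp_supply_optimum_voltage_table; the values below are made up. */
struct volt_pair { unsigned int reference_uv, optimized_uv; };

static unsigned int optimal_uv(const struct volt_pair *t, unsigned int n,
                               unsigned int reference_uv)
{
        for (unsigned int i = 0; i < n; i++)
                if (t[i].reference_uv == reference_uv)
                        return t[i].optimized_uv;
        return reference_uv;    /* no match: fall back to the nominal voltage */
}

int main(void)
{
        const struct volt_pair table[] = {
                { 1060000, 1025000 },
                { 1160000, 1120000 },
        };

        printf("%u\n", optimal_uv(table, 2, 1160000));  /* 1120000 */
        printf("%u\n", optimal_uv(table, 2, 1250000));  /* no entry: 1250000 */
        return 0;
}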
index 0b3fb99..7390fb8 100644 (file)
@@ -303,7 +303,7 @@ static void dino_mask_irq(struct irq_data *d)
        struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
        int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
 
-       DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
+       DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
 
        /* Clear the matching bit in the IMR register */
        dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
@@ -316,7 +316,7 @@ static void dino_unmask_irq(struct irq_data *d)
        int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
        u32 tmp;
 
-       DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
+       DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
 
        /*
        ** clear pending IRQ bits
@@ -396,7 +396,7 @@ ilr_again:
        if (mask) {
                if (--ilr_loop > 0)
                        goto ilr_again;
-               printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n", 
+               printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n",
                       dino_dev->hba.base_addr, mask);
                return IRQ_NONE;
        }
@@ -553,7 +553,7 @@ dino_fixup_bus(struct pci_bus *bus)
         struct pci_dev *dev;
         struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
 
-       DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n",
+       DBG(KERN_WARNING "%s(0x%px) bus %d platform_data 0x%px\n",
            __func__, bus, bus->busn_res.start,
            bus->bridge->platform_data);
 
@@ -854,7 +854,7 @@ static int __init dino_common_init(struct parisc_device *dev,
        res->flags = IORESOURCE_IO; /* do not mark it busy ! */
        if (request_resource(&ioport_resource, res) < 0) {
                printk(KERN_ERR "%s: request I/O Port region failed "
-                      "0x%lx/%lx (hpa 0x%p)\n",
+                      "0x%lx/%lx (hpa 0x%px)\n",
                       name, (unsigned long)res->start, (unsigned long)res->end,
                       dino_dev->hba.base_addr);
                return 1;
index 4dd9b13..99a80da 100644 (file)
@@ -106,7 +106,7 @@ static int __init eisa_eeprom_init(void)
                return retval;
        }
 
-       printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr);
+       printk(KERN_INFO "EISA EEPROM at 0x%px\n", eisa_eeprom_addr);
        return 0;
 }
 
index a25fed5..41b740a 100644 (file)
@@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
        iounmap(base_addr);
 }
 
+
+/*
+ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
+ * seems rushed, as many built-in components simply don't work.
+ * The following quirks disable the serial AUX port and the built-in ATI RV100
+ * Radeon 7000 graphics card, neither of which has any external connectors and
+ * thus both are useless. Even worse, the AUX port occupies ttyS0, which makes
+ * those machines the only PARISC machines on which we can't use ttyS0 as the
+ * boot console.
+ */
+static void quirk_diva_ati_card(struct pci_dev *dev)
+{
+       if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+           dev->subsystem_device != 0x1292)
+               return;
+
+       dev_info(&dev->dev, "Hiding Diva built-in ATI card");
+       dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
+       quirk_diva_ati_card);
+
+static void quirk_diva_aux_disable(struct pci_dev *dev)
+{
+       if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+           dev->subsystem_device != 0x1291)
+               return;
+
+       dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
+       dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+       quirk_diva_aux_disable);
index 04dac6a..6b8d060 100644 (file)
@@ -985,9 +985,7 @@ static u32 hv_compose_msi_req_v1(
        int_pkt->wslot.slot = slot;
        int_pkt->int_desc.vector = vector;
        int_pkt->int_desc.vector_count = 1;
-       int_pkt->int_desc.delivery_mode =
-               (apic->irq_delivery_mode == dest_LowestPrio) ?
-                       dest_LowestPrio : dest_Fixed;
+       int_pkt->int_desc.delivery_mode = dest_Fixed;
 
        /*
         * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
@@ -1008,9 +1006,7 @@ static u32 hv_compose_msi_req_v2(
        int_pkt->wslot.slot = slot;
        int_pkt->int_desc.vector = vector;
        int_pkt->int_desc.vector_count = 1;
-       int_pkt->int_desc.delivery_mode =
-               (apic->irq_delivery_mode == dest_LowestPrio) ?
-                       dest_LowestPrio : dest_Fixed;
+       int_pkt->int_desc.delivery_mode = dest_Fixed;
 
        /*
         * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
index 12796ec..52ab3cb 100644 (file)
@@ -1128,12 +1128,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
        err = rcar_pcie_get_resources(pcie);
        if (err < 0) {
                dev_err(dev, "failed to request resources: %d\n", err);
-               goto err_free_bridge;
+               goto err_free_resource_list;
        }
 
        err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
        if (err)
-               goto err_free_bridge;
+               goto err_free_resource_list;
 
        pm_runtime_enable(dev);
        err = pm_runtime_get_sync(dev);
@@ -1176,9 +1176,9 @@ err_pm_put:
 err_pm_disable:
        pm_runtime_disable(dev);
 
-err_free_bridge:
-       pci_free_host_bridge(bridge);
+err_free_resource_list:
        pci_free_resource_list(&pcie->resources);
+       pci_free_host_bridge(bridge);
 
        return err;
 }
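The relabelled error path above releases the resource list before freeing the host bridge, i.e. it unwinds in the reverse order of setup. A trivial standalone illustration of that goto-unwind convention (plain malloc/free stand-ins, not the PCI core API):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int later_step_failed = 1;      /* pretend a later init step failed */
        char *bridge, *resource_list;

        bridge = malloc(16);            /* allocated first */
        if (!bridge)
                return 1;

        resource_list = malloc(64);     /* allocated second */
        if (!resource_list)
                goto err_free_bridge;

        if (later_step_failed)
                goto err_free_resource_list;

        /* success path would continue here */
        free(resource_list);
        free(bridge);
        return 0;

err_free_resource_list:
        free(resource_list);            /* released first ... */
err_free_bridge:
        free(bridge);                   /* ... what was allocated first goes last */
        printf("unwound in reverse order of setup\n");
        return 1;
}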
index 7f47bb7..5958c8d 100644 (file)
@@ -699,7 +699,7 @@ static void pci_pm_complete(struct device *dev)
        pm_generic_complete(dev);
 
        /* Resume device if platform firmware has put it in reset-power-on */
-       if (dev->power.direct_complete && pm_resume_via_firmware()) {
+       if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
                pci_power_t pre_sleep_state = pci_dev->current_state;
 
                pci_update_current_state(pci_dev, pci_dev->current_state);
@@ -783,8 +783,10 @@ static int pci_pm_suspend_noirq(struct device *dev)
        struct pci_dev *pci_dev = to_pci_dev(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-       if (dev_pm_smart_suspend_and_suspended(dev))
+       if (dev_pm_smart_suspend_and_suspended(dev)) {
+               dev->power.may_skip_resume = true;
                return 0;
+       }
 
        if (pci_has_legacy_pm_support(pci_dev))
                return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -838,6 +840,16 @@ static int pci_pm_suspend_noirq(struct device *dev)
 Fixup:
        pci_fixup_device(pci_fixup_suspend_late, pci_dev);
 
+       /*
+        * If the target system sleep state is suspend-to-idle, it is sufficient
+        * to check whether or not the device's wakeup settings are good for
+        * runtime PM.  Otherwise, the pm_resume_via_firmware() check will cause
+        * pci_pm_complete() to take care of fixing up the device's state
+        * anyway, if need be.
+        */
+       dev->power.may_skip_resume = device_may_wakeup(dev) ||
+                                       !device_can_wakeup(dev);
+
        return 0;
 }
 
@@ -847,6 +859,9 @@ static int pci_pm_resume_noirq(struct device *dev)
        struct device_driver *drv = dev->driver;
        int error = 0;
 
+       if (dev_pm_may_skip_resume(dev))
+               return 0;
+
        /*
         * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
         * during system suspend, so update their runtime PM status to "active"
@@ -953,7 +968,7 @@ static int pci_pm_freeze_late(struct device *dev)
        if (dev_pm_smart_suspend_and_suspended(dev))
                return 0;
 
-       return pm_generic_freeze_late(dev);;
+       return pm_generic_freeze_late(dev);
 }
 
 static int pci_pm_freeze_noirq(struct device *dev)
@@ -999,7 +1014,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
         * the subsequent "thaw" callbacks for the device.
         */
        if (dev_pm_smart_suspend_and_suspended(dev)) {
-               dev->power.direct_complete = true;
+               dev_pm_skip_next_resume_phases(dev);
                return 0;
        }
 
@@ -1012,7 +1027,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
        if (pci_has_legacy_pm_support(pci_dev))
                return pci_legacy_resume_early(dev);
 
-       pci_update_current_state(pci_dev, PCI_D0);
+       /*
+        * pci_restore_state() requires the device to be in D0 (because of MSI
+        * restoration among other things), so force it into D0 in case the
+        * driver's "freeze" callbacks put it into a low-power state directly.
+        */
+       pci_set_power_state(pci_dev, PCI_D0);
        pci_restore_state(pci_dev);
 
        if (drv && drv->pm && drv->pm->thaw_noirq)
index ffbf4e7..fb1c1bb 100644 (file)
@@ -150,6 +150,9 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
 
        pci_save_state(dev);
 
+       dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_SMART_SUSPEND |
+                                          DPM_FLAG_LEAVE_SUSPENDED);
+
        if (pci_bridge_d3_possible(dev)) {
                /*
                 * Keep the port resumed 100ms to make sure things like
index accaaac..6601ad0 100644 (file)
@@ -310,7 +310,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
        int irq, error;
 
        irq = platform_get_irq_byname(pdev, name);
-       if (!irq)
+       if (irq < 0)
                return -ENODEV;
 
        error = devm_request_threaded_irq(ddata->dev, irq, NULL,
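The small fix above matters because platform_get_irq_byname() reports failure as a negative errno, so testing '!irq' would let real errors (including probe deferral) through as if they were valid interrupt numbers. A tiny standalone illustration of the 'negative means error' check, with a made-up getter:

#include <stdio.h>

/* Hypothetical getter: returns an IRQ number >= 0, or a negative errno. */
static int get_named_irq(int simulate_error)
{
        return simulate_error ? -19 /* -ENODEV */ : 42;
}

int main(void)
{
        int irq = get_named_irq(1);

        if (irq < 0) {                  /* correct: catches every error code */
                printf("no irq: %d\n", irq);
                return 1;
        }
        printf("irq %d\n", irq);        /* only reached for real IRQ numbers */
        return 0;
}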
index b4964b0..8f6e8e2 100644 (file)
@@ -410,6 +410,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
        if (ret)
                return ERR_PTR(-ENODEV);
 
+       /* This phy type is handled by the usb-phy subsystem for now */
+       if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
+               return ERR_PTR(-ENODEV);
+
        mutex_lock(&phy_provider_mutex);
        phy_provider = of_phy_provider_lookup(args.np);
        if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
index cb09245..c845fac 100644 (file)
@@ -12,7 +12,9 @@ config PHY_RCAR_GEN3_USB2
        tristate "Renesas R-Car generation 3 USB 2.0 PHY driver"
        depends on ARCH_RENESAS
        depends on EXTCON
+       depends on USB_SUPPORT
        select GENERIC_PHY
+       select USB_COMMON
        help
          Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs.
 
index ee85fa0..7492c89 100644 (file)
@@ -1137,6 +1137,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
                if (IS_ERR(phy)) {
                        dev_err(dev, "failed to create phy: %s\n",
                                child_np->name);
+                       pm_runtime_disable(dev);
                        return PTR_ERR(phy);
                }
 
@@ -1146,6 +1147,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
        phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
        if (IS_ERR(phy_provider)) {
                dev_err(dev, "Failed to register phy provider\n");
+               pm_runtime_disable(dev);
                return PTR_ERR(phy_provider);
        }
 
index 4307bf0..63e916d 100644 (file)
@@ -75,14 +75,14 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
 static struct device_node *
 tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
 {
-       /*
-        * of_find_node_by_name() drops a reference, so make sure to grab one.
-        */
-       struct device_node *np = of_node_get(padctl->dev->of_node);
+       struct device_node *pads, *np;
+
+       pads = of_get_child_by_name(padctl->dev->of_node, "pads");
+       if (!pads)
+               return NULL;
 
-       np = of_find_node_by_name(np, "pads");
-       if (np)
-               np = of_find_node_by_name(np, name);
+       np = of_get_child_by_name(pads, name);
+       of_node_put(pads);
 
        return np;
 }
@@ -90,16 +90,16 @@ tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
 static struct device_node *
 tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index)
 {
-       /*
-        * of_find_node_by_name() drops a reference, so make sure to grab one.
-        */
-       struct device_node *np = of_node_get(pad->dev.of_node);
+       struct device_node *np, *lanes;
 
-       np = of_find_node_by_name(np, "lanes");
-       if (!np)
+       lanes = of_get_child_by_name(pad->dev.of_node, "lanes");
+       if (!lanes)
                return NULL;
 
-       return of_find_node_by_name(np, pad->soc->lanes[index].name);
+       np = of_get_child_by_name(lanes, pad->soc->lanes[index].name);
+       of_node_put(lanes);
+
+       return np;
 }
 
 static int
@@ -195,7 +195,7 @@ int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
        unsigned int i;
        int err;
 
-       children = of_find_node_by_name(pad->dev.of_node, "lanes");
+       children = of_get_child_by_name(pad->dev.of_node, "lanes");
        if (!children)
                return -ENODEV;
 
@@ -444,21 +444,21 @@ static struct device_node *
 tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
                          unsigned int index)
 {
-       /*
-        * of_find_node_by_name() drops a reference, so make sure to grab one.
-        */
-       struct device_node *np = of_node_get(padctl->dev->of_node);
+       struct device_node *ports, *np;
+       char *name;
 
-       np = of_find_node_by_name(np, "ports");
-       if (np) {
-               char *name;
+       ports = of_get_child_by_name(padctl->dev->of_node, "ports");
+       if (!ports)
+               return NULL;
 
-               name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
-               if (!name)
-                       return ERR_PTR(-ENOMEM);
-               np = of_find_node_by_name(np, name);
-               kfree(name);
+       name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+       if (!name) {
+               of_node_put(ports);
+               return ERR_PTR(-ENOMEM);
        }
+       np = of_get_child_by_name(ports, name);
+       kfree(name);
+       of_node_put(ports);
 
        return np;
 }
@@ -847,7 +847,7 @@ static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
 
 static int tegra_xusb_padctl_probe(struct platform_device *pdev)
 {
-       struct device_node *np = of_node_get(pdev->dev.of_node);
+       struct device_node *np = pdev->dev.of_node;
        const struct tegra_xusb_padctl_soc *soc;
        struct tegra_xusb_padctl *padctl;
        const struct of_device_id *match;
@@ -855,7 +855,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
        int err;
 
        /* for backwards compatibility with old device trees */
-       np = of_find_node_by_name(np, "pads");
+       np = of_get_child_by_name(np, "pads");
        if (!np) {
                dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n");
                return tegra_xusb_padctl_legacy_probe(pdev);
index bdedb63..4471fd9 100644 (file)
@@ -1620,6 +1620,22 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                        clear_bit(i, chip->irq.valid_mask);
        }
 
+       /*
+        * The same set of machines in chv_no_valid_mask[] has incorrectly
+        * configured GPIOs that generate spurious interrupts, so we use
+        * this same list to apply another quirk for them.
+        *
+        * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953.
+        */
+       if (!need_valid_mask) {
+               /*
+                * Mask all interrupts the community is able to generate
+                * but leave the ones that can only generate GPEs unmasked.
+                */
+               chv_writel(GENMASK(31, pctrl->community->nirqs),
+                          pctrl->regs + CHV_INTMASK);
+       }
+
        /* Clear all interrupts */
        chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
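The Cherryview quirk above builds its interrupt mask in a single register write using GENMASK(31, nirqs), i.e. a 32-bit word with bits nirqs through 31 set. A standalone sketch of the equivalent mask arithmetic (the macro below is a local stand-in valid for 0 <= l <= h <= 31, not the kernel's GENMASK):

#include <stdio.h>

/* Bits h..l set, everything else clear. */
#define GENMASK32(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

int main(void)
{
        unsigned int nirqs = 8;         /* invented per-community line count */

        printf("0x%08x\n", GENMASK32(31, nirqs));       /* 0xffffff00 */
        return 0;
}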
 
index 4500880..6572550 100644 (file)
@@ -207,7 +207,7 @@ static const unsigned int dnv_uart0_pins[] = { 60, 61, 64, 65 };
 static const unsigned int dnv_uart0_modes[] = { 2, 3, 1, 1 };
 static const unsigned int dnv_uart1_pins[] = { 94, 95, 96, 97 };
 static const unsigned int dnv_uart2_pins[] = { 60, 61, 62, 63 };
-static const unsigned int dnv_uart2_modes[] = { 1, 1, 2, 2 };
+static const unsigned int dnv_uart2_modes[] = { 1, 2, 2, 2 };
 static const unsigned int dnv_emmc_pins[] = {
        142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
 };
index d45af31..bdb8d17 100644 (file)
@@ -408,12 +408,21 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
 {
        struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
        unsigned int reg = OUTPUT_EN;
-       unsigned int mask;
+       unsigned int mask, val, ret;
 
        armada_37xx_update_reg(&reg, offset);
        mask = BIT(offset);
 
-       return regmap_update_bits(info->regmap, reg, mask, mask);
+       ret = regmap_update_bits(info->regmap, reg, mask, mask);
+
+       if (ret)
+               return ret;
+
+       reg = OUTPUT_VAL;
+       val = value ? mask : 0;
+       regmap_update_bits(info->regmap, reg, mask, val);
+
+       return 0;
 }
 
 static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
index e9b83e2..c11b8f1 100644 (file)
@@ -2322,7 +2322,7 @@ static const struct gemini_pin_conf *gemini_get_pin_conf(struct gemini_pmx *pmx,
        int i;
 
        for (i = 0; i < pmx->nconfs; i++) {
-               retconf = &gemini_confs_3516[i];
+               retconf = &pmx->confs[i];
                if (retconf->pin == pin)
                        return retconf;
        }
index e6cd8de..3501491 100644 (file)
@@ -222,6 +222,9 @@ static enum pin_config_param pcs_bias[] = {
  */
 static struct lock_class_key pcs_lock_class;
 
+/* Class for the IRQ request mutex */
+static struct lock_class_key pcs_request_class;
+
 /*
  * REVISIT: Reads and writes could eventually use regmap or something
  * generic. But at least on omaps, some mux registers are performance
@@ -1486,7 +1489,7 @@ static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq,
        irq_set_chip_data(irq, pcs_soc);
        irq_set_chip_and_handler(irq, &pcs->chip,
                                 handle_level_irq);
-       irq_set_lockdep_class(irq, &pcs_lock_class);
+       irq_set_lockdep_class(irq, &pcs_lock_class, &pcs_request_class);
        irq_set_noprobe(irq);
 
        return 0;
index a276c61..e62ab08 100644 (file)
@@ -290,7 +290,7 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
 }
 
 static int stm32_gpio_domain_activate(struct irq_domain *d,
-                                     struct irq_data *irq_data, bool early)
+                                     struct irq_data *irq_data, bool reserve)
 {
        struct stm32_gpio_bank *bank = d->host_data;
        struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
index 4f2a726..f5f7743 100644 (file)
@@ -428,7 +428,7 @@ static const struct sunxi_desc_pin a64_pins[] = {
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x2, "mmc0"),          /* D3 */
-                 SUNXI_FUNCTION(0x4, "uart0")),        /* RX */
+                 SUNXI_FUNCTION(0x3, "uart0")),        /* RX */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
index 97b4833..a78d7b9 100644 (file)
@@ -535,14 +535,16 @@ static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data_broken = {
        .pins = sun50i_h5_pins,
        .npins = ARRAY_SIZE(sun50i_h5_pins),
        .irq_banks = 2,
-       .irq_read_needs_mux = true
+       .irq_read_needs_mux = true,
+       .disable_strict_mode = true,
 };
 
 static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data = {
        .pins = sun50i_h5_pins,
        .npins = ARRAY_SIZE(sun50i_h5_pins),
        .irq_banks = 3,
-       .irq_read_needs_mux = true
+       .irq_read_needs_mux = true,
+       .disable_strict_mode = true,
 };
 
 static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
index 472ef0d..5553c0e 100644 (file)
@@ -145,19 +145,19 @@ static const struct sunxi_desc_pin sun9i_a80_pins[] = {
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x3, "mcsi"),          /* MCLK */
-                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)), /* PB_EINT14 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x3, "mcsi"),          /* SCK */
                  SUNXI_FUNCTION(0x4, "i2c4"),          /* SCK */
-                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PB_EINT15 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x3, "mcsi"),          /* SDA */
                  SUNXI_FUNCTION(0x4, "i2c4"),          /* SDA */
-                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PB_EINT16 */
 
        /* Hole */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
index 0ad6e29..e728a96 100644 (file)
@@ -38,14 +38,8 @@ config CHROMEOS_PSTORE
          If you have a supported Chromebook, choose Y or M here.
          The module will be called chromeos_pstore.
 
-config CROS_EC_CHARDEV
-        tristate "Chrome OS Embedded Controller userspace device interface"
-        depends on MFD_CROS_EC
-        ---help---
-          This driver adds support to talk with the ChromeOS EC from userspace.
-
-          If you have a supported Chromebook, choose Y or M here.
-          The module will be called cros_ec_dev.
+config CROS_EC_CTL
+        tristate
 
 config CROS_EC_LPC
         tristate "ChromeOS Embedded Controller (LPC)"
index a077b1f..ff3b369 100644 (file)
@@ -2,10 +2,9 @@
 
 obj-$(CONFIG_CHROMEOS_LAPTOP)          += chromeos_laptop.o
 obj-$(CONFIG_CHROMEOS_PSTORE)          += chromeos_pstore.o
-cros_ec_devs-objs                      := cros_ec_dev.o cros_ec_sysfs.o \
-                                          cros_ec_lightbar.o cros_ec_vbc.o \
-                                          cros_ec_debugfs.o
-obj-$(CONFIG_CROS_EC_CHARDEV)          += cros_ec_devs.o
+cros_ec_ctl-objs                       := cros_ec_sysfs.o cros_ec_lightbar.o \
+                                          cros_ec_vbc.o cros_ec_debugfs.o
+obj-$(CONFIG_CROS_EC_CTL)              += cros_ec_ctl.o
 cros_ec_lpcs-objs                      := cros_ec_lpc.o cros_ec_lpc_reg.o
 cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o
 obj-$(CONFIG_CROS_EC_LPC)              += cros_ec_lpcs.o
index 4cc66f4..98a35d3 100644 (file)
@@ -29,9 +29,6 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 
-#include "cros_ec_dev.h"
-#include "cros_ec_debugfs.h"
-
 #define LOG_SHIFT              14
 #define LOG_SIZE               (1 << LOG_SHIFT)
 #define LOG_POLL_SEC           10
@@ -390,6 +387,7 @@ remove_debugfs:
        debugfs_remove_recursive(debug_info->dir);
        return ret;
 }
+EXPORT_SYMBOL(cros_ec_debugfs_init);
 
 void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
 {
@@ -399,3 +397,4 @@ void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
        debugfs_remove_recursive(ec->debug_info->dir);
        cros_ec_cleanup_console_log(ec->debug_info);
 }
+EXPORT_SYMBOL(cros_ec_debugfs_remove);
diff --git a/drivers/platform/chrome/cros_ec_debugfs.h b/drivers/platform/chrome/cros_ec_debugfs.h
deleted file mode 100644 (file)
index 1ff3a50..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2015 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _DRV_CROS_EC_DEBUGFS_H_
-#define _DRV_CROS_EC_DEBUGFS_H_
-
-#include "cros_ec_dev.h"
-
-/* debugfs stuff */
-int cros_ec_debugfs_init(struct cros_ec_dev *ec);
-void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
-
-#endif  /* _DRV_CROS_EC_DEBUGFS_H_ */
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
deleted file mode 100644 (file)
index cf6c4f0..0000000
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * cros_ec_dev - expose the Chrome OS Embedded Controller to user-space
- *
- * Copyright (C) 2014 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/fs.h>
-#include <linux/mfd/core.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include "cros_ec_debugfs.h"
-#include "cros_ec_dev.h"
-
-/* Device variables */
-#define CROS_MAX_DEV 128
-static int ec_major;
-
-static const struct attribute_group *cros_ec_groups[] = {
-       &cros_ec_attr_group,
-       &cros_ec_lightbar_attr_group,
-       &cros_ec_vbc_attr_group,
-       NULL,
-};
-
-static struct class cros_class = {
-       .owner          = THIS_MODULE,
-       .name           = "chromeos",
-       .dev_groups     = cros_ec_groups,
-};
-
-/* Basic communication */
-static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen)
-{
-       struct ec_response_get_version *resp;
-       static const char * const current_image_name[] = {
-               "unknown", "read-only", "read-write", "invalid",
-       };
-       struct cros_ec_command *msg;
-       int ret;
-
-       msg = kmalloc(sizeof(*msg) + sizeof(*resp), GFP_KERNEL);
-       if (!msg)
-               return -ENOMEM;
-
-       msg->version = 0;
-       msg->command = EC_CMD_GET_VERSION + ec->cmd_offset;
-       msg->insize = sizeof(*resp);
-       msg->outsize = 0;
-
-       ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
-       if (ret < 0)
-               goto exit;
-
-       if (msg->result != EC_RES_SUCCESS) {
-               snprintf(str, maxlen,
-                        "%s\nUnknown EC version: EC returned %d\n",
-                        CROS_EC_DEV_VERSION, msg->result);
-               ret = -EINVAL;
-               goto exit;
-       }
-
-       resp = (struct ec_response_get_version *)msg->data;
-       if (resp->current_image >= ARRAY_SIZE(current_image_name))
-               resp->current_image = 3; /* invalid */
-
-       snprintf(str, maxlen, "%s\n%s\n%s\n%s\n", CROS_EC_DEV_VERSION,
-                resp->version_string_ro, resp->version_string_rw,
-                current_image_name[resp->current_image]);
-
-       ret = 0;
-exit:
-       kfree(msg);
-       return ret;
-}
-
-static int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
-{
-       struct cros_ec_command *msg;
-       int ret;
-
-       if (ec->features[0] == -1U && ec->features[1] == -1U) {
-               /* features bitmap not read yet */
-
-               msg = kmalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
-               if (!msg)
-                       return -ENOMEM;
-
-               msg->version = 0;
-               msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
-               msg->insize = sizeof(ec->features);
-               msg->outsize = 0;
-
-               ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
-               if (ret < 0 || msg->result != EC_RES_SUCCESS) {
-                       dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
-                                ret, msg->result);
-                       memset(ec->features, 0, sizeof(ec->features));
-               }
-
-               memcpy(ec->features, msg->data, sizeof(ec->features));
-
-               dev_dbg(ec->dev, "EC features %08x %08x\n",
-                       ec->features[0], ec->features[1]);
-
-               kfree(msg);
-       }
-
-       return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
-}
-
-/* Device file ops */
-static int ec_device_open(struct inode *inode, struct file *filp)
-{
-       struct cros_ec_dev *ec = container_of(inode->i_cdev,
-                                             struct cros_ec_dev, cdev);
-       filp->private_data = ec;
-       nonseekable_open(inode, filp);
-       return 0;
-}
-
-static int ec_device_release(struct inode *inode, struct file *filp)
-{
-       return 0;
-}
-
-static ssize_t ec_device_read(struct file *filp, char __user *buffer,
-                             size_t length, loff_t *offset)
-{
-       struct cros_ec_dev *ec = filp->private_data;
-       char msg[sizeof(struct ec_response_get_version) +
-                sizeof(CROS_EC_DEV_VERSION)];
-       size_t count;
-       int ret;
-
-       if (*offset != 0)
-               return 0;
-
-       ret = ec_get_version(ec, msg, sizeof(msg));
-       if (ret)
-               return ret;
-
-       count = min(length, strlen(msg));
-
-       if (copy_to_user(buffer, msg, count))
-               return -EFAULT;
-
-       *offset = count;
-       return count;
-}
-
-/* Ioctls */
-static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
-{
-       long ret;
-       struct cros_ec_command u_cmd;
-       struct cros_ec_command *s_cmd;
-
-       if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
-               return -EFAULT;
-
-       if ((u_cmd.outsize > EC_MAX_MSG_BYTES) ||
-           (u_cmd.insize > EC_MAX_MSG_BYTES))
-               return -EINVAL;
-
-       s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
-                       GFP_KERNEL);
-       if (!s_cmd)
-               return -ENOMEM;
-
-       if (copy_from_user(s_cmd, arg, sizeof(*s_cmd) + u_cmd.outsize)) {
-               ret = -EFAULT;
-               goto exit;
-       }
-
-       if (u_cmd.outsize != s_cmd->outsize ||
-           u_cmd.insize != s_cmd->insize) {
-               ret = -EINVAL;
-               goto exit;
-       }
-
-       s_cmd->command += ec->cmd_offset;
-       ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
-       /* Only copy data to userland if data was received. */
-       if (ret < 0)
-               goto exit;
-
-       if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
-               ret = -EFAULT;
-exit:
-       kfree(s_cmd);
-       return ret;
-}
-
-static long ec_device_ioctl_readmem(struct cros_ec_dev *ec, void __user *arg)
-{
-       struct cros_ec_device *ec_dev = ec->ec_dev;
-       struct cros_ec_readmem s_mem = { };
-       long num;
-
-       /* Not every platform supports direct reads */
-       if (!ec_dev->cmd_readmem)
-               return -ENOTTY;
-
-       if (copy_from_user(&s_mem, arg, sizeof(s_mem)))
-               return -EFAULT;
-
-       num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes,
-                                 s_mem.buffer);
-       if (num <= 0)
-               return num;
-
-       if (copy_to_user((void __user *)arg, &s_mem, sizeof(s_mem)))
-               return -EFAULT;
-
-       return num;
-}
-
-static long ec_device_ioctl(struct file *filp, unsigned int cmd,
-                           unsigned long arg)
-{
-       struct cros_ec_dev *ec = filp->private_data;
-
-       if (_IOC_TYPE(cmd) != CROS_EC_DEV_IOC)
-               return -ENOTTY;
-
-       switch (cmd) {
-       case CROS_EC_DEV_IOCXCMD:
-               return ec_device_ioctl_xcmd(ec, (void __user *)arg);
-       case CROS_EC_DEV_IOCRDMEM:
-               return ec_device_ioctl_readmem(ec, (void __user *)arg);
-       }
-
-       return -ENOTTY;
-}
-
-/* Module initialization */
-static const struct file_operations fops = {
-       .open = ec_device_open,
-       .release = ec_device_release,
-       .read = ec_device_read,
-       .unlocked_ioctl = ec_device_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = ec_device_ioctl,
-#endif
-};
-
-static void __remove(struct device *dev)
-{
-       struct cros_ec_dev *ec = container_of(dev, struct cros_ec_dev,
-                                             class_dev);
-       kfree(ec);
-}
-
-static void cros_ec_sensors_register(struct cros_ec_dev *ec)
-{
-       /*
-        * Issue a command to get the number of sensors reported.
-        * Build an array of sensor drivers and register them all.
-        */
-       int ret, i, id, sensor_num;
-       struct mfd_cell *sensor_cells;
-       struct cros_ec_sensor_platform *sensor_platforms;
-       int sensor_type[MOTIONSENSE_TYPE_MAX];
-       struct ec_params_motion_sense *params;
-       struct ec_response_motion_sense *resp;
-       struct cros_ec_command *msg;
-
-       msg = kzalloc(sizeof(struct cros_ec_command) +
-                     max(sizeof(*params), sizeof(*resp)), GFP_KERNEL);
-       if (msg == NULL)
-               return;
-
-       msg->version = 2;
-       msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
-       msg->outsize = sizeof(*params);
-       msg->insize = sizeof(*resp);
-
-       params = (struct ec_params_motion_sense *)msg->data;
-       params->cmd = MOTIONSENSE_CMD_DUMP;
-
-       ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
-       if (ret < 0 || msg->result != EC_RES_SUCCESS) {
-               dev_warn(ec->dev, "cannot get EC sensor information: %d/%d\n",
-                        ret, msg->result);
-               goto error;
-       }
-
-       resp = (struct ec_response_motion_sense *)msg->data;
-       sensor_num = resp->dump.sensor_count;
-       /* Allocate 2 extra sensors in case lid angle or FIFO are needed */
-       sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 2),
-                              GFP_KERNEL);
-       if (sensor_cells == NULL)
-               goto error;
-
-       sensor_platforms = kzalloc(sizeof(struct cros_ec_sensor_platform) *
-                 (sensor_num + 1), GFP_KERNEL);
-       if (sensor_platforms == NULL)
-               goto error_platforms;
-
-       memset(sensor_type, 0, sizeof(sensor_type));
-       id = 0;
-       for (i = 0; i < sensor_num; i++) {
-               params->cmd = MOTIONSENSE_CMD_INFO;
-               params->info.sensor_num = i;
-               ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
-               if (ret < 0 || msg->result != EC_RES_SUCCESS) {
-                       dev_warn(ec->dev, "no info for EC sensor %d : %d/%d\n",
-                                i, ret, msg->result);
-                       continue;
-               }
-               switch (resp->info.type) {
-               case MOTIONSENSE_TYPE_ACCEL:
-                       sensor_cells[id].name = "cros-ec-accel";
-                       break;
-               case MOTIONSENSE_TYPE_BARO:
-                       sensor_cells[id].name = "cros-ec-baro";
-                       break;
-               case MOTIONSENSE_TYPE_GYRO:
-                       sensor_cells[id].name = "cros-ec-gyro";
-                       break;
-               case MOTIONSENSE_TYPE_MAG:
-                       sensor_cells[id].name = "cros-ec-mag";
-                       break;
-               case MOTIONSENSE_TYPE_PROX:
-                       sensor_cells[id].name = "cros-ec-prox";
-                       break;
-               case MOTIONSENSE_TYPE_LIGHT:
-                       sensor_cells[id].name = "cros-ec-light";
-                       break;
-               case MOTIONSENSE_TYPE_ACTIVITY:
-                       sensor_cells[id].name = "cros-ec-activity";
-                       break;
-               default:
-                       dev_warn(ec->dev, "unknown type %d\n", resp->info.type);
-                       continue;
-               }
-               sensor_platforms[id].sensor_num = i;
-               sensor_cells[id].id = sensor_type[resp->info.type];
-               sensor_cells[id].platform_data = &sensor_platforms[id];
-               sensor_cells[id].pdata_size =
-                       sizeof(struct cros_ec_sensor_platform);
-
-               sensor_type[resp->info.type]++;
-               id++;
-       }
-       if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2) {
-               sensor_platforms[id].sensor_num = sensor_num;
-
-               sensor_cells[id].name = "cros-ec-angle";
-               sensor_cells[id].id = 0;
-               sensor_cells[id].platform_data = &sensor_platforms[id];
-               sensor_cells[id].pdata_size =
-                       sizeof(struct cros_ec_sensor_platform);
-               id++;
-       }
-       if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
-               sensor_cells[id].name = "cros-ec-ring";
-               id++;
-       }
-
-       ret = mfd_add_devices(ec->dev, 0, sensor_cells, id,
-                             NULL, 0, NULL);
-       if (ret)
-               dev_err(ec->dev, "failed to add EC sensors\n");
-
-       kfree(sensor_platforms);
-error_platforms:
-       kfree(sensor_cells);
-error:
-       kfree(msg);
-}
-
-static int ec_device_probe(struct platform_device *pdev)
-{
-       int retval = -ENOMEM;
-       struct device *dev = &pdev->dev;
-       struct cros_ec_platform *ec_platform = dev_get_platdata(dev);
-       struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL);
-
-       if (!ec)
-               return retval;
-
-       dev_set_drvdata(dev, ec);
-       ec->ec_dev = dev_get_drvdata(dev->parent);
-       ec->dev = dev;
-       ec->cmd_offset = ec_platform->cmd_offset;
-       ec->features[0] = -1U; /* Not cached yet */
-       ec->features[1] = -1U; /* Not cached yet */
-       device_initialize(&ec->class_dev);
-       cdev_init(&ec->cdev, &fops);
-
-       /*
-        * Add the class device and link it to the character device so
-        * that the /dev entry is created in devtmpfs.
-        */
-       ec->class_dev.devt = MKDEV(ec_major, pdev->id);
-       ec->class_dev.class = &cros_class;
-       ec->class_dev.parent = dev;
-       ec->class_dev.release = __remove;
-
-       retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name);
-       if (retval) {
-               dev_err(dev, "dev_set_name failed => %d\n", retval);
-               goto failed;
-       }
-
-       retval = cdev_device_add(&ec->cdev, &ec->class_dev);
-       if (retval) {
-               dev_err(dev, "cdev_device_add failed => %d\n", retval);
-               goto failed;
-       }
-
-       if (cros_ec_debugfs_init(ec))
-               dev_warn(dev, "failed to create debugfs directory\n");
-
-       /* check whether this EC is a sensor hub. */
-       if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
-               cros_ec_sensors_register(ec);
-
-       /* Take control of the lightbar from the EC. */
-       lb_manual_suspend_ctrl(ec, 1);
-
-       return 0;
-
-failed:
-       put_device(&ec->class_dev);
-       return retval;
-}
-
-static int ec_device_remove(struct platform_device *pdev)
-{
-       struct cros_ec_dev *ec = dev_get_drvdata(&pdev->dev);
-
-       /* Let the EC take over the lightbar again. */
-       lb_manual_suspend_ctrl(ec, 0);
-
-       cros_ec_debugfs_remove(ec);
-
-       cdev_del(&ec->cdev);
-       device_unregister(&ec->class_dev);
-       return 0;
-}
-
-static const struct platform_device_id cros_ec_id[] = {
-       { "cros-ec-ctl", 0 },
-       { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(platform, cros_ec_id);
-
-static __maybe_unused int ec_device_suspend(struct device *dev)
-{
-       struct cros_ec_dev *ec = dev_get_drvdata(dev);
-
-       lb_suspend(ec);
-
-       return 0;
-}
-
-static __maybe_unused int ec_device_resume(struct device *dev)
-{
-       struct cros_ec_dev *ec = dev_get_drvdata(dev);
-
-       lb_resume(ec);
-
-       return 0;
-}
-
-static const struct dev_pm_ops cros_ec_dev_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
-       .suspend = ec_device_suspend,
-       .resume = ec_device_resume,
-#endif
-};
-
-static struct platform_driver cros_ec_dev_driver = {
-       .driver = {
-               .name = "cros-ec-ctl",
-               .pm = &cros_ec_dev_pm_ops,
-       },
-       .probe = ec_device_probe,
-       .remove = ec_device_remove,
-};
-
-static int __init cros_ec_dev_init(void)
-{
-       int ret;
-       dev_t dev = 0;
-
-       ret = class_register(&cros_class);
-       if (ret) {
-               pr_err(CROS_EC_DEV_NAME ": failed to register device class\n");
-               return ret;
-       }
-
-       /* Get a range of minor numbers (starting with 0) to work with */
-       ret = alloc_chrdev_region(&dev, 0, CROS_MAX_DEV, CROS_EC_DEV_NAME);
-       if (ret < 0) {
-               pr_err(CROS_EC_DEV_NAME ": alloc_chrdev_region() failed\n");
-               goto failed_chrdevreg;
-       }
-       ec_major = MAJOR(dev);
-
-       /* Register the driver */
-       ret = platform_driver_register(&cros_ec_dev_driver);
-       if (ret < 0) {
-               pr_warn(CROS_EC_DEV_NAME ": can't register driver: %d\n", ret);
-               goto failed_devreg;
-       }
-       return 0;
-
-failed_devreg:
-       unregister_chrdev_region(MKDEV(ec_major, 0), CROS_MAX_DEV);
-failed_chrdevreg:
-       class_unregister(&cros_class);
-       return ret;
-}
-
-static void __exit cros_ec_dev_exit(void)
-{
-       platform_driver_unregister(&cros_ec_dev_driver);
-       unregister_chrdev(ec_major, CROS_EC_DEV_NAME);
-       class_unregister(&cros_class);
-}
-
-module_init(cros_ec_dev_init);
-module_exit(cros_ec_dev_exit);
-
-MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>");
-MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller");
-MODULE_VERSION("1.0");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_dev.h b/drivers/platform/chrome/cros_ec_dev.h
deleted file mode 100644 (file)
index 45e9453..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * cros_ec_dev - expose the Chrome OS Embedded Controller to userspace
- *
- * Copyright (C) 2014 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _CROS_EC_DEV_H_
-#define _CROS_EC_DEV_H_
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-#include <linux/mfd/cros_ec.h>
-
-#define CROS_EC_DEV_VERSION "1.0.0"
-
-/*
- * @offset: offset within the EC_LPC_ADDR_MEMMAP region
- * @bytes: number of bytes to read; zero means "read a string" (including
- *         the terminating '\0'), at most EC_MEMMAP_SIZE bytes
- * @buffer: where to store the result
- * The ioctl returns the number of bytes read, or a negative value on error.
- */
-struct cros_ec_readmem {
-       uint32_t offset;
-       uint32_t bytes;
-       uint8_t buffer[EC_MEMMAP_SIZE];
-};
-
-#define CROS_EC_DEV_IOC       0xEC
-#define CROS_EC_DEV_IOCXCMD   _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command)
-#define CROS_EC_DEV_IOCRDMEM  _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem)
-
-/* Lightbar utilities */
-extern bool ec_has_lightbar(struct cros_ec_dev *ec);
-extern int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable);
-extern int lb_suspend(struct cros_ec_dev *ec);
-extern int lb_resume(struct cros_ec_dev *ec);
-
-#endif /* _CROS_EC_DEV_H_ */
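
The readmem ioctl documented in the header above is the raw interface a userspace tool would use against this character device. As a rough illustrative sketch only (the /dev node name and the EC_MEMMAP_SIZE value are assumptions, and the struct and ioctl definitions are mirrored from the removed header rather than taken from an installed one), a minimal caller might look like this:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/ioctl.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Mirrored from the (removed) cros_ec_dev.h above; EC_MEMMAP_SIZE is an
 * assumed value for this sketch. */
#define EC_MEMMAP_SIZE 255

struct cros_ec_readmem {
	uint32_t offset;                /* offset within EC_LPC_ADDR_MEMMAP */
	uint32_t bytes;                 /* 0 = read a '\0'-terminated string */
	uint8_t buffer[EC_MEMMAP_SIZE];
};

#define CROS_EC_DEV_IOC       0xEC
#define CROS_EC_DEV_IOCRDMEM  _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem)

int main(void)
{
	struct cros_ec_readmem mem;
	int fd, ret;

	/* Device node name is an assumption, not taken from the code above. */
	fd = open("/dev/cros_ec", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&mem, 0, sizeof(mem));
	mem.offset = 0;         /* start of the memory-mapped region */
	mem.bytes = 4;          /* read four bytes */

	ret = ioctl(fd, CROS_EC_DEV_IOCRDMEM, &mem);
	if (ret < 0)
		perror("CROS_EC_DEV_IOCRDMEM");
	else
		printf("ioctl ok (ret=%d), first byte 0x%02x\n",
		       ret, mem.buffer[0]);

	close(fd);
	return 0;
}

The IOCXCMD path works the same way, with struct cros_ec_command carrying the request payload out and the response payload back in.
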
index fd2b047..6ea79d4 100644 (file)
@@ -33,8 +33,6 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 
-#include "cros_ec_dev.h"
-
 /* Rate-limit the lightbar interface to prevent DoS. */
 static unsigned long lb_interval_jiffies = 50 * HZ / 1000;
 
@@ -414,6 +412,7 @@ error:
 
        return ret;
 }
+EXPORT_SYMBOL(lb_manual_suspend_ctrl);
 
 int lb_suspend(struct cros_ec_dev *ec)
 {
@@ -422,6 +421,7 @@ int lb_suspend(struct cros_ec_dev *ec)
 
        return lb_send_empty_cmd(ec, LIGHTBAR_CMD_SUSPEND);
 }
+EXPORT_SYMBOL(lb_suspend);
 
 int lb_resume(struct cros_ec_dev *ec)
 {
@@ -430,6 +430,7 @@ int lb_resume(struct cros_ec_dev *ec)
 
        return lb_send_empty_cmd(ec, LIGHTBAR_CMD_RESUME);
 }
+EXPORT_SYMBOL(lb_resume);
 
 static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
@@ -622,3 +623,4 @@ struct attribute_group cros_ec_lightbar_attr_group = {
        .attrs = __lb_cmds_attrs,
        .is_visible = cros_ec_lightbar_attrs_are_visible,
 };
+EXPORT_SYMBOL(cros_ec_lightbar_attr_group);
index f3baf99..d6eebe8 100644 (file)
@@ -34,8 +34,6 @@
 #include <linux/types.h>
 #include <linux/uaccess.h>
 
-#include "cros_ec_dev.h"
-
 /* Accessor functions */
 
 static ssize_t show_ec_reboot(struct device *dev,
@@ -294,4 +292,7 @@ static struct attribute *__ec_attrs[] = {
 struct attribute_group cros_ec_attr_group = {
        .attrs = __ec_attrs,
 };
+EXPORT_SYMBOL(cros_ec_attr_group);
 
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC control driver");
index 564a0d0..6d38e6b 100644 (file)
@@ -135,3 +135,4 @@ struct attribute_group cros_ec_vbc_attr_group = {
        .bin_attrs = cros_ec_vbc_bin_attrs,
        .is_bin_visible = cros_ec_vbc_is_visible,
 };
+EXPORT_SYMBOL(cros_ec_vbc_attr_group);
index f379616..d4aeac3 100644 (file)
@@ -118,6 +118,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event)
                return;
        }
        input_report_key(data->idev, KEY_RFKILL, 1);
+       input_sync(data->idev);
        input_report_key(data->idev, KEY_RFKILL, 0);
        input_sync(data->idev);
 }
index bf897b1..cd4725e 100644 (file)
@@ -37,6 +37,7 @@
 
 struct quirk_entry {
        u8 touchpad_led;
+       u8 kbd_led_levels_off_1;
 
        int needs_kbd_timeouts;
        /*
@@ -67,6 +68,10 @@ static struct quirk_entry quirk_dell_xps13_9333 = {
        .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
 };
 
+static struct quirk_entry quirk_dell_latitude_e6410 = {
+       .kbd_led_levels_off_1 = 1,
+};
+
 static struct platform_driver platform_driver = {
        .driver = {
                .name = "dell-laptop",
@@ -269,6 +274,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
                },
                .driver_data = &quirk_dell_xps13_9333,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "Dell Latitude E6410",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"),
+               },
+               .driver_data = &quirk_dell_latitude_e6410,
+       },
        { }
 };
 
@@ -1149,6 +1163,9 @@ static int kbd_get_info(struct kbd_info *info)
        units = (buffer->output[2] >> 8) & 0xFF;
        info->levels = (buffer->output[2] >> 16) & 0xFF;
 
+       if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
+               info->levels--;
+
        if (units & BIT(0))
                info->seconds = (buffer->output[3] >> 0) & 0xFF;
        if (units & BIT(1))
index 39d2f45..fb25b20 100644 (file)
@@ -639,6 +639,8 @@ static int dell_wmi_events_set_enabled(bool enable)
        int ret;
 
        buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
        buffer->cmd_class = CLASS_INFO;
        buffer->cmd_select = SELECT_APP_REGISTRATION;
        buffer->input[0] = 0x10000;
index 6505c97..1b49169 100644 (file)
@@ -119,7 +119,7 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
        if (key_code == KEY_RESERVED)
                return;
        if (pressed)
-               pm_wakeup_event(&device->dev, 0);
+               pm_wakeup_dev_event(&device->dev, 0, button->suspended);
        if (button->suspended)
                return;
        input_report_key(input, key_code, pressed?1:0);
@@ -185,6 +185,8 @@ static int surface_button_add(struct acpi_device *device)
        error = input_register_device(input);
        if (error)
                goto err_free_input;
+
+       device_init_wakeup(&device->dev, true);
        dev_info(&device->dev,
                        "%s [%s]\n", name, acpi_device_bid(device));
        return 0;
index 791449a..daa68ac 100644 (file)
@@ -1458,5 +1458,5 @@ static void __exit acpi_wmi_exit(void)
        class_unregister(&wmi_bus_class);
 }
 
-subsys_initcall(acpi_wmi_init);
+subsys_initcall_sync(acpi_wmi_init);
 module_exit(acpi_wmi_exit);
index e681140..077f334 100644 (file)
@@ -581,10 +581,7 @@ static int __init pnpbios_thread_init(void)
 
        init_completion(&unload_sem);
        task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd");
-       if (IS_ERR(task))
-               return PTR_ERR(task);
-
-       return 0;
+       return PTR_ERR_OR_ZERO(task);
 }
 
 /* Start the kernel thread later: */
index f054cdd..803666a 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/slab.h>
 #include <linux/pnp.h>
 #include <linux/io.h>
-#include <linux/kallsyms.h>
 #include "base.h"
 
 static void quirk_awe32_add_ports(struct pnp_dev *dev,
index 75f63e3..ed2b109 100644 (file)
@@ -76,7 +76,7 @@ struct rockchip_iodomain_supply {
 struct rockchip_iodomain {
        struct device *dev;
        struct regmap *grf;
-       struct rockchip_iodomain_soc_data *soc_data;
+       const struct rockchip_iodomain_soc_data *soc_data;
        struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
 };
 
@@ -382,43 +382,43 @@ static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
 static const struct of_device_id rockchip_iodomain_match[] = {
        {
                .compatible = "rockchip,rk3188-io-voltage-domain",
-               .data = (void *)&soc_data_rk3188
+               .data = &soc_data_rk3188
        },
        {
                .compatible = "rockchip,rk3228-io-voltage-domain",
-               .data = (void *)&soc_data_rk3228
+               .data = &soc_data_rk3228
        },
        {
                .compatible = "rockchip,rk3288-io-voltage-domain",
-               .data = (void *)&soc_data_rk3288
+               .data = &soc_data_rk3288
        },
        {
                .compatible = "rockchip,rk3328-io-voltage-domain",
-               .data = (void *)&soc_data_rk3328
+               .data = &soc_data_rk3328
        },
        {
                .compatible = "rockchip,rk3368-io-voltage-domain",
-               .data = (void *)&soc_data_rk3368
+               .data = &soc_data_rk3368
        },
        {
                .compatible = "rockchip,rk3368-pmu-io-voltage-domain",
-               .data = (void *)&soc_data_rk3368_pmu
+               .data = &soc_data_rk3368_pmu
        },
        {
                .compatible = "rockchip,rk3399-io-voltage-domain",
-               .data = (void *)&soc_data_rk3399
+               .data = &soc_data_rk3399
        },
        {
                .compatible = "rockchip,rk3399-pmu-io-voltage-domain",
-               .data = (void *)&soc_data_rk3399_pmu
+               .data = &soc_data_rk3399_pmu
        },
        {
                .compatible = "rockchip,rv1108-io-voltage-domain",
-               .data = (void *)&soc_data_rv1108
+               .data = &soc_data_rv1108
        },
        {
                .compatible = "rockchip,rv1108-pmu-io-voltage-domain",
-               .data = (void *)&soc_data_rv1108_pmu
+               .data = &soc_data_rv1108_pmu
        },
        { /* sentinel */ },
 };
@@ -443,7 +443,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, iod);
 
        match = of_match_node(rockchip_iodomain_match, np);
-       iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
+       iod->soc_data = match->data;
 
        parent = pdev->dev.parent;
        if (parent && parent->of_node) {
index d1694f1..35636e1 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/sysfs.h>
 #include <linux/cpu.h>
 #include <linux/powercap.h>
+#include <linux/suspend.h>
 #include <asm/iosf_mbi.h>
 
 #include <asm/processor.h>
@@ -155,6 +156,7 @@ struct rapl_power_limit {
        int prim_id; /* primitive ID used to enable */
        struct rapl_domain *domain;
        const char *name;
+       u64 last_power_limit;
 };
 
 static const char pl1_name[] = "long_term";
@@ -1209,7 +1211,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
        struct rapl_domain *rd;
        char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/
        struct powercap_zone *power_zone = NULL;
-       int nr_pl, ret;;
+       int nr_pl, ret;
 
        /* Update the domain data of the new package */
        rapl_update_domain_data(rp);
@@ -1533,6 +1535,92 @@ static int rapl_cpu_down_prep(unsigned int cpu)
 
 static enum cpuhp_state pcap_rapl_online;
 
+static void power_limit_state_save(void)
+{
+       struct rapl_package *rp;
+       struct rapl_domain *rd;
+       int nr_pl, ret, i;
+
+       get_online_cpus();
+       list_for_each_entry(rp, &rapl_packages, plist) {
+               if (!rp->power_zone)
+                       continue;
+               rd = power_zone_to_rapl_domain(rp->power_zone);
+               nr_pl = find_nr_power_limit(rd);
+               for (i = 0; i < nr_pl; i++) {
+                       switch (rd->rpl[i].prim_id) {
+                       case PL1_ENABLE:
+                               ret = rapl_read_data_raw(rd,
+                                               POWER_LIMIT1,
+                                               true,
+                                               &rd->rpl[i].last_power_limit);
+                               if (ret)
+                                       rd->rpl[i].last_power_limit = 0;
+                               break;
+                       case PL2_ENABLE:
+                               ret = rapl_read_data_raw(rd,
+                                               POWER_LIMIT2,
+                                               true,
+                                               &rd->rpl[i].last_power_limit);
+                               if (ret)
+                                       rd->rpl[i].last_power_limit = 0;
+                               break;
+                       }
+               }
+       }
+       put_online_cpus();
+}
+
+static void power_limit_state_restore(void)
+{
+       struct rapl_package *rp;
+       struct rapl_domain *rd;
+       int nr_pl, i;
+
+       get_online_cpus();
+       list_for_each_entry(rp, &rapl_packages, plist) {
+               if (!rp->power_zone)
+                       continue;
+               rd = power_zone_to_rapl_domain(rp->power_zone);
+               nr_pl = find_nr_power_limit(rd);
+               for (i = 0; i < nr_pl; i++) {
+                       switch (rd->rpl[i].prim_id) {
+                       case PL1_ENABLE:
+                               if (rd->rpl[i].last_power_limit)
+                                       rapl_write_data_raw(rd,
+                                               POWER_LIMIT1,
+                                               rd->rpl[i].last_power_limit);
+                               break;
+                       case PL2_ENABLE:
+                               if (rd->rpl[i].last_power_limit)
+                                       rapl_write_data_raw(rd,
+                                               POWER_LIMIT2,
+                                               rd->rpl[i].last_power_limit);
+                               break;
+                       }
+               }
+       }
+       put_online_cpus();
+}
+
+static int rapl_pm_callback(struct notifier_block *nb,
+       unsigned long mode, void *_unused)
+{
+       switch (mode) {
+       case PM_SUSPEND_PREPARE:
+               power_limit_state_save();
+               break;
+       case PM_POST_SUSPEND:
+               power_limit_state_restore();
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_pm_notifier = {
+       .notifier_call = rapl_pm_callback,
+};
+
 static int __init rapl_init(void)
 {
        const struct x86_cpu_id *id;
@@ -1560,8 +1648,16 @@ static int __init rapl_init(void)
 
        /* Don't bail out if PSys is not supported */
        rapl_register_psys();
+
+       ret = register_pm_notifier(&rapl_pm_notifier);
+       if (ret)
+               goto err_unreg_all;
+
        return 0;
 
+err_unreg_all:
+       cpuhp_remove_state(pcap_rapl_online);
+
 err_unreg:
        rapl_unregister_powercap();
        return ret;
@@ -1569,6 +1665,7 @@ err_unreg:
 
 static void __exit rapl_exit(void)
 {
+       unregister_pm_notifier(&rapl_pm_notifier);
        cpuhp_remove_state(pcap_rapl_online);
        rapl_unregister_powercap();
 }
index 5b10b50..64b2b25 100644 (file)
@@ -673,15 +673,13 @@ EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
 
 static int __init powercap_init(void)
 {
-       int result = 0;
+       int result;
 
        result = seed_constraint_attributes();
        if (result)
                return result;
 
-       result = class_register(&powercap_class);
-
-       return result;
+       return class_register(&powercap_class);
 }
 
 device_initcall(powercap_init);
index e5225ad..2fdab40 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the S/390 specific device drivers
 #
index 31f014b..bc27d71 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 comment "S/390 block device drivers"
        depends on S390 && BLOCK
 
index 0f1ff08..d4e8dff 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
@@ -758,7 +759,7 @@ static void dasd_profile_end_add_data(struct dasd_profile_info *data,
        /* in case of an overflow, reset the whole profile */
        if (data->dasd_io_reqs == UINT_MAX) {
                        memset(data, 0, sizeof(*data));
-                       getnstimeofday(&data->starttod);
+                       ktime_get_real_ts64(&data->starttod);
        }
        data->dasd_io_reqs++;
        data->dasd_io_sects += sectors;
@@ -893,7 +894,7 @@ void dasd_profile_reset(struct dasd_profile *profile)
                return;
        }
        memset(data, 0, sizeof(*data));
-       getnstimeofday(&data->starttod);
+       ktime_get_real_ts64(&data->starttod);
        spin_unlock_bh(&profile->lock);
 }
 
@@ -910,7 +911,7 @@ int dasd_profile_on(struct dasd_profile *profile)
                kfree(data);
                return 0;
        }
-       getnstimeofday(&data->starttod);
+       ktime_get_real_ts64(&data->starttod);
        profile->data = data;
        spin_unlock_bh(&profile->lock);
        return 0;
@@ -994,8 +995,8 @@ static void dasd_stats_array(struct seq_file *m, unsigned int *array)
 static void dasd_stats_seq_print(struct seq_file *m,
                                 struct dasd_profile_info *data)
 {
-       seq_printf(m, "start_time %ld.%09ld\n",
-                  data->starttod.tv_sec, data->starttod.tv_nsec);
+       seq_printf(m, "start_time %lld.%09ld\n",
+                  (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
        seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
        seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
        seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
index c94b606..ee14d8e 100644 (file)
@@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
                erp = dasd_3990_erp_handle_match_erp(cqr, erp);
        }
 
+
+       /*
+        * For path verification work we need to stick with the path that was
+        * originally chosen so that the per path configuration data is
+        * assigned correctly.
+        */
+       if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
+               erp->lpm = cqr->lpm;
+       }
+
        if (device->features & DASD_FEATURE_ERPLOG) {
                /* print current erp_chain */
                dev_err(&device->cdev->dev,
index c95a478..e7cd28f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
index 98fb28e..f035c2f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Based on.......: linux/drivers/s390/block/mdisk.c
index 8eafcd5..a2edf2a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
@@ -530,10 +531,12 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
        pfxdata->validity.define_extent = 1;
 
        /* private uid is kept up to date, conf_data may be outdated */
-       if (startpriv->uid.type != UA_BASE_DEVICE) {
+       if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
                pfxdata->validity.verify_base = 1;
-               if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
-                       pfxdata->validity.hyper_pav = 1;
+
+       if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
+               pfxdata->validity.verify_base = 1;
+               pfxdata->validity.hyper_pav = 1;
        }
 
        rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
@@ -3414,10 +3417,12 @@ static int prepare_itcw(struct itcw *itcw,
        pfxdata.validity.define_extent = 1;
 
        /* private uid is kept up to date, conf_data may be outdated */
-       if (startpriv->uid.type != UA_BASE_DEVICE) {
+       if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
+               pfxdata.validity.verify_base = 1;
+
+       if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
                pfxdata.validity.verify_base = 1;
-               if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
-                       pfxdata.validity.hyper_pav = 1;
+               pfxdata.validity.hyper_pav = 1;
        }
 
        switch (cmd) {
index 6168ccd..a6b132f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
index b095a23..96709b1 100644 (file)
@@ -441,7 +441,7 @@ struct dasd_profile_info {
        unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
 
        /* new data */
-       struct timespec starttod;          /* time of start or last reset */
+       struct timespec64 starttod;        /* time of start or last reset */
        unsigned int dasd_io_alias;        /* requests using an alias */
        unsigned int dasd_io_tpm;          /* requests using transport mode */
        unsigned int dasd_read_reqs;       /* total number of read  requests */
index 7abb240..6aaefb7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * dcssblk.c -- the S/390 block driver for dcss memory
  *
index eb51893..b4130c7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Block driver for s390 storage class memory.
  *
index 571a070..2a6334c 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Xpram.c -- the S/390 expanded memory RAM-disk
  *           
index 97c4c9f..ab0b243 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 comment "S/390 character device drivers"
        depends on S390
 
index 05ac6ba..614b44e 100644 (file)
@@ -17,6 +17,8 @@ CFLAGS_REMOVE_sclp_early_core.o       += $(CC_FLAGS_MARCH)
 CFLAGS_sclp_early_core.o               += -march=z900
 endif
 
+CFLAGS_sclp_early_core.o               += -D__NO_FORTIFY
+
 obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
         sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
         sclp_early.o sclp_early_core.o
index 353b3f2..f4c0956 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 # Default keymap for 3270 (ebcdic codepage 037).
 keymaps 0-1,4-5
 
index c451816..6182248 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * IBM/3270 Driver - fullscreen driver.
  *
index 251a318..1447d08 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    HMC Drive DVD Module
  *
index 027ac6a..bf4ab4e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Character device driver for reading z/VM *MONITOR service records.
  *
index 571a7e3..76c158c 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Character device driver for writing z/VM *MONITOR service records.
  *
index 5d4f053..f8cd293 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * IBM/3270 Driver - core functions.
  *
index 19c2542..ee6f3b5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Enable Asynchronous Notification via SCLP.
  *
index de69f0d..6d73ee3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    tape device discipline for 3480/3490 tapes.
  *
index e352047..37e65a0 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    tape device discipline for 3590 tapes.
  *
index e7d2304..a071024 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2004
  *
index 32503a6..8d3370d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    basic function of the tape device driver
  *
index e417ccd..1c98023 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    IBM/3270 Driver - tty functions.
  *
index 62559dc..069b9ef 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *     character device driver for reading z/VM system service records
  *
index fa90ef0..52aa894 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux driver for System z and s390 unit record devices
  * (z/VM virtual punch, reader, printer)
index aaed778..4369662 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  * zcore module to export memory content and register sets for creating system
  * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
@@ -7,7 +8,6 @@
  *
  * Copyright IBM Corp. 2003, 2008
  * Author(s): Michael Holzheu
- * License: GPL
  */
 
 #define KMSG_COMPONENT "zdump"
index 95e25c1..140e3e4 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef S390_BLACKLIST_H
 #define S390_BLACKLIST_H
 
index e2f7b6e..bfec148 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  bus driver for ccwgroup
  *
index f4166f8..5c94a3a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 1999, 2010
  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
index 7b0b295..c08fc5a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   S/390 common I/O routines -- channel subsystem call
  *
index 8e7e19b..0015729 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for s390 chsc subchannels
  *
index 8921617..987bf9a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   S/390 common I/O routines -- low level i/o calls
  *
index 7d59230..5e495c6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Linux on zSeries Channel Measurement Facility support
  *
@@ -7,20 +8,6 @@
  *         Cornelia Huck <cornelia.huck@de.ibm.com>
  *
  * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "cio"
index d3e504c..0f11dce 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * driver for channel subsystem
  *
@@ -5,8 +6,6 @@
  *
  * Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *           Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
  */
 
 #define KMSG_COMPONENT "cio"
index 318d826..75a245f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  *  bus driver for ccw devices
  *
@@ -5,8 +6,6 @@
  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *              Cornelia Huck (cornelia.huck@de.ibm.com)
  *              Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * License: GPL
  */
 
 #define KMSG_COMPONENT "cio"
index dd7d79d..1319122 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * finite state machine for device handling
  *
index cf8c4ac..1caf6a3 100644 (file)
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  * Copyright IBM Corp. 2002, 2009
  *
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  *           Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
  */
 #include <linux/export.h>
 #include <linux/init.h>
index ce16e4f..53468ae 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for s390 eadm subchannels
  *
index c592087..77fde9f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Functions for registration of I/O interruption subclasses on s390.
  *
index ed4852f..95b0efe 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux for s390 qdio support, buffer handling, qdio API and module support.
  *
@@ -430,8 +431,8 @@ static void process_buffer_error(struct qdio_q *q, int count)
        q->qdio_error = QDIO_ERROR_SLSB_STATE;
 
        /* special handling for no target buffer empty */
-       if ((!q->is_input_q &&
-           (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
+       if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
+           q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
                qperf_inc(q, target_full);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
                              q->first_to_check);
@@ -535,7 +536,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
        case SLSB_P_INPUT_ERROR:
                process_buffer_error(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
-               atomic_sub(count, &q->nr_buf_used);
+               if (atomic_sub_return(count, &q->nr_buf_used) == 0)
+                       qperf_inc(q, inbound_queue_full);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals_error(q, count);
                break;
index 9ae1380..98f3cfd 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * qdio queue initialization
  *
index 1fa53ec..6bca1d5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Recognize and maintain s390 storage class memory.
  *
index 82f05c4..ea6a2d0 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * VFIO based Physical Subchannel device driver
  *
index faeba9d..48d55dc 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright IBM Corp. 2006, 2012
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -7,20 +8,6 @@
  *           Holger Dengler <hd@linux.vnet.ibm.com>
  *
  * Adjunct processor bus.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "ap"
index 7e45c4d..e0827ea 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright IBM Corp. 2006, 2012
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -7,20 +8,6 @@
  *           Holger Dengler <hd@linux.vnet.ibm.com>
  *
  * Adjunct processor bus header file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _AP_BUS_H_
index 8dda5bb..e7c2e4f 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  pkey device driver
  *
  *  Copyright IBM Corp. 2017
  *  Author(s): Harald Freudenberger
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #define KMSG_COMPONENT "pkey"
index b5f4006..ce15f10 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
@@ -218,8 +205,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
        weight += atomic_read(&zq->load);
        pref_weight += atomic_read(&pref_zq->load);
        if (weight == pref_weight)
-               return &zq->queue->total_request_count >
-                       &pref_zq->queue->total_request_count;
+               return zq->queue->total_request_count >
+                       pref_zq->queue->total_request_count;
        return weight > pref_weight;
 }
 
index 73541a7..9fff891 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_API_H_
index f85dacf..233e1e6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #include <linux/module.h>
index 12cff62..011d61d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_CCA_KEY_H_
index b97c5d5..e701194 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index 0dce4b9..c3c1167 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_CEX2A_H_
index e2eebc7..f305538 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Copyright IBM Corp. 2012
  *  Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
index 13df602..01598d8 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_ERROR_H_
index db5bde4..afe1b2b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "zcrypt"
index 5cc2803..0a36545 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_MSGTYPE50_H_
index 785620d..f54bef4 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "zcrypt"
index 7a0d5b5..d314f45 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_MSGTYPE6_H_
index 6006047..159b0a0 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index eacafc8..d678a3a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_PCIXCC_H_
index 4742be0..720434e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #include <linux/module.h>
index b2837b1..a782a20 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 menu "S/390 network device drivers"
        depends on NETDEVICES && S390
 
index be9f172..7ce98b7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2001, 2009
  * Author(s):
index c81adf8..eb07862 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /**
  * A generic FSM based on fsm used in isdn4linux
  *
index e131a03..92ae84a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Linux for S/390 Lan Channel Station Network Driver
  *
@@ -7,20 +8,6 @@
  *            Rewritten by
  *                     Frank Pavlic <fpavlic@de.ibm.com> and
  *                     Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.         See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT         "lcs"
index b9c7c1e..5ce2424 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * IUCV network driver
  *
  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  */
 
 #define KMSG_COMPONENT "netiucv"
index 9cd569e..badf42a 100644 (file)
@@ -565,9 +565,9 @@ enum qeth_cq {
 };
 
 struct qeth_ipato {
-       int enabled;
-       int invert4;
-       int invert6;
+       bool enabled;
+       bool invert4;
+       bool invert6;
        struct list_head entries;
 };
 
@@ -987,6 +987,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
 int qeth_set_features(struct net_device *, netdev_features_t);
 void qeth_recover_features(struct net_device *dev);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features);
 int qeth_vm_request_mac(struct qeth_card *card);
 int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
 
index 49b9efe..3614df6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 #include <linux/mii.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/skbuff.h>
+
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
 
@@ -1474,9 +1480,9 @@ static int qeth_setup_card(struct qeth_card *card)
        qeth_set_intial_options(card);
        /* IP address takeover */
        INIT_LIST_HEAD(&card->ipato.entries);
-       card->ipato.enabled = 0;
-       card->ipato.invert4 = 0;
-       card->ipato.invert6 = 0;
+       card->ipato.enabled = false;
+       card->ipato.invert4 = false;
+       card->ipato.invert6 = false;
        /* init QDIO stuff */
        qeth_init_qdio_info(card);
        INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
@@ -5380,6 +5386,13 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_poll);
 
+static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+{
+       if (!cmd->hdr.return_code)
+               cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+       return cmd->hdr.return_code;
+}
+
 int qeth_setassparms_cb(struct qeth_card *card,
                        struct qeth_reply *reply, unsigned long data)
 {
@@ -6236,7 +6249,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
                                (struct qeth_checksum_cmd *)reply->param;
 
        QETH_CARD_TEXT(card, 4, "chkdoccb");
-       if (cmd->hdr.return_code)
+       if (qeth_setassparms_inspect_rc(cmd))
                return 0;
 
        memset(chksum_cb, 0, sizeof(*chksum_cb));
@@ -6438,6 +6451,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(qeth_fix_features);
 
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features)
+{
+       /* GSO segmentation builds skbs with
+        *      a (small) linear part for the headers, and
+        *      page frags for the data.
+        * Compared to a linear skb, the header-only part consumes an
+        * additional buffer element. This reduces buffer utilization, and
+        * hurts throughput. So compress small segments into one element.
+        */
+       if (netif_needs_gso(skb, features)) {
+               /* match skb_segment(): */
+               unsigned int doffset = skb->data - skb_mac_header(skb);
+               unsigned int hsize = skb_shinfo(skb)->gso_size;
+               unsigned int hroom = skb_headroom(skb);
+
+               /* linearize only if resulting skb allocations are order-0: */
+               if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
+                       features &= ~NETIF_F_SG;
+       }
+
+       return vlan_features_check(skb, features);
+}
+EXPORT_SYMBOL_GPL(qeth_features_check);
+
 static int __init qeth_core_init(void)
 {
        int rc;
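The qeth_features_check() hunk above drops NETIF_F_SG for small GSO packets so that the headers plus one segment of payload still fit a single order-0 skb head. A minimal userspace sketch of that size check follows; SKB_ALIGN(), MAX_HEAD, the 4096-byte page and 64-byte cache line are simplified stand-ins for the kernel's SKB_DATA_ALIGN()/SKB_MAX_HEAD(0) and are assumptions, not the driver's exact values.

/*
 * Sketch of the "linearize only if the result stays order-0" test above.
 * All constants here are illustrative assumptions, not kernel values.
 */
#include <stdbool.h>
#include <stdio.h>

#define CACHE_LINE   64u
#define PAGE_SZ      4096u
#define SKB_ALIGN(x) (((x) + CACHE_LINE - 1) & ~(CACHE_LINE - 1))
#define MAX_HEAD     (PAGE_SZ - 320u)   /* page minus assumed skb_shared_info overhead */

static bool linearize_small_gso(unsigned int headroom, unsigned int doffset,
                                unsigned int gso_size)
{
        /* drop NETIF_F_SG only if the resulting skb head stays order-0 */
        return SKB_ALIGN(headroom + doffset + gso_size) <= MAX_HEAD;
}

int main(void)
{
        printf("1436-byte MSS, 66-byte headers, 128 headroom -> linearize: %d\n",
               linearize_small_gso(128, 66, 1436));
        return 0;
}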
index b22ed2a..ae81534 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
index d2537c0..5863ea1 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
@@ -960,6 +961,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
        .ndo_stop               = qeth_l2_stop,
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l2_hard_start_xmit,
+       .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l2_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
@@ -1010,6 +1012,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
        if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
                card->dev->hw_features = NETIF_F_SG;
                card->dev->vlan_features = NETIF_F_SG;
+               card->dev->features |= NETIF_F_SG;
                /* OSA 3S and earlier has no RX/TX support */
                if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
                        card->dev->hw_features |= NETIF_F_IP_CSUM;
@@ -1028,8 +1031,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 
        card->info.broadcast_capable = 1;
        qeth_l2_request_initial_mac(card);
-       card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-                                 PAGE_SIZE;
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
        netif_carrier_off(card->dev);
index 194ae9b..e583383 100644 (file)
@@ -82,7 +82,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
                        const u8 *);
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+void qeth_l3_update_ipato(struct qeth_card *card);
 struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
 int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
 int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
index aadd384..ef0961e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
@@ -163,8 +164,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
        }
 }
 
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
-                                               struct qeth_ipaddr *addr)
+static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+                                            struct qeth_ipaddr *addr)
 {
        struct qeth_ipato_entry *ipatoe;
        u8 addr_bits[128] = {0, };
@@ -173,6 +174,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
 
        if (!card->ipato.enabled)
                return 0;
+       if (addr->type != QETH_IP_TYPE_NORMAL)
+               return 0;
 
        qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
                                  (addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -289,8 +292,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
                memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
                addr->ref_counter = 1;
 
-               if (addr->type == QETH_IP_TYPE_NORMAL  &&
-                               qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+               if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
                        QETH_CARD_TEXT(card, 2, "tkovaddr");
                        addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
                }
@@ -604,6 +606,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
 /*
  * IP address takeover related functions
  */
+
+/**
+ * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
+ *
+ * Caller must hold ip_lock.
+ */
+void qeth_l3_update_ipato(struct qeth_card *card)
+{
+       struct qeth_ipaddr *addr;
+       unsigned int i;
+
+       hash_for_each(card->ip_htable, i, addr, hnode) {
+               if (addr->type != QETH_IP_TYPE_NORMAL)
+                       continue;
+               if (qeth_l3_is_addr_covered_by_ipato(card, addr))
+                       addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+               else
+                       addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+       }
+}
+
 static void qeth_l3_clear_ipato_list(struct qeth_card *card)
 {
        struct qeth_ipato_entry *ipatoe, *tmp;
@@ -615,6 +638,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
                kfree(ipatoe);
        }
 
+       qeth_l3_update_ipato(card);
        spin_unlock_bh(&card->ip_lock);
 }
 
@@ -639,8 +663,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
                }
        }
 
-       if (!rc)
+       if (!rc) {
                list_add_tail(&new->entry, &card->ipato.entries);
+               qeth_l3_update_ipato(card);
+       }
 
        spin_unlock_bh(&card->ip_lock);
 
@@ -663,6 +689,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
                            (proto == QETH_PROT_IPV4)? 4:16) &&
                    (ipatoe->mask_bits == mask_bits)) {
                        list_del(&ipatoe->entry);
+                       qeth_l3_update_ipato(card);
                        kfree(ipatoe);
                }
        }
@@ -1376,6 +1403,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 
                tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
                memcpy(tmp->mac, buf, sizeof(tmp->mac));
+               tmp->is_multicast = 1;
 
                ipm = qeth_l3_ip_from_hash(card, tmp);
                if (ipm) {
@@ -2917,6 +2945,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
        .ndo_stop               = qeth_l3_stop,
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
+       .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l3_set_multicast_list,
        .ndo_do_ioctl           = qeth_do_ioctl,
@@ -2957,6 +2986,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                card->dev->vlan_features = NETIF_F_SG |
                                        NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
                                        NETIF_F_TSO;
+                               card->dev->features |= NETIF_F_SG;
                        }
                }
        } else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2984,8 +3014,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                NETIF_F_HW_VLAN_CTAG_RX |
                                NETIF_F_HW_VLAN_CTAG_FILTER;
        netif_keep_dst(card->dev);
-       card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-                                 PAGE_SIZE;
+       netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
+                                         PAGE_SIZE);
 
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
index bd12fdf..6ea2b52 100644 (file)
@@ -370,8 +370,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
-       struct qeth_ipaddr *addr;
-       int i, rc = 0;
+       bool enable;
+       int rc = 0;
 
        if (!card)
                return -EINVAL;
@@ -384,25 +384,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
        }
 
        if (sysfs_streq(buf, "toggle")) {
-               card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
-       } else if (sysfs_streq(buf, "1")) {
-               card->ipato.enabled = 1;
-               hash_for_each(card->ip_htable, i, addr, hnode) {
-                               if ((addr->type == QETH_IP_TYPE_NORMAL) &&
-                               qeth_l3_is_addr_covered_by_ipato(card, addr))
-                                       addr->set_flags |=
-                                       QETH_IPA_SETIP_TAKEOVER_FLAG;
-                       }
-       } else if (sysfs_streq(buf, "0")) {
-               card->ipato.enabled = 0;
-               hash_for_each(card->ip_htable, i, addr, hnode) {
-                       if (addr->set_flags &
-                       QETH_IPA_SETIP_TAKEOVER_FLAG)
-                               addr->set_flags &=
-                               ~QETH_IPA_SETIP_TAKEOVER_FLAG;
-                       }
-       } else
+               enable = !card->ipato.enabled;
+       } else if (kstrtobool(buf, &enable)) {
                rc = -EINVAL;
+               goto out;
+       }
+
+       if (card->ipato.enabled != enable) {
+               card->ipato.enabled = enable;
+               spin_lock_bh(&card->ip_lock);
+               qeth_l3_update_ipato(card);
+               spin_unlock_bh(&card->ip_lock);
+       }
 out:
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
@@ -428,20 +421,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
                                const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
+       bool invert;
        int rc = 0;
 
        if (!card)
                return -EINVAL;
 
        mutex_lock(&card->conf_mutex);
-       if (sysfs_streq(buf, "toggle"))
-               card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
-       else if (sysfs_streq(buf, "1"))
-               card->ipato.invert4 = 1;
-       else if (sysfs_streq(buf, "0"))
-               card->ipato.invert4 = 0;
-       else
+       if (sysfs_streq(buf, "toggle")) {
+               invert = !card->ipato.invert4;
+       } else if (kstrtobool(buf, &invert)) {
                rc = -EINVAL;
+               goto out;
+       }
+
+       if (card->ipato.invert4 != invert) {
+               card->ipato.invert4 = invert;
+               spin_lock_bh(&card->ip_lock);
+               qeth_l3_update_ipato(card);
+               spin_unlock_bh(&card->ip_lock);
+       }
+out:
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
 }
@@ -607,20 +607,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
+       bool invert;
        int rc = 0;
 
        if (!card)
                return -EINVAL;
 
        mutex_lock(&card->conf_mutex);
-       if (sysfs_streq(buf, "toggle"))
-               card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
-       else if (sysfs_streq(buf, "1"))
-               card->ipato.invert6 = 1;
-       else if (sysfs_streq(buf, "0"))
-               card->ipato.invert6 = 0;
-       else
+       if (sysfs_streq(buf, "toggle")) {
+               invert = !card->ipato.invert6;
+       } else if (kstrtobool(buf, &invert)) {
                rc = -EINVAL;
+               goto out;
+       }
+
+       if (card->ipato.invert6 != invert) {
+               card->ipato.invert6 = invert;
+               spin_lock_bh(&card->ip_lock);
+               qeth_l3_update_ipato(card);
+               spin_unlock_bh(&card->ip_lock);
+       }
+out:
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
 }
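The sysfs rework above replaces the open-coded "0"/"1"/"toggle" parsing with kstrtobool() and only calls qeth_l3_update_ipato() when the flag actually changes. A hedged userspace sketch of that store pattern, with parse_bool() standing in for kstrtobool() and the card locking omitted:

/*
 * Userspace sketch of the ipato sysfs store pattern: accept "toggle" or a
 * boolean value, and touch driver state only when it actually changes.
 * parse_bool() is a simplified stand-in for the kernel's kstrtobool().
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static int parse_bool(const char *s, bool *res)
{
        if (!strcmp(s, "1") || !strcmp(s, "y")) { *res = true;  return 0; }
        if (!strcmp(s, "0") || !strcmp(s, "n")) { *res = false; return 0; }
        return -1;                      /* -EINVAL in the kernel version */
}

static int store_flag(bool *flag, const char *buf)
{
        bool enable;

        if (!strcmp(buf, "toggle"))
                enable = !*flag;
        else if (parse_bool(buf, &enable))
                return -1;

        if (*flag != enable)
                *flag = enable;         /* qeth_l3_update_ipato() would run here, under ip_lock */
        return 0;
}

int main(void)
{
        bool enabled = false;

        store_flag(&enabled, "toggle");
        printf("enabled=%d\n", enabled);        /* prints 1 */
        return 0;
}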
index a851d34..3b0c8b8 100644 (file)
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * IUCV special message driver
  *
  * Copyright IBM Corp. 2003, 2009
  *
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index 32515a2..0a26399 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Deliver z/VM CP special messages (SMSG) as uevents.
  *
index 9259039..9dda431 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the S/390 specific device drivers
 #
index 8475215..a3a8c8d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * zfcp device driver
  *
index f68af1f..2dc4d9a 100644 (file)
@@ -1,9 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
 # Makefile for kvm guest drivers on s390
 #
 # Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
 
 obj-$(CONFIG_S390_GUEST) += virtio_ccw.o
index b18fe20..ba2e085 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * ccw based virtio transport
  *
  * Copyright IBM Corp. 2012, 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
 
index 403a639..d522654 100644 (file)
@@ -1673,6 +1673,7 @@ struct aac_dev
        struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
        u8                      adapter_shutdown;
        u32                     handle_pci_error;
+       bool                    init_reset;
 };
 
 #define aac_adapter_interrupt(dev) \
@@ -1724,6 +1725,7 @@ struct aac_dev
 #define FIB_CONTEXT_FLAG_NATIVE_HBA            (0x00000010)
 #define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF        (0x00000020)
 #define FIB_CONTEXT_FLAG_SCSI_CMD      (0x00000040)
+#define FIB_CONTEXT_FLAG_EH_RESET      (0x00000080)
 
 /*
  *     Define the command values
index 525a652..80a8cb2 100644 (file)
@@ -467,35 +467,6 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
        return 0;
 }
 
-#ifdef CONFIG_EEH
-static inline int aac_check_eeh_failure(struct aac_dev *dev)
-{
-       /* Check for an EEH failure for the given
-        * device node. Function eeh_dev_check_failure()
-        * returns 0 if there has not been an EEH error
-        * otherwise returns a non-zero value.
-        *
-        * Need to be called before any PCI operation,
-        * i.e.,before aac_adapter_check_health()
-        */
-       struct eeh_dev *edev = pci_dev_to_eeh_dev(dev->pdev);
-
-       if (eeh_dev_check_failure(edev)) {
-               /* The EEH mechanisms will handle this
-                * error and reset the device if
-                * necessary.
-                */
-               return 1;
-       }
-       return 0;
-}
-#else
-static inline int aac_check_eeh_failure(struct aac_dev *dev)
-{
-       return 0;
-}
-#endif
-
 /*
  *     Define the highest level of host to adapter communication routines.
  *     These routines will support host to adapter FS commuication. These
@@ -701,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                                        return -ETIMEDOUT;
                                }
 
-                               if (aac_check_eeh_failure(dev))
+                               if (unlikely(pci_channel_offline(dev->pdev)))
                                        return -EFAULT;
 
                                if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -801,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-               if (aac_check_eeh_failure(dev))
+               if (unlikely(pci_channel_offline(dev->pdev)))
                        return -EFAULT;
 
                fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
@@ -1583,6 +1554,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
         * will ensure that i/o is queisced and the card is flushed in that
         * case.
         */
+       aac_free_irq(aac);
        aac_fib_map_free(aac);
        dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
                          aac->comm_phys);
@@ -1590,7 +1562,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
        aac->comm_phys = 0;
        kfree(aac->queues);
        aac->queues = NULL;
-       aac_free_irq(aac);
        kfree(aac->fsa_dev);
        aac->fsa_dev = NULL;
 
@@ -2511,8 +2482,8 @@ int aac_command_thread(void *data)
                        /* Synchronize our watches */
                        if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
                         && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
-                               difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ)
-                                 + NSEC_PER_SEC / 2) / NSEC_PER_SEC;
+                               difference = HZ + HZ / 2 -
+                                            now.tv_nsec / (NSEC_PER_SEC / HZ);
                        else {
                                if (now.tv_nsec > NSEC_PER_SEC / 2)
                                        ++now.tv_sec;
@@ -2536,6 +2507,10 @@ int aac_command_thread(void *data)
                if (kthread_should_stop())
                        break;
 
+               /*
+                * we probably want usleep_range() here instead of the
+                * jiffies computation
+                */
                schedule_timeout(difference);
 
                if (kthread_should_stop())
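The aac_command_thread() hunk above rewrites the "synchronize our watches" timeout: instead of multiplying a nanosecond count by HZ (an intermediate product that can exceed 32 bits on 32-bit builds), it converts tv_nsec straight to jiffies and adds a half-tick margin. A small sketch comparing the two expressions, with HZ = 250 chosen purely for illustration:

/*
 * Compare the old and new timeout arithmetic for one sample tv_nsec.
 * HZ and NSEC_PER_SEC are illustrative here, not taken from a real config.
 */
#include <stdio.h>

#define HZ           250L
#define NSEC_PER_SEC 1000000000L

int main(void)
{
        long tv_nsec = 300000000L;      /* 0.3 s into the current second */

        long old_j = (((NSEC_PER_SEC - tv_nsec) * HZ) + NSEC_PER_SEC / 2) / NSEC_PER_SEC;
        long new_j = HZ + HZ / 2 - tv_nsec / (NSEC_PER_SEC / HZ);

        printf("old=%ld jiffies, new=%ld jiffies\n", old_j, new_j);  /* 175 vs 300 */
        return 0;
}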
index c9252b1..d55332d 100644 (file)
@@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
                        info = &aac->hba_map[bus][cid];
                        if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
                            info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
-                               fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+                               fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
                                cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
                        }
                }
@@ -1680,6 +1680,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        aac->cardtype = index;
        INIT_LIST_HEAD(&aac->entry);
 
+       if (aac_reset_devices || reset_devices)
+               aac->init_reset = true;
+
        aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
        if (!aac->fibs)
                goto out_free_host;
index 93ef7c3..6201666 100644 (file)
@@ -561,11 +561,16 @@ int _aac_rx_init(struct aac_dev *dev)
        dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
        dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
-       if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
-         !aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-               /* Make sure the Hardware FIFO is empty */
-               while ((++restart < 512) &&
-                 (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+
+       if (((status & 0x0c) != 0x0c) || dev->init_reset) {
+               dev->init_reset = false;
+               if (!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) {
+                       /* Make sure the Hardware FIFO is empty */
+                       while ((++restart < 512) &&
+                              (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+               }
+       }
+
        /*
         *      Check to see if the board panic'd while booting.
         */
index 0c9361c..fde6b6a 100644 (file)
@@ -868,9 +868,13 @@ int aac_src_init(struct aac_dev *dev)
        /* Failure to reset here is an option ... */
        dev->a_ops.adapter_sync_cmd = src_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
-       if ((aac_reset_devices || reset_devices) &&
-               !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-               ++restart;
+
+       if (dev->init_reset) {
+               dev->init_reset = false;
+               if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
+                       ++restart;
+       }
+
        /*
         *      Check to see if the board panic'd while booting.
         */
@@ -1014,9 +1018,13 @@ int aac_srcv_init(struct aac_dev *dev)
        /* Failure to reset here is an option ... */
        dev->a_ops.adapter_sync_cmd = src_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
-       if ((aac_reset_devices || reset_devices) &&
-               !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-               ++restart;
+
+       if (dev->init_reset) {
+               dev->init_reset = false;
+               if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
+                       ++restart;
+       }
+
        /*
         *      Check to see if flash update is running.
         *      Wait for the adapter to be up and running. Wait up to 5 minutes
index 72ca2a2..b2fa195 100644 (file)
@@ -3135,7 +3135,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
        struct fc_bsg_request *bsg_request = job->request;
        struct fc_bsg_reply *bsg_reply = job->reply;
        uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
-       struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+       struct Scsi_Host *shost = fc_bsg_to_shost(job);
+       struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
        struct bfad_s *bfad = im_port->bfad;
        void *payload_kbuf;
        int rc = -EINVAL;
@@ -3350,7 +3351,8 @@ int
 bfad_im_bsg_els_ct_request(struct bsg_job *job)
 {
        struct bfa_bsg_data *bsg_data;
-       struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+       struct Scsi_Host *shost = fc_bsg_to_shost(job);
+       struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
        struct bfad_s *bfad = im_port->bfad;
        bfa_bsg_fcpt_t *bsg_fcpt;
        struct bfad_fcxp    *drv_fcxp;
index 24e657a..c05d6e9 100644 (file)
@@ -546,6 +546,7 @@ int
 bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
                        struct device *dev)
 {
+       struct bfad_im_port_pointer *im_portp;
        int error = 1;
 
        mutex_lock(&bfad_mutex);
@@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
                goto out_free_idr;
        }
 
-       im_port->shost->hostdata[0] = (unsigned long)im_port;
+       im_portp = shost_priv(im_port->shost);
+       im_portp->p = im_port;
        im_port->shost->unique_id = im_port->idr_id;
        im_port->shost->this_id = -1;
        im_port->shost->max_id = MAX_FCP_TARGET;
@@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
 
        sht->sg_tablesize = bfad->cfg_data.io_max_sge;
 
-       return scsi_host_alloc(sht, sizeof(unsigned long));
+       return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
 }
 
 void
index c81ec2a..06ce4ba 100644 (file)
@@ -69,6 +69,16 @@ struct bfad_im_port_s {
        struct fc_vport *fc_vport;
 };
 
+struct bfad_im_port_pointer {
+       struct bfad_im_port_s *p;
+};
+
+static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host)
+{
+       struct bfad_im_port_pointer *im_portp = shost_priv(host);
+       return im_portp->p;
+}
+
 enum bfad_itnim_state {
        ITNIM_STATE_NONE,
        ITNIM_STATE_ONLINE,
index 5da4605..21be672 100644 (file)
@@ -904,10 +904,14 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
                case ELS_FLOGI:
                        if (!lport->point_to_multipoint)
                                fc_lport_recv_flogi_req(lport, fp);
+                       else
+                               fc_rport_recv_req(lport, fp);
                        break;
                case ELS_LOGO:
                        if (fc_frame_sid(fp) == FC_FID_FLOGI)
                                fc_lport_recv_logo_req(lport, fp);
+                       else
+                               fc_rport_recv_req(lport, fp);
                        break;
                case ELS_RSCN:
                        lport->tt.disc_recv_req(lport, fp);
index ca15662..3183d63 100644 (file)
@@ -2145,7 +2145,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
                struct sas_rphy *rphy)
 {
        struct domain_device *dev;
-       unsigned int reslen = 0;
+       unsigned int rcvlen = 0;
        int ret = -EINVAL;
 
        /* no rphy means no smp target support (ie aic94xx host) */
@@ -2179,12 +2179,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
 
        ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
                        job->reply_payload.sg_list);
-       if (ret > 0) {
-               /* positive number is the untransferred residual */
-               reslen = ret;
+       if (ret >= 0) {
+               /* bsg_job_done() requires the length received  */
+               rcvlen = job->reply_payload.payload_len - ret;
                ret = 0;
        }
 
 out:
-       bsg_job_done(job, ret, reslen);
+       bsg_job_done(job, ret, rcvlen);
 }
index 58476b7..c940685 100644 (file)
@@ -486,15 +486,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
 
 int sas_eh_abort_handler(struct scsi_cmnd *cmd)
 {
-       int res;
+       int res = TMF_RESP_FUNC_FAILED;
        struct sas_task *task = TO_SAS_TASK(cmd);
        struct Scsi_Host *host = cmd->device->host;
+       struct domain_device *dev = cmd_to_domain_dev(cmd);
        struct sas_internal *i = to_sas_internal(host->transportt);
+       unsigned long flags;
 
        if (!i->dft->lldd_abort_task)
                return FAILED;
 
-       res = i->dft->lldd_abort_task(task);
+       spin_lock_irqsave(host->host_lock, flags);
+       /* We cannot do async aborts for SATA devices */
+       if (dev_is_sata(dev) && !host->host_eh_scheduled) {
+               spin_unlock_irqrestore(host->host_lock, flags);
+               return FAILED;
+       }
+       spin_unlock_irqrestore(host->host_lock, flags);
+
+       if (task)
+               res = i->dft->lldd_abort_task(task);
+       else
+               SAS_DPRINTK("no task to abort\n");
        if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
                return SUCCESS;
 
index 56faeb0..87c08ff 100644 (file)
@@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
        drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
        rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
        if (rc < 0) {
-               (rqbp->rqb_free_buffer)(phba, rqb_entry);
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6409 Cannot post to RQ %d: %x %x\n",
                                rqb_entry->hrq->queue_id,
                                rqb_entry->hrq->host_index,
                                rqb_entry->hrq->hba_index);
+               (rqbp->rqb_free_buffer)(phba, rqb_entry);
        } else {
                list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
                rqbp->buffer_count++;
index a4f28b7..e188771 100644 (file)
@@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
                return req;
 
        for_each_bio(bio) {
-               ret = blk_rq_append_bio(req, bio);
+               struct bio *bounce_bio = bio;
+
+               ret = blk_rq_append_bio(req, &bounce_bio);
                if (ret)
                        return ERR_PTR(ret);
        }
index 01f08c0..c3765d2 100644 (file)
@@ -8,9 +8,11 @@ void scsi_show_rq(struct seq_file *m, struct request *rq)
 {
        struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
        int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
-       char buf[80];
+       const u8 *const cdb = READ_ONCE(cmd->cmnd);
+       char buf[80] = "(?)";
 
-       __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
+       if (cdb)
+               __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len);
        seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf,
                   cmd->retries, msecs / 1000, msecs % 1000);
 }
index 78d4aa8..dfb8da8 100644 (file)
@@ -34,7 +34,6 @@ struct scsi_dev_info_list_table {
 };
 
 
-static const char spaces[] = "                "; /* 16 of them */
 static blist_flags_t scsi_default_dev_flags;
 static LIST_HEAD(scsi_dev_info_list);
 static char scsi_dev_flags[256];
@@ -298,20 +297,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
        size_t from_length;
 
        from_length = strlen(from);
-       strncpy(to, from, min(to_length, from_length));
-       if (from_length < to_length) {
-               if (compatible) {
-                       /*
-                        * NUL terminate the string if it is short.
-                        */
-                       to[from_length] = '\0';
-               } else {
-                       /*
-                        * space pad the string if it is short.
-                        */
-                       strncpy(&to[from_length], spaces,
-                               to_length - from_length);
-               }
+       /* This zero-pads the destination */
+       strncpy(to, from, to_length);
+       if (from_length < to_length && !compatible) {
+               /*
+                * space pad the string if it is short.
+                */
+               memset(&to[from_length], ' ', to_length - from_length);
        }
        if (from_length > to_length)
                 printk(KERN_WARNING "%s: %s string '%s' is too long\n",
@@ -382,10 +374,8 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
                            model, compatible);
 
        if (strflags)
-               devinfo->flags = simple_strtoul(strflags, NULL, 0);
-       else
-               devinfo->flags = flags;
-
+               flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0);
+       devinfo->flags = flags;
        devinfo->compatible = compatible;
 
        if (compatible)
@@ -458,7 +448,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
                        /*
                         * vendor strings must be an exact match
                         */
-                       if (vmax != strlen(devinfo->vendor) ||
+                       if (vmax != strnlen(devinfo->vendor,
+                                           sizeof(devinfo->vendor)) ||
                            memcmp(devinfo->vendor, vskip, vmax))
                                continue;
 
@@ -466,7 +457,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
                         * @model specifies the full string, and
                         * must be larger or equal to devinfo->model
                         */
-                       mlen = strlen(devinfo->model);
+                       mlen = strnlen(devinfo->model, sizeof(devinfo->model));
                        if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
                                continue;
                        return devinfo;
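The scsi_strcpy_devinfo() hunk above leans on strncpy() already zero-filling the tail when the source is short, space-pads only non-compatible entries, and switches the lookup to strnlen() because a fully padded field has no terminating NUL. A userspace sketch of that padding behaviour, with arbitrary field sizes:

/*
 * Sketch of the zero-pad / space-pad behaviour; the 8-byte vendor field is an
 * arbitrary example, and the field is intentionally left without a NUL when
 * space-padded to full width.
 */
#include <stdio.h>
#include <string.h>

static void copy_devinfo(char *to, size_t to_len, const char *from, int compatible)
{
        size_t from_len = strlen(from);

        strncpy(to, from, to_len);              /* zero-pads the destination */
        if (from_len < to_len && !compatible)
                memset(&to[from_len], ' ', to_len - from_len);
}

int main(void)
{
        char vendor[8];

        copy_devinfo(vendor, sizeof(vendor), "HP", 0);
        printf("len=%zu\n", strnlen(vendor, sizeof(vendor)));   /* 8: space-padded */
        return 0;
}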
index 1cbc497..d9ca1df 100644 (file)
@@ -1967,6 +1967,8 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
 out_put_device:
        put_device(&sdev->sdev_gendev);
 out:
+       if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
+               blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
        return false;
 }
 
@@ -2148,11 +2150,13 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
                q->limits.cluster = 0;
 
        /*
-        * set a reasonable default alignment on word boundaries: the
-        * host and device may alter it using
-        * blk_queue_update_dma_alignment() later.
+        * Set a reasonable default alignment:  The larger of 32-byte (dword),
+        * which is a common minimum for HBAs, and the minimum DMA alignment,
+        * which is set by the platform.
+        *
+        * Devices that require a bigger alignment can increase it later.
         */
-       blk_queue_dma_alignment(q, 0x03);
+       blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
 }
 EXPORT_SYMBOL_GPL(__scsi_init_queue);
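blk_queue_dma_alignment() takes a mask rather than a byte count, so the new default above works out to max(4, dma_get_cache_alignment()) - 1. A tiny sketch of that mask computation; the 64-byte cache line is only an example of what dma_get_cache_alignment() might return:

/*
 * Compute the DMA alignment mask from an alignment in bytes, with a
 * 4-byte floor as in the hunk above. Inputs are illustrative.
 */
#include <stdio.h>

static unsigned long dma_alignment_mask(unsigned long cache_align)
{
        unsigned long align = cache_align > 4 ? cache_align : 4;  /* max(4, ...) */

        return align - 1;       /* 0x3f for a 64-byte cache line, 0x3 as the floor */
}

int main(void)
{
        printf("mask=0x%lx\n", dma_alignment_mask(64));
        return 0;
}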
 
index be5e919..0880d97 100644 (file)
@@ -770,7 +770,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
  *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
-               int *bflags, int async)
+               blist_flags_t *bflags, int async)
 {
        int ret;
 
@@ -1049,14 +1049,15 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
  *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_probe_and_add_lun(struct scsi_target *starget,
-                                 u64 lun, int *bflagsp,
+                                 u64 lun, blist_flags_t *bflagsp,
                                  struct scsi_device **sdevp,
                                  enum scsi_scan_mode rescan,
                                  void *hostdata)
 {
        struct scsi_device *sdev;
        unsigned char *result;
-       int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
+       blist_flags_t bflags;
+       int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 
        /*
@@ -1201,7 +1202,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
  *     Modifies sdevscan->lun.
  **/
 static void scsi_sequential_lun_scan(struct scsi_target *starget,
-                                    int bflags, int scsi_level,
+                                    blist_flags_t bflags, int scsi_level,
                                     enum scsi_scan_mode rescan)
 {
        uint max_dev_lun;
@@ -1292,7 +1293,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
  *     0: scan completed (or no memory, so further scanning is futile)
  *     1: could not scan with REPORT LUN
  **/
-static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
                                enum scsi_scan_mode rescan)
 {
        unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -1538,7 +1539,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
                unsigned int id, u64 lun, enum scsi_scan_mode rescan)
 {
        struct Scsi_Host *shost = dev_to_shost(parent);
-       int bflags = 0;
+       blist_flags_t bflags = 0;
        int res;
        struct scsi_target *starget;
 
index 50e7d7e..26ce171 100644 (file)
@@ -967,7 +967,8 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
 
-#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
+#define BLIST_FLAG_NAME(name)                                  \
+       [ilog2((__force unsigned int)BLIST_##name)] = #name
 static const char *const sdev_bflags_name[] = {
 #include "scsi_devinfo_tbl.c"
 };
@@ -984,7 +985,7 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
        for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
                const char *name = NULL;
 
-               if (!(sdev->sdev_bflags & BIT(i)))
+               if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i)))
                        continue;
                if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
                        name = sdev_bflags_name[i];
@@ -1414,7 +1415,10 @@ static void __scsi_remove_target(struct scsi_target *starget)
                 * check.
                 */
                if (sdev->channel != starget->channel ||
-                   sdev->id != starget->id ||
+                   sdev->id != starget->id)
+                       continue;
+               if (sdev->sdev_state == SDEV_DEL ||
+                   sdev->sdev_state == SDEV_CANCEL ||
                    !get_device(&sdev->sdev_gendev))
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);
index d0219e3..871ea58 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
+#include <linux/suspend.h>
 #include <scsi/scsi.h>
 #include "scsi_priv.h"
 #include <scsi/scsi_device.h>
 
 /* Our blacklist flags */
 enum {
-       SPI_BLIST_NOIUS = 0x1,
+       SPI_BLIST_NOIUS = (__force blist_flags_t)0x1,
 };
 
 /* blacklist table, modelled on scsi_devinfo.c */
 static struct {
        char *vendor;
        char *model;
-       unsigned flags;
+       blist_flags_t flags;
 } spi_static_device_list[] __initdata = {
        {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
        {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
@@ -221,9 +222,11 @@ static int spi_device_configure(struct transport_container *tc,
 {
        struct scsi_device *sdev = to_scsi_device(dev);
        struct scsi_target *starget = sdev->sdev_target;
-       unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
-                                                     &sdev->inquiry[16],
-                                                     SCSI_DEVINFO_SPI);
+       blist_flags_t bflags;
+
+       bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
+                                            &sdev->inquiry[16],
+                                            SCSI_DEVINFO_SPI);
 
        /* Populate the target capability fields with the values
         * gleaned from the device inquiry */
@@ -1007,11 +1010,20 @@ spi_dv_device(struct scsi_device *sdev)
        u8 *buffer;
        const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
 
+       /*
+        * Because this function and the power management code both call
+        * scsi_device_quiesce(), it is not safe to perform domain validation
+        * while suspend or resume is in progress. Hence the
+        * lock/unlock_system_sleep() calls.
+        */
+       lock_system_sleep();
+
        if (unlikely(spi_dv_in_progress(starget)))
-               return;
+               goto unlock;
 
        if (unlikely(scsi_device_get(sdev)))
-               return;
+               goto unlock;
+
        spi_dv_in_progress(starget) = 1;
 
        buffer = kzalloc(len, GFP_KERNEL);
@@ -1047,6 +1059,8 @@ spi_dv_device(struct scsi_device *sdev)
  out_put:
        spi_dv_in_progress(starget) = 0;
        scsi_device_put(sdev);
+unlock:
+       unlock_system_sleep();
 }
 EXPORT_SYMBOL(spi_dv_device);
 
index 24fe685..a028ab3 100644 (file)
@@ -1312,6 +1312,7 @@ static int sd_init_command(struct scsi_cmnd *cmd)
 static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 {
        struct request *rq = SCpnt->request;
+       u8 *cmnd;
 
        if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
                sd_zbc_write_unlock_zone(SCpnt);
@@ -1320,9 +1321,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
                __free_page(rq->special_vec.bv_page);
 
        if (SCpnt->cmnd != scsi_req(rq)->cmd) {
-               mempool_free(SCpnt->cmnd, sd_cdb_pool);
+               cmnd = SCpnt->cmnd;
                SCpnt->cmnd = NULL;
                SCpnt->cmd_len = 0;
+               mempool_free(cmnd, sd_cdb_pool);
        }
 }
 
index 1b06cf0..3b3d1d0 100644 (file)
@@ -953,10 +953,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
                case TEST_UNIT_READY:
                        break;
                default:
-                       set_host_byte(scmnd, DID_TARGET_FAILURE);
+                       set_host_byte(scmnd, DID_ERROR);
                }
                break;
        case SRB_STATUS_INVALID_LUN:
+               set_host_byte(scmnd, DID_NO_CONNECT);
                do_work = true;
                process_err_fn = storvsc_remove_lun;
                break;
index 011c336..a355d98 100644 (file)
@@ -6559,12 +6559,15 @@ static int ufshcd_config_vreg(struct device *dev,
                struct ufs_vreg *vreg, bool on)
 {
        int ret = 0;
-       struct regulator *reg = vreg->reg;
-       const char *name = vreg->name;
+       struct regulator *reg;
+       const char *name;
        int min_uV, uA_load;
 
        BUG_ON(!vreg);
 
+       reg = vreg->reg;
+       name = vreg->name;
+
        if (regulator_count_voltages(reg) > 0) {
                min_uV = on ? vreg->min_uV : 0;
                ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
index 89f4cf5..f2d8c3c 100644 (file)
@@ -20,8 +20,8 @@
 #define AO_SEC_SOCINFO_OFFSET  AO_SEC_SD_CFG8
 
 #define SOCINFO_MAJOR  GENMASK(31, 24)
-#define SOCINFO_MINOR  GENMASK(23, 16)
-#define SOCINFO_PACK   GENMASK(15, 8)
+#define SOCINFO_PACK   GENMASK(23, 16)
+#define SOCINFO_MINOR  GENMASK(15, 8)
 #define SOCINFO_MISC   GENMASK(7, 0)
 
 static const struct meson_gx_soc_id {
index 77fe55c..d653453 100644 (file)
@@ -79,6 +79,7 @@
 #define A3700_SPI_BYTE_LEN             BIT(5)
 #define A3700_SPI_CLK_PRESCALE         BIT(0)
 #define A3700_SPI_CLK_PRESCALE_MASK    (0x1f)
+#define A3700_SPI_CLK_EVEN_OFFS                (0x10)
 
 #define A3700_SPI_WFIFO_THRS_BIT       28
 #define A3700_SPI_RFIFO_THRS_BIT       24
@@ -220,6 +221,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
 
        prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
 
+       /* For prescaler values over 15, we can only set it by steps of 2.
+        * Starting from A3700_SPI_CLK_EVEN_OFFS, we set values from 0 up to
+        * 30. We only use this range from 16 to 30.
+        */
+       if (prescale > 15)
+               prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
+
        val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
        val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
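The a3700_spi_clock_set() hunk above encodes prescaler values above 15 in an "even" range that starts at A3700_SPI_CLK_EVEN_OFFS and stores half of the rounded-up divider. A standalone sketch of that encoding with an example clock rate; the surrounding register access is not modelled:

/*
 * Encode an SPI clock divider: 1..15 directly, larger dividers as
 * 0x10 + ceil(divider / 2). Clock rates below are example values only.
 */
#include <stdio.h>

#define CLK_EVEN_OFFS 0x10u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int a3700_prescale_field(unsigned long rate, unsigned long speed_hz)
{
        unsigned int prescale = DIV_ROUND_UP(rate, speed_hz);

        if (prescale > 15)
                prescale = CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);

        return prescale;
}

int main(void)
{
        /* 200 MHz source, 10 MHz target -> divider 20 -> field 0x1a */
        printf("field=0x%x\n", a3700_prescale_field(200000000UL, 10000000UL));
        return 0;
}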
 
index f95da36..6694709 100644 (file)
@@ -1661,12 +1661,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
        pm_runtime_get_sync(&pdev->dev);
 
        /* reset the hardware and block queue progress */
-       spin_lock_irq(&as->lock);
        if (as->use_dma) {
                atmel_spi_stop_dma(master);
                atmel_spi_release_dma(master);
        }
 
+       spin_lock_irq(&as->lock);
        spi_writel(as, CR, SPI_BIT(SWRST));
        spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
        spi_readl(as, SR);
index 2ce8757..0835a8d 100644 (file)
@@ -377,8 +377,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
        /* Sets SPCMD */
        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 
-       /* Enables SPI function in master mode */
-       rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
+       /* Sets RSPI mode */
+       rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
 
        return 0;
 }
index c5cd635..4141003 100644 (file)
@@ -525,7 +525,7 @@ err_free_master:
 
 static int sun4i_spi_remove(struct platform_device *pdev)
 {
-       pm_runtime_disable(&pdev->dev);
+       pm_runtime_force_suspend(&pdev->dev);
 
        return 0;
 }
index bc7100b..e0b9fe1 100644 (file)
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
        while (remaining_words) {
                int n_words, tx_words, rx_words;
                u32 sr;
+               int stalled;
 
                n_words = min(remaining_words, xspi->buffer_size);
 
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 
                /* Read out all the data from the Rx FIFO */
                rx_words = n_words;
+               stalled = 10;
                while (rx_words) {
+                       if (rx_words == n_words && !(stalled--) &&
+                           !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+                           (sr & XSPI_SR_RX_EMPTY_MASK)) {
+                               dev_err(&spi->dev,
+                                       "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+                               xspi_init_hw(xspi);
+                               return -EIO;
+                       }
+
                        if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
                                xilinx_spi_rx(xspi);
                                rx_words--;
index d8e4219..71c7376 100644 (file)
@@ -32,7 +32,7 @@ config SSB_BLOCKIO
 
 config SSB_PCIHOST_POSSIBLE
        bool
-       depends on SSB && (PCI = y || PCI = SSB)
+       depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY
        default y
 
 config SSB_PCIHOST
index 0f695df..372ce99 100644 (file)
@@ -765,10 +765,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                break;
        case ASHMEM_SET_SIZE:
                ret = -EINVAL;
+               mutex_lock(&ashmem_mutex);
                if (!asma->file) {
                        ret = 0;
                        asma->size = (size_t)arg;
                }
+               mutex_unlock(&ashmem_mutex);
                break;
        case ASHMEM_GET_SIZE:
                ret = asma->size;
index a517b2d..8f64941 100644 (file)
@@ -37,7 +37,7 @@ config ION_CHUNK_HEAP
 
 config ION_CMA_HEAP
        bool "Ion CMA heap support"
-       depends on ION && CMA
+       depends on ION && DMA_CMA
        help
          Choose this option to enable CMA heaps with Ion. This heap is backed
          by the Contiguous Memory Allocator (CMA). If your system has these
index a7d9b0e..f480885 100644 (file)
@@ -346,7 +346,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
-                                   DMA_BIDIRECTIONAL);
+                                   direction);
        }
        mutex_unlock(&buffer->lock);
 
@@ -368,7 +368,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
-                                      DMA_BIDIRECTIONAL);
+                                      direction);
        }
        mutex_unlock(&buffer->lock);
 
index dd5545d..86196ff 100644 (file)
@@ -39,9 +39,15 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
        struct ion_cma_heap *cma_heap = to_cma_heap(heap);
        struct sg_table *table;
        struct page *pages;
+       unsigned long size = PAGE_ALIGN(len);
+       unsigned long nr_pages = size >> PAGE_SHIFT;
+       unsigned long align = get_order(size);
        int ret;
 
-       pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL);
+       if (align > CONFIG_CMA_ALIGNMENT)
+               align = CONFIG_CMA_ALIGNMENT;
+
+       pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
 
@@ -53,7 +59,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
        if (ret)
                goto free_mem;
 
-       sg_set_page(table->sgl, pages, len, 0);
+       sg_set_page(table->sgl, pages, size, 0);
 
        buffer->priv_virt = pages;
        buffer->sg_table = table;
@@ -62,7 +68,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 free_mem:
        kfree(table);
 err:
-       cma_release(cma_heap->cma, pages, buffer->size);
+       cma_release(cma_heap->cma, pages, nr_pages);
        return -ENOMEM;
 }
 
@@ -70,9 +76,10 @@ static void ion_cma_free(struct ion_buffer *buffer)
 {
        struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
        struct page *pages = buffer->priv_virt;
+       unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
 
        /* release memory */
-       cma_release(cma_heap->cma, pages, buffer->size);
+       cma_release(cma_heap->cma, pages, nr_pages);
        /* release sg table */
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
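
cma_alloc() and cma_release() take a page count and an allocation order, not a byte length, which is why the fix above page-aligns len, derives nr_pages, and clamps the order to CONFIG_CMA_ALIGNMENT. The runnable userspace program below mirrors that arithmetic; PAGE_SHIFT_, CMA_ALIGNMENT_ and order_of() are illustrative stand-ins for the kernel's PAGE_SHIFT, CONFIG_CMA_ALIGNMENT and get_order().

#include <stdio.h>

#define PAGE_SHIFT_	12UL
#define PAGE_SIZE_	(1UL << PAGE_SHIFT_)
#define PAGE_ALIGN_(x)	(((x) + PAGE_SIZE_ - 1) & ~(PAGE_SIZE_ - 1))
#define CMA_ALIGNMENT_	8UL	/* stand-in for CONFIG_CMA_ALIGNMENT */

/* Smallest order such that PAGE_SIZE_ << order covers 'size'. */
static unsigned long order_of(unsigned long size)
{
	unsigned long order = 0;

	while ((PAGE_SIZE_ << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long len = 100000;	/* arbitrary byte length */
	unsigned long size = PAGE_ALIGN_(len);
	unsigned long nr_pages = size >> PAGE_SHIFT_;
	unsigned long align = order_of(size);

	if (align > CMA_ALIGNMENT_)
		align = CMA_ALIGNMENT_;

	printf("len=%lu -> size=%lu nr_pages=%lu order=%lu\n",
	       len, size, nr_pages, align);
	return 0;
}

With len = 100000 this prints size=102400, nr_pages=25 and order=5, i.e. the values the heap would now hand to cma_alloc() instead of the raw byte length.
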
index d79090e..2035835 100644 (file)
@@ -1769,7 +1769,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        u32 tmp;
-       int rc;
+       int rc = 0;
 
        memcpy(&tmp, in, sizeof(u32));
        if (tmp != CC_EXPORT_MAGIC) {
@@ -1778,9 +1778,12 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
        }
        in += sizeof(u32);
 
-       rc = ssi_hash_init(state, ctx);
-       if (rc)
-               goto out;
+       /* call init() to allocate bufs if the user hasn't */
+       if (!state->digest_buff) {
+               rc = ssi_hash_init(state, ctx);
+               if (rc)
+                       goto out;
+       }
 
        dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
                                ctx->inter_digestsize, DMA_BIDIRECTIONAL);
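
The import path above now calls ssi_hash_init() only when the request context has no digest buffer yet, so importing into an already-initialised request does not allocate a second set of buffers. A hedged sketch of that lazy-init guard; ctx_t, ctx_init() and the buf field are illustrative stand-ins for the driver's request context.

#include <stdlib.h>

typedef struct {
	void *buf;	/* stands in for state->digest_buff */
} ctx_t;

static int ctx_init(ctx_t *c)
{
	c->buf = malloc(64);
	return c->buf ? 0 : -1;
}

/* Import state, allocating the working buffer only if the caller has
 * not already done so via ctx_init(). */
static int ctx_import(ctx_t *c, const void *in, size_t len)
{
	if (!c->buf && ctx_init(c))
		return -1;
	/* ... restore the imported state from 'in' into c->buf ... */
	(void)in;
	(void)len;
	return 0;
}
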
index 2d62a8c..ae6ed96 100644 (file)
@@ -361,3 +361,8 @@ static struct comedi_driver ni_atmio_driver = {
        .detach         = ni_atmio_detach,
 };
 module_comedi_driver(ni_atmio_driver);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
+
index 986c2a4..8267119 100644 (file)
@@ -487,21 +487,18 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
                              ksocknal_nid2peerlist(id.nid));
        }
 
-       route2 = NULL;
        list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) {
-               if (route2->ksnr_ipaddr == ipaddr)
-                       break;
-
-               route2 = NULL;
-       }
-       if (!route2) {
-               ksocknal_add_route_locked(peer, route);
-               route->ksnr_share_count++;
-       } else {
-               ksocknal_route_decref(route);
-               route2->ksnr_share_count++;
+               if (route2->ksnr_ipaddr == ipaddr) {
+                       /* Route already exists, use the old one */
+                       ksocknal_route_decref(route);
+                       route2->ksnr_share_count++;
+                       goto out;
+               }
        }
-
+       /* Route doesn't already exist, add the new one */
+       ksocknal_add_route_locked(peer, route);
+       route->ksnr_share_count++;
+out:
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
        return 0;
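
The rework above folds the lookup and the decision into one pass: the walk either finds an existing route for the address and reuses it (dropping the freshly allocated one), or falls through and links the new route in. It also stops relying on the list cursor being NULL after list_for_each_entry(), which it never is. Below is a hedged userspace analog of that find-or-add pattern; struct route and the singly linked list are illustrative, not the LNet structures.

#include <stdbool.h>
#include <stdint.h>

struct route {
	uint32_t ipaddr;
	unsigned int share_count;
	struct route *next;
};

/*
 * Returns true when new_route was linked into the list, false when an
 * existing route for the same address was reused; in the latter case
 * the caller drops new_route, mirroring ksocknal_route_decref() above.
 */
static bool add_or_share_route(struct route **head, struct route *new_route)
{
	struct route *r;

	for (r = *head; r; r = r->next) {
		if (r->ipaddr == new_route->ipaddr) {
			r->share_count++;	/* route already exists */
			return false;
		}
	}
	new_route->share_count++;
	new_route->next = *head;	/* no match: add the new one */
	*head = new_route;
	return true;
}
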
index 539a264..7d49d48 100644 (file)
@@ -71,16 +71,12 @@ lnet_sock_ioctl(int cmd, unsigned long arg)
        }
 
        sock_filp = sock_alloc_file(sock, 0, NULL);
-       if (IS_ERR(sock_filp)) {
-               sock_release(sock);
-               rc = PTR_ERR(sock_filp);
-               goto out;
-       }
+       if (IS_ERR(sock_filp))
+               return PTR_ERR(sock_filp);
 
        rc = kernel_sock_unlocked_ioctl(sock_filp, cmd, arg);
 
        fput(sock_filp);
-out:
        return rc;
 }
 
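
The simplification above follows sock_alloc_file(), which now releases the socket itself when it fails, so the caller must not call sock_release() again and the error path collapses to a plain return. A hedged userspace sketch of that ownership rule, where a failing constructor consumes its argument; struct raw, struct wrapper and make_wrapper() are illustrative.

#include <stdlib.h>

struct raw { int fd; };
struct wrapper { struct raw *r; };

/* Consumes 'r' on failure, so callers never free it twice. */
static struct wrapper *make_wrapper(struct raw *r)
{
	struct wrapper *w = malloc(sizeof(*w));

	if (!w) {
		free(r);	/* the constructor owns the failure path */
		return NULL;
	}
	w->r = r;
	return w;
}

static int use_raw(struct raw *r)
{
	struct wrapper *w = make_wrapper(r);

	if (!w)
		return -1;	/* no free(r) here: make_wrapper() did it */
	/* ... use w ... */
	free(w->r);
	free(w);
	return 0;
}
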
index 2d6e64d..938b859 100644 (file)
@@ -1016,7 +1016,7 @@ static bool file_is_noatime(const struct file *file)
        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
                return true;
 
-       if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+       if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
                return true;
 
        return false;
index 65ac512..8666f1e 100644 (file)
@@ -313,11 +313,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
        }
 
        if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
                sbi->ll_flags |= LL_SBI_ACL;
        } else {
                LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
-               sb->s_flags &= ~MS_POSIXACL;
+               sb->s_flags &= ~SB_POSIXACL;
                sbi->ll_flags &= ~LL_SBI_ACL;
        }
 
@@ -660,7 +660,7 @@ void ll_kill_super(struct super_block *sb)
        struct ll_sb_info *sbi;
 
        /* not init sb ?*/
-       if (!(sb->s_flags & MS_ACTIVE))
+       if (!(sb->s_flags & SB_ACTIVE))
                return;
 
        sbi = ll_s2sbi(sb);
@@ -2039,8 +2039,8 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
        int err;
        __u32 read_only;
 
-       if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
-               read_only = *flags & MS_RDONLY;
+       if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
+               read_only = *flags & SB_RDONLY;
                err = obd_set_info_async(NULL, sbi->ll_md_exp,
                                         sizeof(KEY_READ_ONLY),
                                         KEY_READ_ONLY, sizeof(read_only),
@@ -2053,9 +2053,9 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
                }
 
                if (read_only)
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                else
-                       sb->s_flags &= ~MS_RDONLY;
+                       sb->s_flags &= ~SB_RDONLY;
 
                if (sbi->ll_flags & LL_SBI_VERBOSE)
                        LCONSOLE_WARN("Remounted %s %s\n", profilenm,
index b553319..15fa567 100644 (file)
@@ -208,14 +208,14 @@ struct atomisp_dis_vector {
 };
 
 
-/** DVS 2.0 Coefficient types. This structure contains 4 pointers to
+/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
  *  arrays that contain the coeffients for each type.
  */
 struct atomisp_dvs2_coef_types {
-       short __user *odd_real; /**< real part of the odd coefficients*/
-       short __user *odd_imag; /**< imaginary part of the odd coefficients*/
-       short __user *even_real;/**< real part of the even coefficients*/
-       short __user *even_imag;/**< imaginary part of the even coefficients*/
+       short __user *odd_real; /** real part of the odd coefficients*/
+       short __user *odd_imag; /** imaginary part of the odd coefficients*/
+       short __user *even_real;/** real part of the even coefficients*/
+       short __user *even_imag;/** imaginary part of the even coefficients*/
 };
 
 /*
@@ -223,10 +223,10 @@ struct atomisp_dvs2_coef_types {
  * arrays that contain the statistics for each type.
  */
 struct atomisp_dvs2_stat_types {
-       int __user *odd_real; /**< real part of the odd statistics*/
-       int __user *odd_imag; /**< imaginary part of the odd statistics*/
-       int __user *even_real;/**< real part of the even statistics*/
-       int __user *even_imag;/**< imaginary part of the even statistics*/
+       int __user *odd_real; /** real part of the odd statistics*/
+       int __user *odd_imag; /** imaginary part of the odd statistics*/
+       int __user *even_real;/** real part of the even statistics*/
+       int __user *even_imag;/** imaginary part of the even statistics*/
 };
 
 struct atomisp_dis_coefficients {
@@ -390,16 +390,16 @@ struct atomisp_metadata_config {
  * Generic resolution structure.
  */
 struct atomisp_resolution {
-       uint32_t width;  /**< Width */
-       uint32_t height; /**< Height */
+       uint32_t width;  /** Width */
+       uint32_t height; /** Height */
 };
 
 /*
  * This specifies the coordinates (x,y)
  */
 struct atomisp_zoom_point {
-       int32_t x; /**< x coordinate */
-       int32_t y; /**< y coordinate */
+       int32_t x; /** x coordinate */
+       int32_t y; /** y coordinate */
 };
 
 /*
@@ -411,9 +411,9 @@ struct atomisp_zoom_region {
 };
 
 struct atomisp_dz_config {
-       uint32_t dx; /**< Horizontal zoom factor */
-       uint32_t dy; /**< Vertical zoom factor */
-       struct atomisp_zoom_region zoom_region; /**< region for zoom */
+       uint32_t dx; /** Horizontal zoom factor */
+       uint32_t dy; /** Vertical zoom factor */
+       struct atomisp_zoom_region zoom_region; /** region for zoom */
 };
 
 struct atomisp_parm {
@@ -758,7 +758,7 @@ enum atomisp_acc_arg_type {
        ATOMISP_ACC_ARG_FRAME        /* Frame argument */
 };
 
-/** ISP memories, isp2400 */
+/* ISP memories, isp2400 */
 enum atomisp_acc_memory {
        ATOMISP_ACC_MEMORY_PMEM0 = 0,
        ATOMISP_ACC_MEMORY_DMEM0,
index 8a18c52..debf0e3 100644 (file)
@@ -5187,7 +5187,7 @@ int get_frame_info_nop(struct atomisp_sub_device *asd,
        return 0;
 }
 
-/**
+/*
  * Resets CSS parameters that depend on input resolution.
  *
  * Update params like CSS RAW binning, 2ppc mode and pp_input
index 6e87aa5..b7f9da0 100644 (file)
@@ -4051,7 +4051,7 @@ int atomisp_css_get_formats_config(struct atomisp_sub_device *asd,
 int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd,
                                        unsigned int *zoom)
 {
-       struct ia_css_dz_config dz_config;  /**< Digital Zoom */
+       struct ia_css_dz_config dz_config;  /** Digital Zoom */
        struct ia_css_isp_config isp_config;
        struct atomisp_device *isp = asd->isp;
 
index 685da0f..95669ee 100644 (file)
@@ -28,17 +28,17 @@ struct atomisp_histogram32 {
 };
 
 struct atomisp_dvs2_stat_types32 {
-       compat_uptr_t odd_real; /**< real part of the odd statistics*/
-       compat_uptr_t odd_imag; /**< imaginary part of the odd statistics*/
-       compat_uptr_t even_real;/**< real part of the even statistics*/
-       compat_uptr_t even_imag;/**< imaginary part of the even statistics*/
+       compat_uptr_t odd_real; /** real part of the odd statistics*/
+       compat_uptr_t odd_imag; /** imaginary part of the odd statistics*/
+       compat_uptr_t even_real;/** real part of the even statistics*/
+       compat_uptr_t even_imag;/** imaginary part of the even statistics*/
 };
 
 struct atomisp_dvs2_coef_types32 {
-       compat_uptr_t odd_real; /**< real part of the odd coefficients*/
-       compat_uptr_t odd_imag; /**< imaginary part of the odd coefficients*/
-       compat_uptr_t even_real;/**< real part of the even coefficients*/
-       compat_uptr_t even_imag;/**< imaginary part of the even coefficients*/
+       compat_uptr_t odd_real; /** real part of the odd coefficients*/
+       compat_uptr_t odd_imag; /** imaginary part of the odd coefficients*/
+       compat_uptr_t even_real;/** real part of the even coefficients*/
+       compat_uptr_t even_imag;/** imaginary part of the even coefficients*/
 };
 
 struct atomisp_dvs2_statistics32 {
index f3d6182..c3eba67 100644 (file)
@@ -223,7 +223,7 @@ struct atomisp_subdev_params {
 
        bool dis_proj_data_valid;
 
-       struct ia_css_dz_config   dz_config;  /**< Digital Zoom */
+       struct ia_css_dz_config   dz_config;  /** Digital Zoom */
        struct ia_css_capture_config   capture_config;
 
        struct atomisp_css_isp_config config;
index 19bae16..050d60f 100644 (file)
@@ -21,7 +21,7 @@
  * Forward declarations.
  *
  **********************************************************************/
-/**
+/*
  * @brief Read the oldest element from the circular buffer.
  * Read the oldest element WITHOUT checking whehter the
  * circular buffer is empty or not. The oldest element is
@@ -34,7 +34,7 @@
 static inline ia_css_circbuf_elem_t
 ia_css_circbuf_read(ia_css_circbuf_t *cb);
 
-/**
+/*
  * @brief Shift a chunk of elements in the circular buffer.
  * A chunk of elements (i.e. the ones from the "start" position
  * to the "chunk_src" position) are shifted in the circular buffer,
@@ -48,7 +48,7 @@ static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
                                                   uint32_t chunk_src,
                                                   uint32_t chunk_dest);
 
-/**
+/*
  * @brief Get the "val" field in the element.
  *
  * @param elem The pointer to the element.
@@ -63,7 +63,7 @@ ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem);
  * Non-inline functions.
  *
  **********************************************************************/
-/**
+/*
  * @brief Create the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -88,7 +88,7 @@ ia_css_circbuf_create(ia_css_circbuf_t *cb,
        cb->elems = elems;
 }
 
-/**
+/*
  * @brief Destroy the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -99,7 +99,7 @@ void ia_css_circbuf_destroy(ia_css_circbuf_t *cb)
        cb->elems = NULL;
 }
 
-/**
+/*
  * @brief Pop a value out of the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -116,7 +116,7 @@ uint32_t ia_css_circbuf_pop(ia_css_circbuf_t *cb)
        return ret;
 }
 
-/**
+/*
  * @brief Extract a value out of the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -166,7 +166,7 @@ uint32_t ia_css_circbuf_extract(ia_css_circbuf_t *cb, int offset)
        return val;
 }
 
-/**
+/*
  * @brief Peek an element from the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -180,7 +180,7 @@ uint32_t ia_css_circbuf_peek(ia_css_circbuf_t *cb, int offset)
        return cb->elems[pos].val;
 }
 
-/**
+/*
  * @brief Get the value of an element from the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -194,7 +194,7 @@ uint32_t ia_css_circbuf_peek_from_start(ia_css_circbuf_t *cb, int offset)
        return cb->elems[pos].val;
 }
 
-/** @brief increase size of a circular buffer.
+/* @brief increase size of a circular buffer.
  * Use 'CAUTION' before using this function. This was added to
  * support / fix issue with increasing size for tagger only
  * Please refer to "ia_css_circbuf.h" for details.
@@ -252,7 +252,7 @@ bool ia_css_circbuf_increase_size(
  * Inline functions.
  *
  ****************************************************************/
-/**
+/*
  * @brief Get the "val" field in the element.
  * Refer to "Forward declarations" for details.
  */
@@ -262,7 +262,7 @@ ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem)
        return elem->val;
 }
 
-/**
+/*
  * @brief Read the oldest element from the circular buffer.
  * Refer to "Forward declarations" for details.
  */
@@ -282,7 +282,7 @@ ia_css_circbuf_read(ia_css_circbuf_t *cb)
        return elem;
 }
 
-/**
+/*
  * @brief Shift a chunk of elements in the circular buffer.
  * Refer to "Forward declarations" for details.
  */
index 616789d..a6d650a 100644 (file)
@@ -19,7 +19,7 @@
 #include <ia_css_frame_public.h>       /* ia_css_frame_info */
 #include <ia_css_binary.h>             /* ia_css_binary_descr */
 
-/** @brief Get a binary descriptor for copy.
+/* @brief Get a binary descriptor for copy.
  *
  * @param[in] pipe
  * @param[out] copy_desc
@@ -36,7 +36,7 @@ extern void ia_css_pipe_get_copy_binarydesc(
        struct ia_css_frame_info *out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for vfpp.
+/* @brief Get a binary descriptor for vfpp.
  *
  * @param[in] pipe
  * @param[out] vfpp_descr
@@ -51,7 +51,7 @@ extern void ia_css_pipe_get_vfpp_binarydesc(
                struct ia_css_frame_info *in_info,
                struct ia_css_frame_info *out_info);
 
-/** @brief Get numerator and denominator of bayer downscaling factor.
+/* @brief Get numerator and denominator of bayer downscaling factor.
  *
  * @param[in] bds_factor: The bayer downscaling factor.
  *             (= The bds_factor member in the sh_css_bds_factor structure.)
@@ -67,7 +67,7 @@ extern enum ia_css_err sh_css_bds_factor_get_numerator_denominator(
        unsigned int *bds_factor_numerator,
        unsigned int *bds_factor_denominator);
 
-/** @brief Get a binary descriptor for preview stage.
+/* @brief Get a binary descriptor for preview stage.
  *
  * @param[in] pipe
  * @param[out] preview_descr
@@ -86,7 +86,7 @@ extern enum ia_css_err ia_css_pipe_get_preview_binarydesc(
        struct ia_css_frame_info *out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for video stage.
+/* @brief Get a binary descriptor for video stage.
  *
  * @param[in/out] pipe
  * @param[out] video_descr
@@ -105,7 +105,7 @@ extern enum ia_css_err ia_css_pipe_get_video_binarydesc(
        struct ia_css_frame_info *vf_info,
        int stream_config_left_padding);
 
-/** @brief Get a binary descriptor for yuv scaler stage.
+/* @brief Get a binary descriptor for yuv scaler stage.
  *
  * @param[in/out] pipe
  * @param[out] yuv_scaler_descr
@@ -124,7 +124,7 @@ void ia_css_pipe_get_yuvscaler_binarydesc(
        struct ia_css_frame_info *internal_out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for capture pp stage.
+/* @brief Get a binary descriptor for capture pp stage.
  *
  * @param[in/out] pipe
  * @param[out] capture_pp_descr
@@ -140,7 +140,7 @@ extern void ia_css_pipe_get_capturepp_binarydesc(
        struct ia_css_frame_info *out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for primary capture.
+/* @brief Get a binary descriptor for primary capture.
  *
  * @param[in] pipe
  * @param[out] prim_descr
@@ -158,7 +158,7 @@ extern void ia_css_pipe_get_primary_binarydesc(
        struct ia_css_frame_info *vf_info,
        unsigned int stage_idx);
 
-/** @brief Get a binary descriptor for pre gdc stage.
+/* @brief Get a binary descriptor for pre gdc stage.
  *
  * @param[in] pipe
  * @param[out] pre_gdc_descr
@@ -173,7 +173,7 @@ extern void ia_css_pipe_get_pre_gdc_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Get a binary descriptor for gdc stage.
+/* @brief Get a binary descriptor for gdc stage.
  *
  * @param[in] pipe
  * @param[out] gdc_descr
@@ -188,7 +188,7 @@ extern void ia_css_pipe_get_gdc_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Get a binary descriptor for post gdc.
+/* @brief Get a binary descriptor for post gdc.
  *
  * @param[in] pipe
  * @param[out] post_gdc_descr
@@ -205,7 +205,7 @@ extern void ia_css_pipe_get_post_gdc_binarydesc(
        struct ia_css_frame_info *out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for de.
+/* @brief Get a binary descriptor for de.
  *
  * @param[in] pipe
  * @param[out] pre_de_descr
@@ -220,7 +220,7 @@ extern void ia_css_pipe_get_pre_de_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Get a binary descriptor for pre anr stage.
+/* @brief Get a binary descriptor for pre anr stage.
  *
  * @param[in] pipe
  * @param[out] pre_anr_descr
@@ -235,7 +235,7 @@ extern void ia_css_pipe_get_pre_anr_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Get a binary descriptor for ANR stage.
+/* @brief Get a binary descriptor for ANR stage.
  *
  * @param[in] pipe
  * @param[out] anr_descr
@@ -250,7 +250,7 @@ extern void ia_css_pipe_get_anr_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Get a binary descriptor for post anr stage.
+/* @brief Get a binary descriptor for post anr stage.
  *
  * @param[in] pipe
  * @param[out] post_anr_descr
@@ -267,7 +267,7 @@ extern void ia_css_pipe_get_post_anr_binarydesc(
        struct ia_css_frame_info *out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for ldc stage.
+/* @brief Get a binary descriptor for ldc stage.
  *
  * @param[in/out] pipe
  * @param[out] capture_pp_descr
@@ -282,7 +282,7 @@ extern void ia_css_pipe_get_ldc_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Calculates the required BDS factor
+/* @brief Calculates the required BDS factor
  *
  * @param[in] input_res
  * @param[in] output_res
index ba88587..155b6fb 100644 (file)
@@ -18,7 +18,7 @@
 #include <ia_css_types.h>
 #include <ia_css_frame_public.h>
 
-/** @brief Get Input format bits per pixel based on stream configuration of this
+/* @brief Get Input format bits per pixel based on stream configuration of this
  * pipe.
  *
  * @param[in] pipe
index f8b2e45..a8c2767 100644 (file)
@@ -22,7 +22,7 @@
 #include <ia_css_stream_public.h>
 #include <ia_css_stream_format.h>
 
-/** @brief convert "errno" error code to "ia_css_err" error code
+/* @brief convert "errno" error code to "ia_css_err" error code
  *
  * @param[in]  "errno" error code
  * @return     "ia_css_err" error code
@@ -31,7 +31,7 @@
 enum ia_css_err ia_css_convert_errno(
        int in_err);
 
-/** @brief check vf frame info.
+/* @brief check vf frame info.
  *
  * @param[in] info
  * @return     IA_CSS_SUCCESS or error code upon error.
@@ -40,7 +40,7 @@ enum ia_css_err ia_css_convert_errno(
 extern enum ia_css_err ia_css_util_check_vf_info(
        const struct ia_css_frame_info * const info);
 
-/** @brief check input configuration.
+/* @brief check input configuration.
  *
  * @param[in] stream_config
  * @param[in] must_be_raw
@@ -52,7 +52,7 @@ extern enum ia_css_err ia_css_util_check_input(
        bool must_be_raw,
        bool must_be_yuv);
 
-/** @brief check vf and out frame info.
+/* @brief check vf and out frame info.
  *
  * @param[in] out_info
  * @param[in] vf_info
@@ -63,7 +63,7 @@ extern enum ia_css_err ia_css_util_check_vf_out_info(
        const struct ia_css_frame_info * const out_info,
        const struct ia_css_frame_info * const vf_info);
 
-/** @brief check width and height
+/* @brief check width and height
  *
  * @param[in] width
  * @param[in] height
@@ -75,7 +75,7 @@ extern enum ia_css_err ia_css_util_check_res(
        unsigned int height);
 
 #ifdef ISP2401
-/** @brief compare resolutions (less or equal)
+/* @brief compare resolutions (less or equal)
  *
  * @param[in] a resolution
  * @param[in] b resolution
@@ -108,7 +108,7 @@ extern bool ia_css_util_resolution_is_even(
                const struct ia_css_resolution resolution);
 
 #endif
-/** @brief check width and height
+/* @brief check width and height
  *
  * @param[in] stream_format
  * @param[in] two_ppc
@@ -119,7 +119,7 @@ extern unsigned int ia_css_util_input_format_bpp(
        enum ia_css_stream_format stream_format,
        bool two_ppc);
 
-/** @brief check if input format it raw
+/* @brief check if input format it raw
  *
  * @param[in] stream_format
  * @return true if the input format is raw or false otherwise
@@ -128,7 +128,7 @@ extern unsigned int ia_css_util_input_format_bpp(
 extern bool ia_css_util_is_input_format_raw(
        enum ia_css_stream_format stream_format);
 
-/** @brief check if input format it yuv
+/* @brief check if input format it yuv
  *
  * @param[in] stream_format
  * @return true if the input format is yuv or false otherwise
index 6720ab5..9c0cb4a 100644 (file)
@@ -277,6 +277,6 @@ static inline void csi_rx_be_ctrl_reg_store(
 
        ia_css_device_store_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
 }
-/** end of DLI */
+/* end of DLI */
 
 #endif /* __CSI_RX_PRIVATE_H_INCLUDED__ */
index 470c92d..4d07c2f 100644 (file)
@@ -192,7 +192,7 @@ STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_dump_state(
                ia_css_print("IBUF controller ID %d Process ID %d isp_sync_state 0x%x\n", ID, i, state->proc_state[i].isp_sync_state);
        }
 }
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -227,7 +227,7 @@ STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_reg_store(
 
        ia_css_device_store_uint32(IBUF_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
 }
-/** end of DLI */
+/* end of DLI */
 
 
 #endif /* __IBUF_CTRL_PRIVATE_H_INCLUDED__ */
index 14d1d3b..842ae34 100644 (file)
@@ -26,7 +26,7 @@
 #include "isys_irq_private.h"
 #endif
 
-/** Public interface */
+/* Public interface */
 STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_status_enable(
        const isys_irq_ID_t     isys_irqc_id)
 {
index c17ce13..e69f398 100644 (file)
@@ -59,7 +59,7 @@ STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_dump(
                state->status, state->edge, state->mask, state->enable, state->level_no);
 }
 
-/** end of NCI */
+/* end of NCI */
 
 /* -------------------------------------------------------+
  |              Device level interface (DLI)              |
@@ -101,7 +101,7 @@ STORAGE_CLASS_ISYS2401_IRQ_C hrt_data isys_irqc_reg_load(
        return value;
 }
 
-/** end of DLI */
+/* end of DLI */
 
 #endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
 
index 1603a09..f946105 100644 (file)
@@ -122,7 +122,7 @@ STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_dump_state(
                stream2mmio_print_sid_state(&(state->sid_state[i]));
        }
 }
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -163,6 +163,6 @@ STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_reg_store(
        ia_css_device_store_uint32(STREAM2MMIO_CTRL_BASE[ID] +
                reg * sizeof(hrt_data), value);
 }
-/** end of DLI */
+/* end of DLI */
 
 #endif /* __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__ */
index 3f34b50..c5bf540 100644 (file)
@@ -160,5 +160,5 @@ STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_reg_store(
 
        ia_css_device_store_uint32(PIXELGEN_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
 }
-/** end of DLI */
+/* end of DLI */
 #endif /* __PIXELGEN_PRIVATE_H_INCLUDED__ */
index e7a734a..1be5c69 100644 (file)
@@ -46,7 +46,7 @@ struct isys2401_dma_port_cfg_s {
        uint32_t cropping;
        uint32_t width;
  };
-/** end of DMA Port */
+/* end of DMA Port */
 
 /************************************************
  *
@@ -79,7 +79,7 @@ struct isys2401_dma_cfg_s {
        isys2401_dma_extension  extension;
        uint32_t                height;
 };
-/** end of DMA Device */
+/* end of DMA Device */
 
 /* isys2401_dma_channel limits per DMA ID */
 extern const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID];
index 216813e..0bf2feb 100644 (file)
@@ -86,6 +86,6 @@ struct pixelgen_prbs_cfg_s {
        sync_generator_cfg_t    sync_gen_cfg;
 };
 
-/** end of Pixel-generator: TPG. ("pixelgen_global.h") */
+/* end of Pixel-generator: TPG. ("pixelgen_global.h") */
 #endif /* __PIXELGEN_GLOBAL_H_INCLUDED__ */
 
index 9f7ecac..d2e3a2d 100644 (file)
@@ -331,7 +331,7 @@ typedef enum {
        IBUF_CTRL2_ID,          /* map ISYS2401_IBUF_CNTRL_C */
        N_IBUF_CTRL_ID
 } ibuf_ctrl_ID_t;
-/** end of Input-buffer Controller */
+/* end of Input-buffer Controller */
 
 /*
  * Stream2MMIO.
@@ -364,7 +364,7 @@ typedef enum {
        STREAM2MMIO_SID7_ID,
        N_STREAM2MMIO_SID_ID
 } stream2mmio_sid_ID_t;
-/** end of Stream2MMIO */
+/* end of Stream2MMIO */
 
 /**
  * Input System 2401: CSI-MIPI recevier.
@@ -390,7 +390,7 @@ typedef enum {
        CSI_RX_DLANE3_ID,               /* map to DLANE3 in CSI RX */
        N_CSI_RX_DLANE_ID
 } csi_rx_fe_dlane_ID_t;
-/** end of CSI-MIPI receiver */
+/* end of CSI-MIPI receiver */
 
 typedef enum {
        ISYS2401_DMA0_ID = 0,
@@ -406,7 +406,7 @@ typedef enum {
        PIXELGEN2_ID,
        N_PIXELGEN_ID
 } pixelgen_ID_t;
-/** end of pixel-generator. ("system_global.h") */
+/* end of pixel-generator. ("system_global.h") */
 
 typedef enum {
        INPUT_SYSTEM_CSI_PORT0_ID = 0,
index 1f6a55f..efcd6e1 100644 (file)
@@ -31,7 +31,7 @@ more details.
 #ifndef __CSS_API_VERSION_H
 #define __CSS_API_VERSION_H
 
-/** @file
+/* @file
  * CSS API version file. This file contains the version number of the CSS-API.
  *
  * This file is generated from a set of input files describing the CSS-API
index 5a4eabf..bcfd443 100644 (file)
@@ -21,7 +21,7 @@
 #endif /* __INLINE_GP_TIMER__ */
 #include "system_local.h"
 
-/** FIXME: not sure if reg_load(), reg_store() should be API.
+/* FIXME: not sure if reg_load(), reg_store() should be API.
  */
 static uint32_t
 gp_timer_reg_load(uint32_t reg);
index 3b5df85..426d022 100644 (file)
@@ -73,7 +73,7 @@ extern void csi_rx_be_ctrl_get_state(
 extern void csi_rx_be_ctrl_dump_state(
                const csi_rx_backend_ID_t ID,
                csi_rx_be_ctrl_state_t *state);
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -130,6 +130,6 @@ extern void csi_rx_be_ctrl_reg_store(
        const csi_rx_backend_ID_t ID,
        const hrt_address reg,
        const hrt_data value);
-/** end of DLI */
+/* end of DLI */
 #endif /* USE_INPUT_SYSTEM_VERSION_2401 */
 #endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */
index 1ac0e64..98ee994 100644 (file)
@@ -54,7 +54,7 @@ STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_proc_state(
 STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_dump_state(
                const ibuf_ctrl_ID_t ID,
                ibuf_ctrl_state_t *state);
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -87,7 +87,7 @@ STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_reg_store(
        const ibuf_ctrl_ID_t ID,
        const hrt_address reg,
        const hrt_data value);
-/** end of DLI */
+/* end of DLI */
 
 #endif /* USE_INPUT_SYSTEM_VERSION_2401 */
 #endif /* __IBUF_CTRL_PUBLIC_H_INCLUDED__ */
index a025ad5..0d978e5 100644 (file)
@@ -49,7 +49,7 @@
 
 /* Arithmetic */
 
-/** @brief bitwise AND
+/* @brief bitwise AND
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -63,7 +63,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_and(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief bitwise OR
+/* @brief bitwise OR
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -77,7 +77,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_or(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief bitwise XOR
+/* @brief bitwise XOR
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -91,7 +91,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_xor(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief bitwise inverse
+/* @brief bitwise inverse
  *
  * @param[in] _a       first argument
  *
@@ -105,7 +105,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_inv(
 
 /* Additive */
 
-/** @brief addition
+/* @brief addition
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -120,7 +120,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_add(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief subtraction
+/* @brief subtraction
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -135,7 +135,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_sub(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief saturated addition
+/* @brief saturated addition
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -150,7 +150,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_addsat(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief saturated subtraction
+/* @brief saturated subtraction
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -166,7 +166,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subsat(
     const tvector1w     _b);
 
 #ifdef ISP2401
-/** @brief Unsigned saturated subtraction
+/* @brief Unsigned saturated subtraction
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -182,7 +182,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w_unsigned OP_1w_subsat_u(
     const tvector1w_unsigned _b);
 
 #endif
-/** @brief subtraction with shift right and rounding
+/* @brief subtraction with shift right and rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -202,7 +202,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subasr1(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Subtraction with shift right and rounding
+/* @brief Subtraction with shift right and rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -217,7 +217,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalfrnd(
     const tvector1w    _a,
     const tvector1w    _b);
 
-/** @brief Subtraction with shift right and no rounding
+/* @brief Subtraction with shift right and no rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -233,7 +233,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalf(
     const tvector1w    _b);
 
 
-/** @brief saturated absolute value
+/* @brief saturated absolute value
  *
  * @param[in] _a       input
  *
@@ -247,7 +247,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalf(
 STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_abs(
     const tvector1w     _a);
 
-/** @brief saturated absolute difference
+/* @brief saturated absolute difference
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -264,7 +264,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subabssat(
 
 /* Multiplicative */
 
-/** @brief doubling multiply
+/* @brief doubling multiply
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -281,7 +281,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector2w OP_1w_muld(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief integer multiply
+/* @brief integer multiply
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -298,7 +298,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mul(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief fractional saturating multiply
+/* @brief fractional saturating multiply
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -316,7 +316,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qmul(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief fractional saturating multiply with rounding
+/* @brief fractional saturating multiply with rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -337,7 +337,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qrmul(
 
 /* Comparative */
 
-/** @brief equal
+/* @brief equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -351,7 +351,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_eq(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief not equal
+/* @brief not equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -365,7 +365,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_ne(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief less or equal
+/* @brief less or equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -379,7 +379,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_le(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief less then
+/* @brief less then
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -393,7 +393,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_lt(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief greater or equal
+/* @brief greater or equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -407,7 +407,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_ge(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief greater than
+/* @brief greater than
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -423,7 +423,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_gt(
 
 /* Shift */
 
-/** @brief aritmetic shift right
+/* @brief aritmetic shift right
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -441,7 +441,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asr(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief aritmetic shift right with rounding
+/* @brief aritmetic shift right with rounding
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -460,7 +460,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asrrnd(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief saturating arithmetic shift left
+/* @brief saturating arithmetic shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -480,7 +480,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asl(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief saturating aritmetic shift left
+/* @brief saturating aritmetic shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -493,7 +493,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_aslsat(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief logical shift left
+/* @brief logical shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -510,7 +510,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lsl(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief logical shift right
+/* @brief logical shift right
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -528,7 +528,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lsr(
     const tvector1w     _b);
 
 #ifdef ISP2401
-/** @brief bidirectional saturating arithmetic shift
+/* @brief bidirectional saturating arithmetic shift
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -546,7 +546,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_ashift_sat(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief bidirectional non-saturating arithmetic shift
+/* @brief bidirectional non-saturating arithmetic shift
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -565,7 +565,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_ashift(
     const tvector1w     _b);
 
 
-/** @brief bidirectional logical shift
+/* @brief bidirectional logical shift
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -588,7 +588,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lshift(
 #endif
 /* Cast */
 
-/** @brief Cast from int to 1w
+/* @brief Cast from int to 1w
  *
  * @param[in] _a       input
  *
@@ -601,7 +601,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lshift(
 STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_int_cast_to_1w(
     const int           _a);
 
-/** @brief Cast from 1w to int
+/* @brief Cast from 1w to int
  *
  * @param[in] _a       input
  *
@@ -614,7 +614,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_int_cast_to_1w(
 STORAGE_CLASS_ISP_OP1W_FUNC_H int OP_1w_cast_to_int(
     const tvector1w      _a);
 
-/** @brief Cast from 1w to 2w
+/* @brief Cast from 1w to 2w
  *
  * @param[in] _a       input
  *
@@ -627,7 +627,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H int OP_1w_cast_to_int(
 STORAGE_CLASS_ISP_OP1W_FUNC_H tvector2w OP_1w_cast_to_2w(
     const tvector1w     _a);
 
-/** @brief Cast from 2w to 1w
+/* @brief Cast from 2w to 1w
  *
  * @param[in] _a       input
  *
@@ -641,7 +641,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_2w_cast_to_1w(
     const tvector2w    _a);
 
 
-/** @brief Cast from 2w to 1w with saturation
+/* @brief Cast from 2w to 1w with saturation
  *
  * @param[in] _a       input
  *
@@ -657,7 +657,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_2w_sat_cast_to_1w(
 
 /* clipping */
 
-/** @brief Clip asymmetrical
+/* @brief Clip asymmetrical
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -673,7 +673,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_clip_asym(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Clip zero
+/* @brief Clip zero
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -691,7 +691,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_clipz(
 
 /* division */
 
-/** @brief Truncated division
+/* @brief Truncated division
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -708,7 +708,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_div(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Fractional saturating divide
+/* @brief Fractional saturating divide
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -726,7 +726,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qdiv(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Modulo
+/* @brief Modulo
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -741,7 +741,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mod(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Unsigned integer Square root
+/* @brief Unsigned integer Square root
  *
  * @param[in] _a       input
  *
@@ -754,7 +754,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w_unsigned OP_1w_sqrt_u(
 
 /* Miscellaneous */
 
-/** @brief Multiplexer
+/* @brief Multiplexer
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -770,7 +770,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mux(
     const tvector1w     _b,
     const tflags           _c);
 
-/** @brief Average without rounding
+/* @brief Average without rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -786,7 +786,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w  OP_1w_avg(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Average with rounding
+/* @brief Average with rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -802,7 +802,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_avgrnd(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Minimum
+/* @brief Minimum
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -816,7 +816,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_min(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Maximum
+/* @brief Maximum
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
index cf7e731..7575d26 100644 (file)
@@ -48,7 +48,7 @@
 
 /* Arithmetic */
 
-/** @brief bitwise AND
+/* @brief bitwise AND
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -62,7 +62,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_and(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief bitwise OR
+/* @brief bitwise OR
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -76,7 +76,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_or(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief bitwise XOR
+/* @brief bitwise XOR
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -90,7 +90,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_xor(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief bitwise inverse
+/* @brief bitwise inverse
  *
  * @param[in] _a       first argument
  *
@@ -104,7 +104,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_inv(
 
 /* Additive */
 
-/** @brief addition
+/* @brief addition
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -119,7 +119,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_add(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief subtraction
+/* @brief subtraction
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -134,7 +134,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_sub(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief saturated addition
+/* @brief saturated addition
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -149,7 +149,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_addsat(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief saturated subtraction
+/* @brief saturated subtraction
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -164,7 +164,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subsat(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief subtraction with shift right and rounding
+/* @brief subtraction with shift right and rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -184,7 +184,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subasr1(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Subtraction with shift right and rounding
+/* @brief Subtraction with shift right and rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -199,7 +199,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalfrnd(
     const tvector2w    _a,
     const tvector2w    _b);
 
-/** @brief Subtraction with shift right and no rounding
+/* @brief Subtraction with shift right and no rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -214,7 +214,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalf(
     const tvector2w    _a,
     const tvector2w    _b);
 
-/** @brief saturated absolute value
+/* @brief saturated absolute value
  *
  * @param[in] _a       input
  *
@@ -228,7 +228,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalf(
 STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_abs(
     const tvector2w     _a);
 
-/** @brief saturated absolute difference
+/* @brief saturated absolute difference
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -245,7 +245,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subabssat(
 
 /* Multiplicative */
 
-/** @brief integer multiply
+/* @brief integer multiply
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -262,7 +262,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mul(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief fractional saturating multiply
+/* @brief fractional saturating multiply
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -279,7 +279,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_qmul(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief fractional saturating multiply with rounding
+/* @brief fractional saturating multiply with rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -301,7 +301,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_qrmul(
 
 /* Comparative */
 
-/** @brief equal
+/* @brief equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -315,7 +315,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_eq(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief not equal
+/* @brief not equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -329,7 +329,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_ne(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief less or equal
+/* @brief less or equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -343,7 +343,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_le(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief less then
+/* @brief less then
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -357,7 +357,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_lt(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief greater or equal
+/* @brief greater or equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -371,7 +371,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_ge(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief greater than
+/* @brief greater than
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -387,7 +387,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_gt(
 
 /* Shift */
 
-/** @brief aritmetic shift right
+/* @brief aritmetic shift right
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -404,7 +404,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asr(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief aritmetic shift right with rounding
+/* @brief aritmetic shift right with rounding
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -423,7 +423,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asrrnd(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief saturating aritmetic shift left
+/* @brief saturating aritmetic shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -443,7 +443,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asl(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief saturating aritmetic shift left
+/* @brief saturating aritmetic shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -456,7 +456,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_aslsat(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief logical shift left
+/* @brief logical shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -473,7 +473,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_lsl(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief logical shift right
+/* @brief logical shift right
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -492,7 +492,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_lsr(
 
 /* clipping */
 
-/** @brief Clip asymmetrical
+/* @brief Clip asymmetrical
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -507,7 +507,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_clip_asym(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Clip zero
+/* @brief Clip zero
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -524,7 +524,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_clipz(
 
 /* division */
 
-/** @brief Truncated division
+/* @brief Truncated division
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -541,7 +541,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_div(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Saturating truncated division
+/* @brief Saturating truncated division
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -559,7 +559,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector1w OP_2w_divh(
     const tvector2w     _a,
     const tvector1w     _b);
 
-/** @brief Modulo
+/* @brief Modulo
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -572,7 +572,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mod(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Unsigned Integer Square root
+/* @brief Unsigned Integer Square root
  *
  * @param[in] _a       input
  *
@@ -585,7 +585,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector1w_unsigned OP_2w_sqrt_u(
 
 /* Miscellaneous */
 
-/** @brief Multiplexer
+/* @brief Multiplexer
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -601,7 +601,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mux(
     const tvector2w     _b,
     const tflags           _c);
 
-/** @brief Average without rounding
+/* @brief Average without rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -617,7 +617,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w  OP_2w_avg(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Average with rounding
+/* @brief Average with rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -633,7 +633,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_avgrnd(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Minimum
+/* @brief Minimum
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -647,7 +647,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_min(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Maximum
+/* @brief Maximum
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
index 5624cfc..6c53ca9 100644 (file)
@@ -43,7 +43,7 @@ STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_get_sid_state(
                const stream2mmio_ID_t ID,
                const stream2mmio_sid_ID_t sid_id,
                stream2mmio_sid_state_t *state);
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -96,6 +96,6 @@ STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_reg_store(
                const stream2mmio_ID_t ID,
                const hrt_address reg,
                const hrt_data value);
-/** end of DLI */
+/* end of DLI */
 
 #endif /* __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__ */
index c0f3f3e..f597e07 100644 (file)
@@ -41,7 +41,7 @@ STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_get_state(
 STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_dump_state(
                const pixelgen_ID_t ID,
                pixelgen_ctrl_state_t *state);
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -73,7 +73,7 @@ STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_reg_store(
        const pixelgen_ID_t ID,
        const hrt_address reg,
        const hrt_data value);
-/** end of DLI */
+/* end of DLI */
 
 #endif /* USE_INPUT_SYSTEM_VERSION_2401 */
 #endif /* __PIXELGEN_PUBLIC_H_INCLUDED__ */
index a202d6d..c1638c0 100644 (file)
@@ -27,7 +27,7 @@
 
 #include "ref_vector_func_types.h"
 
-/** @brief Doubling multiply accumulate with saturation
+/* @brief Doubling multiply accumulate with saturation
  *
  * @param[in] acc accumulator
  * @param[in] a multiply input
@@ -44,7 +44,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector2w OP_1w_maccd_sat(
        tvector1w a,
        tvector1w b );
 
-/** @brief Doubling multiply accumulate
+/* @brief Doubling multiply accumulate
  *
  * @param[in] acc accumulator
  * @param[in] a multiply input
@@ -61,7 +61,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector2w OP_1w_maccd(
        tvector1w a,
        tvector1w b );
 
-/** @brief Re-aligning multiply
+/* @brief Re-aligning multiply
  *
  * @param[in] a multiply input
  * @param[in] b multiply input
@@ -78,7 +78,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_mul_realigning(
        tvector1w b,
        tscalar1w shift );
 
-/** @brief Leading bit index
+/* @brief Leading bit index
  *
  * @param[in] a        input
  *
@@ -92,7 +92,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_mul_realigning(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_lod(
                tvector1w a);
 
-/** @brief Config Unit Input Processing
+/* @brief Config Unit Input Processing
  *
  * @param[in] a            input
  * @param[in] input_scale   input scaling factor
@@ -111,7 +111,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_input_scaling_offset_clamping(
        tscalar1w_5bit_signed input_scale,
        tscalar1w_5bit_signed input_offset);
 
-/** @brief Config Unit Output Processing
+/* @brief Config Unit Output Processing
  *
  * @param[in] a             output
  * @param[in] output_scale   output scaling factor
@@ -127,7 +127,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_output_scaling_clamping(
        tvector1w a,
        tscalar1w_5bit_signed output_scale);
 
-/** @brief Config Unit Piecewiselinear estimation
+/* @brief Config Unit Piecewise-linear estimation
  *
  * @param[in] a                  input
  * @param[in] config_points   config parameter structure
@@ -143,7 +143,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_piecewise_estimation(
        tvector1w a,
        ref_config_points config_points);
 
-/** @brief Fast Config Unit
+/* @brief Fast Config Unit
  *
  * @param[in] x                input
  * @param[in] init_vectors     LUT data structure
@@ -161,7 +161,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H  tvector1w OP_1w_XCU(
        xcu_ref_init_vectors init_vectors);
 
 
-/** @brief LXCU
+/* @brief LXCU
  *
  * @param[in] x                input
  * @param[in] init_vectors     LUT data structure
@@ -180,7 +180,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_LXCU(
        tvector1w x,
        xcu_ref_init_vectors init_vectors);
 
-/** @brief Coring
+/* @brief Coring
  *
  * @param[in] coring_vec   Amount of coring based on brightness level
  * @param[in] filt_input   Vector of input pixels on which Coring is applied
@@ -196,7 +196,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w coring(
        tvector1w filt_input,
        tscalar1w m_CnrCoring0 );
 
-/** @brief Normalised FIR with coefficients [3,4,1]
+/* @brief Normalised FIR with coefficients [3,4,1]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -209,7 +209,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w coring(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_m90_nrm (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [1,4,3]
+/* @brief Normalised FIR with coefficients [1,4,3]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -222,7 +222,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_m90_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_p90_nrm (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [1,2,1]
+/* @brief Normalised FIR with coefficients [1,2,1]
  *
  * @param[in] m        1x3 matrix with pixels
  *
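The [1,2,1] kernel sums to 4, so "normalised" here means the weighted sum is scaled back down by that sum. A scalar sketch of the idea, assuming simple truncation on the final shift (the ISP primitive may instead round), with an illustrative function name:

#include <stdint.h>

/* Scalar 1x3 FIR with taps [1,2,1], normalised by the tap sum (4).
 * Truncation on the final shift is an assumption for illustration only.
 */
static inline int16_t fir1x3_121_scalar(int16_t p0, int16_t p1, int16_t p2)
{
	int32_t acc = p0 + 2 * p1 + p2;

	return (int16_t)(acc >> 2);
}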
@@ -234,7 +234,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_p90_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [13,16,3]
+/* @brief Normalised FIR with coefficients [13,16,3]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -246,7 +246,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph0 (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [9,16,7]
+/* @brief Normalised FIR with coefficients [9,16,7]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -258,7 +258,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph0 (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph1 (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [5,16,11]
+/* @brief Normalised FIR with coefficients [5,16,11]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -270,7 +270,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph1 (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph2 (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [1,16,15]
+/* @brief Normalised FIR with coefficients [1,16,15]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -282,7 +282,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph2 (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph3 (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with programable phase shift
+/* @brief Normalised FIR with programmable phase shift
  *
  * @param[in] m        1x3 matrix with pixels
  * @param[in] coeff    phase shift
@@ -295,7 +295,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph3 (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_calc_coeff (
        const s_1w_1x3_matrix           m, tscalar1w_3bit coeff);
 
-/** @brief 3 tap FIR with coefficients [1,1,1]
+/* @brief 3 tap FIR with coefficients [1,1,1]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -308,7 +308,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_9dB_nrm (
        const s_1w_1x3_matrix           m);
 
 #ifdef ISP2401
-/** @brief      symmetric 3 tap FIR acts as LPF or BSF
+/* @brief      symmetric 3 tap FIR acts as LPF or BSF
  *
  * @param[in] m 1x3 matrix with pixels
  * @param[in] k filter coefficient shift
@@ -336,7 +336,7 @@ sym_fir1x3m_lpf_bsf(s_1w_1x3_matrix m,
                    tscalar_bool bsf_flag);
 #endif
 
-/** @brief Normalised 2D FIR with coefficients  [1;2;1] * [1,2,1]
+/* @brief Normalised 2D FIR with coefficients  [1;2;1] * [1,2,1]
  *
  * @param[in] m        3x3 matrix with pixels
  *
@@ -353,7 +353,7 @@ sym_fir1x3m_lpf_bsf(s_1w_1x3_matrix m,
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_6dB_nrm (
        const s_1w_3x3_matrix           m);
 
-/** @brief Normalised 2D FIR with coefficients  [1;1;1] * [1,1,1]
+/* @brief Normalised 2D FIR with coefficients  [1;1;1] * [1,1,1]
  *
  * @param[in] m        3x3 matrix with pixels
  *
@@ -371,7 +371,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_6dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
        const s_1w_3x3_matrix           m);
 
-/** @brief Normalised dual output 2D FIR with coefficients  [1;2;1] * [1,2,1]
+/* @brief Normalised dual output 2D FIR with coefficients  [1;2;1] * [1,2,1]
  *
  * @param[in] m        4x3 matrix with pixels
  *
@@ -391,7 +391,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
  STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_6dB_out2x1_nrm (
        const s_1w_4x3_matrix           m);
 
-/** @brief Normalised dual output 2D FIR with coefficients [1;1;1] * [1,1,1]
+/* @brief Normalised dual output 2D FIR with coefficients [1;1;1] * [1,1,1]
  *
  * @param[in] m        4x3 matrix with pixels
  *
@@ -411,7 +411,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_9dB_out2x1_nrm (
        const s_1w_4x3_matrix           m);
 
-/** @brief Normalised 2D FIR 5x5
+/* @brief Normalised 2D FIR 5x5
  *
  * @param[in] m        5x5 matrix with pixels
  *
@@ -429,7 +429,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_9dB_out2x1_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_15dB_nrm (
        const s_1w_5x5_matrix   m);
 
-/** @brief Normalised FIR 1x5
+/* @brief Normalised FIR 1x5
  *
  * @param[in] m        1x5 matrix with pixels
  *
@@ -447,7 +447,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_15dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_12dB_nrm (
        const s_1w_1x5_matrix m);
 
-/** @brief Normalised 2D FIR 5x5
+/* @brief Normalised 2D FIR 5x5
  *
  * @param[in] m        5x5 matrix with pixels
  *
@@ -465,7 +465,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_12dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_12dB_nrm (
        const s_1w_5x5_matrix m);
 
-/** @brief Approximate averaging FIR 1x5
+/* @brief Approximate averaging FIR 1x5
  *
  * @param[in] m        1x5 matrix with pixels
  *
@@ -479,7 +479,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_12dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_box (
        s_1w_1x5_matrix m);
 
-/** @brief Approximate averaging FIR 1x9
+/* @brief Approximate averaging FIR 1x9
  *
  * @param[in] m        1x9 matrix with pixels
  *
@@ -493,7 +493,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_box (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x9m_box (
        s_1w_1x9_matrix m);
 
-/** @brief Approximate averaging FIR 1x11
+/* @brief Approximate averaging FIR 1x11
  *
  * @param[in] m        1x11 matrix with pixels
  *
@@ -507,7 +507,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x9m_box (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x11m_box (
        s_1w_1x11_matrix m);
 
-/** @brief Symmetric 7 tap filter with normalization
+/* @brief Symmetric 7 tap filter with normalization
  *
  *  @param[in] in 1x7 matrix with pixels
  *  @param[in] coeff 1x4 matrix with coefficients
@@ -528,7 +528,7 @@ fir1x7m_sym_nrm(s_1w_1x7_matrix in,
                s_1w_1x4_matrix coeff,
                tvector1w out_shift);
 
-/** @brief Symmetric 7 tap filter with normalization at input side
+/* @brief Symmetric 7 tap filter with normalization at input side
  *
  *  @param[in] in 1x7 matrix with pixels
  *  @param[in] coeff 1x4 matrix with coefficients
@@ -549,7 +549,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
 fir1x7m_sym_innrm_approx(s_1w_1x7_matrix in,
                         s_1w_1x4_matrix coeff);
 
-/** @brief Symmetric 7 tap filter with normalization at output side
+/* @brief Symmetric 7 tap filter with normalization at output side
  *
  *  @param[in] in 1x7 matrix with pixels
  *  @param[in] coeff 1x4 matrix with coefficients
@@ -571,7 +571,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
 fir1x7m_sym_outnrm_approx(s_1w_1x7_matrix in,
                         s_1w_1x4_matrix coeff);
 
-/** @brief 4 tap filter with normalization
+/* @brief 4 tap filter with normalization
  *
  *  @param[in] in 1x4 matrix with pixels
  *  @param[in] coeff 1x4 matrix with coefficients
@@ -588,7 +588,7 @@ fir1x4m_nrm(s_1w_1x4_matrix in,
                s_1w_1x4_matrix coeff,
                tvector1w out_shift);
 
-/** @brief 4 tap filter with normalization for half pixel interpolation
+/* @brief 4 tap filter with normalization for half pixel interpolation
  *
  *  @param[in] in 1x4 matrix with pixels
  *
@@ -604,7 +604,7 @@ fir1x4m_nrm(s_1w_1x4_matrix in,
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
 fir1x4m_bicubic_bezier_half(s_1w_1x4_matrix in);
 
-/** @brief 4 tap filter with normalization for quarter pixel interpolation
+/* @brief 4 tap filter with normalization for quarter pixel interpolation
  *
  *  @param[in] in 1x4 matrix with pixels
  *  @param[in] coeff 1x4 matrix with coefficients
@@ -626,7 +626,7 @@ fir1x4m_bicubic_bezier_quarter(s_1w_1x4_matrix in,
                        s_1w_1x4_matrix coeff);
 
 
-/** @brief Symmetric 3 tap filter with normalization
+/* @brief Symmetric 3 tap filter with normalization
  *
  *  @param[in] in 1x3 matrix with pixels
  *  @param[in] coeff 1x2 matrix with coefficients
@@ -646,7 +646,7 @@ fir1x3m_sym_nrm(s_1w_1x3_matrix in,
                s_1w_1x2_matrix coeff,
                tvector1w out_shift);
 
-/** @brief Symmetric 3 tap filter with normalization
+/* @brief Symmetric 3 tap filter with normalization
  *
  *  @param[in] in 1x3 matrix with pixels
  *  @param[in] coeff 1x2 matrix with coefficients
@@ -666,7 +666,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
 fir1x3m_sym_nrm_approx(s_1w_1x3_matrix in,
                       s_1w_1x2_matrix coeff);
 
-/** @brief Mean of 1x3 matrix
+/* @brief Mean of 1x3 matrix
  *
  *  @param[in] m 1x3 matrix with pixels
  *
@@ -678,7 +678,7 @@ fir1x3m_sym_nrm_approx(s_1w_1x3_matrix in,
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x3m(
        s_1w_1x3_matrix m);
 
-/** @brief Mean of 3x3 matrix
+/* @brief Mean of 3x3 matrix
  *
  *  @param[in] m 3x3 matrix with pixels
  *
@@ -690,7 +690,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x3m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean3x3m(
        s_1w_3x3_matrix m);
 
-/** @brief Mean of 1x4 matrix
+/* @brief Mean of 1x4 matrix
  *
  *  @param[in] m 1x4 matrix with pixels
  *
@@ -701,7 +701,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean3x3m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x4m(
        s_1w_1x4_matrix m);
 
-/** @brief Mean of 4x4 matrix
+/* @brief Mean of 4x4 matrix
  *
  *  @param[in] m 4x4 matrix with pixels
  *
@@ -712,7 +712,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x4m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean4x4m(
        s_1w_4x4_matrix m);
 
-/** @brief Mean of 2x3 matrix
+/* @brief Mean of 2x3 matrix
  *
  *  @param[in] m 2x3 matrix with pixels
  *
@@ -724,7 +724,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean4x4m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean2x3m(
        s_1w_2x3_matrix m);
 
-/** @brief Mean of 1x5 matrix
+/* @brief Mean of 1x5 matrix
  *
  *  @param[in] m 1x5 matrix with pixels
  *
@@ -735,7 +735,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean2x3m(
 */
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x5m(s_1w_1x5_matrix m);
 
-/** @brief Mean of 1x6 matrix
+/* @brief Mean of 1x6 matrix
  *
  *  @param[in] m 1x6 matrix with pixels
  *
@@ -747,7 +747,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x5m(s_1w_1x5_matrix m);
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x6m(
        s_1w_1x6_matrix m);
 
-/** @brief Mean of 5x5 matrix
+/* @brief Mean of 5x5 matrix
  *
  *  @param[in] m 5x5 matrix with pixels
  *
@@ -759,7 +759,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x6m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean5x5m(
        s_1w_5x5_matrix m);
 
-/** @brief Mean of 6x6 matrix
+/* @brief Mean of 6x6 matrix
  *
  *  @param[in] m 6x6 matrix with pixels
  *
@@ -771,7 +771,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean5x5m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean6x6m(
        s_1w_6x6_matrix m);
 
-/** @brief Minimum of 4x4 matrix
+/* @brief Minimum of 4x4 matrix
  *
  *  @param[in] m 4x4 matrix with pixels
  *
@@ -783,7 +783,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean6x6m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w min4x4m(
        s_1w_4x4_matrix m);
 
-/** @brief Maximum of 4x4 matrix
+/* @brief Maximum of 4x4 matrix
  *
  *  @param[in] m 4x4 matrix with pixels
  *
@@ -795,7 +795,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w min4x4m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w max4x4m(
        s_1w_4x4_matrix m);
 
-/** @brief SAD between two 3x3 matrices
+/* @brief SAD between two 3x3 matrices
  *
  *  @param[in] a 3x3 matrix with pixels
  *
@@ -813,7 +813,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad3x3m_precise(
        s_1w_3x3_matrix a,
        s_1w_3x3_matrix b);
 
-/** @brief SAD between two 3x3 matrices
+/* @brief SAD between two 3x3 matrices
  *
  *  @param[in] a 3x3 matrix with pixels
  *
@@ -833,7 +833,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad3x3m(
        s_1w_3x3_matrix a,
        s_1w_3x3_matrix b);
 
-/** @brief SAD between two 5x5 matrices
+/* @brief SAD between two 5x5 matrices
  *
  *  @param[in] a 5x5 matrix with pixels
  *
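A SAD (sum of absolute differences) over two equally sized blocks is the accumulated |a - b| over all elements. A compact scalar reference for the 5x5 case described above, written against flat arrays rather than the s_1w_5x5_matrix type (whose layout is not visible in these hunks):

#include <stdint.h>

/* Reference SAD over two 5x5 pixel blocks passed as flat arrays; the real
 * primitive operates on the ISP vector matrix types, so this is only a model.
 */
static inline uint32_t sad5x5_scalar(const int16_t a[25], const int16_t b[25])
{
	uint32_t sad = 0;
	int i;

	for (i = 0; i < 25; i++) {
		int32_t d = (int32_t)a[i] - (int32_t)b[i];

		sad += (uint32_t)(d < 0 ? -d : d);
	}
	return sad;
}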
@@ -847,7 +847,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad5x5m(
        s_1w_5x5_matrix a,
        s_1w_5x5_matrix b);
 
-/** @brief Absolute gradient between two sets of 1x5 matrices
+/* @brief Absolute gradient between two sets of 1x5 matrices
  *
  *  @param[in] m0 first set of 1x5 matrix with pixels
  *  @param[in] m1 second set of 1x5 matrix with pixels
@@ -860,7 +860,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad5x5m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
 absgrad1x5m(s_1w_1x5_matrix m0, s_1w_1x5_matrix m1);
 
-/** @brief Bi-linear Interpolation optimized(approximate)
+/* @brief Bi-linear Interpolation optimized(approximate)
  *
  * @param[in] a input0
  * @param[in] b input1
@@ -882,7 +882,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol_approx_c(
        tvector1w b,
        tscalar1w_weight c);
 
-/** @brief Bi-linear Interpolation optimized(approximate)
+/* @brief Bi-linear Interpolation optimized(approximate)
  *
  * @param[in] a input0
  * @param[in] b input1
@@ -904,7 +904,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol_approx(
        tvector1w b,
        tvector1w_weight c);
 
-/** @brief Bi-linear Interpolation
+/* @brief Bi-linear Interpolation
  *
  * @param[in] a input0
  * @param[in] b input1
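Despite the name, with two inputs and a single weight this primitive is the one-dimensional blend that bilinear interpolation is built from. As a rough model only (the hunks above do not show which operand the weight c applies to, nor its fixed-point format, so both are assumptions here):

/* Floating-point model of a two-input weighted blend; which operand the
 * weight applies to, and the fixed-point format of c, are assumptions.
 */
static inline float linear_blend(float a, float b, float c)
{
	return a * c + b * (1.0f - c);
}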
@@ -925,7 +925,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol(
        tvector1w b,
        tscalar1w_weight c);
 
-/** @brief Generic Block Matching Algorithm
+/* @brief Generic Block Matching Algorithm
  * @param[in] search_window pointer to input search window of 16x16 pixels
  * @param[in] ref_block pointer to input reference block of 8x8 pixels, where N<=M
  * @param[in] output pointer to output sads
@@ -954,9 +954,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H int generic_block_matching_algorithm(
        tscalar1w_4bit_bma_shift shift);
 
 #ifndef ISP2401
-/** @brief OP_1w_asp_bma_16_1_32way
+/* @brief OP_1w_asp_bma_16_1_32way
 #else
-/** @brief OP_1w_asp_bma_16_1_32way_nomask
+/* @brief OP_1w_asp_bma_16_1_32way_nomask
 #endif
  *
  * @param[in] search_area input search window of 16x16 pixels
@@ -984,9 +984,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_1 OP_1w_asp_bma_16_1_32way_nomask(
        tscalar1w_4bit_bma_shift shift);
 
 #ifndef ISP2401
-/** @brief OP_1w_asp_bma_16_2_32way
+/* @brief OP_1w_asp_bma_16_2_32way
 #else
-/** @brief OP_1w_asp_bma_16_2_32way_nomask
+/* @brief OP_1w_asp_bma_16_2_32way_nomask
 #endif
  *
  * @param[in] search_area input search window of 16x16 pixels
@@ -1011,9 +1011,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_2 OP_1w_asp_bma_16_2_32way_nomask(
        ref_block_8x8 input_block,
        tscalar1w_4bit_bma_shift shift);
 #ifndef ISP2401
-/** @brief OP_1w_asp_bma_14_1_32way
+/* @brief OP_1w_asp_bma_14_1_32way
 #else
-/** @brief OP_1w_asp_bma_14_1_32way_nomask
+/* @brief OP_1w_asp_bma_14_1_32way_nomask
 #endif
  *
  * @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels
@@ -1041,9 +1041,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_1 OP_1w_asp_bma_14_1_32way_nomask(
        tscalar1w_4bit_bma_shift shift);
 
 #ifndef ISP2401
-/** @brief OP_1w_asp_bma_14_2_32way
+/* @brief OP_1w_asp_bma_14_2_32way
 #else
-/** @brief OP_1w_asp_bma_14_2_32way_nomask
+/* @brief OP_1w_asp_bma_14_2_32way_nomask
 #endif
  *
  * @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels
@@ -1069,7 +1069,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_2 OP_1w_asp_bma_14_2_32way_nomask(
        tscalar1w_4bit_bma_shift shift);
 
 #ifdef ISP2401
-/** @brief multiplex addition and passing
+/* @brief multiplex addition and passing
  *
  *  @param[in] _a first pixel
  *  @param[in] _b second pixel
@@ -1087,7 +1087,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_cond_add(
 
 #endif
 #ifdef HAS_bfa_unit
-/** @brief OP_1w_single_bfa_7x7
+/* @brief OP_1w_single_bfa_7x7
  *
  * @param[in] weights - spatial and range weight lut
  * @param[in] threshold - threshold plane, for range weight scaling
@@ -1115,7 +1115,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bfa_7x7_output OP_1w_single_bfa_7x7(
        tvector1w central_pix,
        s_1w_7x7_matrix src_plane);
 
-/** @brief OP_1w_joint_bfa_7x7
+/* @brief OP_1w_joint_bfa_7x7
  *
  * @param[in] weights - spatial and range weight lut
  * @param[in] threshold0 - 1st threshold plane, for range weight scaling
@@ -1149,7 +1149,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bfa_7x7_output OP_1w_joint_bfa_7x7(
        tvector1w central_pix1,
        s_1w_7x7_matrix src1_plane);
 
-/** @brief bbb_bfa_gen_spatial_weight_lut
+/* @brief bbb_bfa_gen_spatial_weight_lut
  *
  * @param[in] in - 7x7 matrix of spatial weights
  * @param[in] out - generated LUT
@@ -1163,7 +1163,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_spatial_weight_lut(
        s_1w_7x7_matrix in,
        tvector1w out[BFA_MAX_KWAY]);
 
-/** @brief bbb_bfa_gen_range_weight_lut
+/* @brief bbb_bfa_gen_range_weight_lut
  *
  * @param[in] in - input range weight,
  * @param[in] out - generated LUT
@@ -1184,7 +1184,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_range_weight_lut(
 #endif
 
 #ifdef ISP2401
-/** @brief OP_1w_imax32
+/* @brief OP_1w_imax32
  *
  * @param[in] src - structure that holds an array of 32 elements.
  *
@@ -1195,7 +1195,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_range_weight_lut(
 STORAGE_CLASS_REF_VECTOR_FUNC_H int OP_1w_imax32(
        imax32_ref_in_vector src);
 
-/** @brief OP_1w_imaxidx32
+/* @brief OP_1w_imaxidx32
  *
  * @param[in] src - structure that holds a vector of elements.
  *
index e85e5c8..6436dae 100644 (file)
@@ -168,7 +168,7 @@ static inline unsigned int round_half_down_mul(unsigned int a, unsigned int b)
 }
 #endif
 
-/** @brief Next Power of Two
+/* @brief Next Power of Two
  *
  *  @param[in] unsigned number
  *
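A "next power of two" helper conventionally rounds an unsigned value up to the smallest power of two greater than or equal to it. One common way to do that for 32-bit inputs is the bit-smearing trick sketched below; the zero-input and above-2^31 cases are assumptions, since the brief above does not define them, and the name is illustrative.

#include <stdint.h>

/* Round a 32-bit value up to the next power of two using bit smearing:
 * fill every bit below the MSB, then add 1 so the carry leaves a single
 * power-of-two bit set. Values above 2^31 wrap to 0.
 */
static inline uint32_t next_pow2_u32(uint32_t v)
{
	if (v == 0)
		return 1;
	v -= 1;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}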
index d80437c..f4d9674 100644 (file)
@@ -23,7 +23,7 @@
  */
 
 
-/** @brief Copy from src_buf to dest_buf.
+/* @brief Copy from src_buf to dest_buf.
  *
  * @param[out] dest_buf. Destination buffer to copy to
  * @param[in]  dest_size. The size of the destination buffer in bytes
@@ -53,7 +53,7 @@ static inline int memcpy_s(
        return 0;
 }
 
-/** @brief Get the length of the string, excluding the null terminator
+/* @brief Get the length of the string, excluding the null terminator
  *
  * @param[in]  src_str. The source string
  * @param[in]  max_len. Look only for max_len bytes in the string
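The helper documented here mirrors a bounded strnlen: scan at most max_len bytes and stop at the first NUL terminator. A free-standing sketch of that contract, with an illustrative name rather than the driver's own strnlen_s:

#include <stddef.h>

/* Count the bytes before the terminating NUL, inspecting at most max_len
 * bytes; returns max_len if no terminator is found in that range.
 */
static size_t bounded_strlen(const char *src, size_t max_len)
{
	size_t ix = 0;

	while (ix < max_len && src[ix] != '\0')
		ix++;
	return ix;
}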
@@ -78,7 +78,7 @@ static size_t strnlen_s(
        return ix;
 }
 
-/** @brief Copy string from src_str to dest_str
+/* @brief Copy string from src_str to dest_str
  *
  * @param[out] dest_str. Destination buffer to copy to
  * @param[in]  dest_size. The size of the destination buffer in bytes
@@ -120,7 +120,7 @@ static inline int strncpy_s(
        return 0;
 }
 
-/** @brief Copy string from src_str to dest_str
+/* @brief Copy string from src_str to dest_str
  *
  * @param[out] dest_str. Destination buffer to copy to
  * @param[in]  dest_size. The size of the destination buffer in bytes
index 9aa8c16..2cf1d58 100644 (file)
@@ -17,7 +17,7 @@
 #include <assert_support.h>
 #include "tag_local.h"
 
-/**
+/*
  * @brief      Creates the tag description from the given parameters.
  * @param[in]  num_captures
  * @param[in]  skip
@@ -39,7 +39,7 @@ sh_css_create_tag_descr(int num_captures,
        tag_descr->exp_id       = exp_id;
 }
 
-/**
+/*
  * @brief      Encodes the members of tag description into a 32-bit value.
  * @param[in]  tag             Pointer to the tag description
  * @return     (unsigned int)  Encoded 32-bit tag-info
index 2458b37..e44df69 100644 (file)
@@ -16,7 +16,7 @@
 #ifndef _IA_CSS_H_
 #define _IA_CSS_H_
 
-/** @file
+/* @file
  * This file is the starting point of the CSS-API. It includes all CSS-API
  * header files.
  */
index a80a7db..0801987 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_3A_H
 #define __IA_CSS_3A_H
 
-/** @file
+/* @file
  * This file contains types used for 3A statistics
  */
 
@@ -31,7 +31,7 @@ enum ia_css_3a_tables {
        IA_CSS_NUM_3A_TABLES
 };
 
-/** Structure that holds 3A statistics in the ISP internal
+/* Structure that holds 3A statistics in the ISP internal
  * format. Use ia_css_get_3a_statistics() to translate
  * this to the format used on the host (3A library).
  * */
@@ -48,13 +48,13 @@ struct ia_css_isp_3a_statistics {
        struct {
                ia_css_ptr rgby_tbl;
        } data_hmem;
-       uint32_t exp_id;     /**< exposure id, to match statistics to a frame,
+       uint32_t exp_id;     /** exposure id, to match statistics to a frame,
                                  see ia_css_event_public.h for more detail. */
-       uint32_t isp_config_id;/**< Unique ID to track which config was actually applied to a particular frame */
-       ia_css_ptr data_ptr; /**< pointer to base of all data */
-       uint32_t   size;     /**< total size of all data */
+       uint32_t isp_config_id;/** Unique ID to track which config was actually applied to a particular frame */
+       ia_css_ptr data_ptr; /** pointer to base of all data */
+       uint32_t   size;     /** total size of all data */
        uint32_t   dmem_size;
-       uint32_t   vmem_size; /**< both lo and hi have this size */
+       uint32_t   vmem_size; /** both lo and hi have this size */
        uint32_t   hmem_size;
 };
 #define SIZE_OF_DMEM_STRUCT                                            \
@@ -77,7 +77,7 @@ struct ia_css_isp_3a_statistics {
         SIZE_OF_IA_CSS_PTR +                                           \
         4 * sizeof(uint32_t))
 
-/** Map with host-side pointers to ISP-format statistics.
+/* Map with host-side pointers to ISP-format statistics.
  * These pointers can either be copies of ISP data or memory mapped
  * ISP pointers.
  * All of the data behind these pointers is allocated contiguously, the
@@ -85,17 +85,17 @@ struct ia_css_isp_3a_statistics {
  * point into this one block of data.
  */
 struct ia_css_isp_3a_statistics_map {
-       void                    *data_ptr; /**< Pointer to start of memory */
+       void                    *data_ptr; /** Pointer to start of memory */
        struct ia_css_3a_output *dmem_stats;
        uint16_t                *vmem_stats_hi;
        uint16_t                *vmem_stats_lo;
        struct ia_css_bh_table  *hmem_stats;
-       uint32_t                 size; /**< total size in bytes of data_ptr */
-       uint32_t                 data_allocated; /**< indicate whether data_ptr
+       uint32_t                 size; /** total size in bytes of data_ptr */
+       uint32_t                 data_allocated; /** indicate whether data_ptr
                                                    was allocated or not. */
 };
 
-/** @brief Copy and translate 3A statistics from an ISP buffer to a host buffer
+/* @brief Copy and translate 3A statistics from an ISP buffer to a host buffer
  * @param[out] host_stats Host buffer.
  * @param[in]  isp_stats ISP buffer.
  * @return     error value if temporary memory cannot be allocated
@@ -109,7 +109,7 @@ enum ia_css_err
 ia_css_get_3a_statistics(struct ia_css_3a_statistics           *host_stats,
                         const struct ia_css_isp_3a_statistics *isp_stats);
 
-/** @brief Translate 3A statistics from ISP format to host format.
+/* @brief Translate 3A statistics from ISP format to host format.
  * @param[out] host_stats host-format statistics
  * @param[in]  isp_stats  ISP-format statistics
  * @return     None
@@ -125,35 +125,35 @@ ia_css_translate_3a_statistics(
 
 /* Convenience functions for alloc/free of certain datatypes */
 
-/** @brief Allocate memory for the 3a statistics on the ISP
+/* @brief Allocate memory for the 3a statistics on the ISP
  * @param[in]  grid The grid.
  * @return             Pointer to the allocated 3a statistics buffer on the ISP
 */
 struct ia_css_isp_3a_statistics *
 ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
 
-/** @brief Free the 3a statistics memory on the isp
+/* @brief Free the 3a statistics memory on the isp
  * @param[in]  me Pointer to the 3a statistics buffer on the ISP.
  * @return             None
 */
 void
 ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me);
 
-/** @brief Allocate memory for the 3a statistics on the host
+/* @brief Allocate memory for the 3a statistics on the host
  * @param[in]  grid The grid.
  * @return             Pointer to the allocated 3a statistics buffer on the host
 */
 struct ia_css_3a_statistics *
 ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
 
-/** @brief Free the 3a statistics memory on the host
+/* @brief Free the 3a statistics memory on the host
  * @param[in]  me Pointer to the 3a statistics buffer on the host.
  * @return             None
  */
 void
 ia_css_3a_statistics_free(struct ia_css_3a_statistics *me);
 
-/** @brief Allocate a 3a statistics map structure
+/* @brief Allocate a 3a statistics map structure
  * @param[in]  isp_stats pointer to ISP 3a statistics struct
  * @param[in]  data_ptr  host-side pointer to ISP 3a statistics.
  * @return             Pointer to the allocated 3a statistics map
@@ -174,7 +174,7 @@ ia_css_isp_3a_statistics_map_allocate(
        const struct ia_css_isp_3a_statistics *isp_stats,
        void *data_ptr);
 
-/** @brief Free the 3a statistics map
+/* @brief Free the 3a statistics map
  * @param[in]  me Pointer to the 3a statistics map
  * @return             None
  *
index a2a1873..138bc3b 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef _IA_CSS_ACC_TYPES_H
 #define _IA_CSS_ACC_TYPES_H
 
-/** @file
+/* @file
  * This file contains types used for acceleration
  */
 
  * in the kernel and HAL.
 */
 
-/** Type of acceleration.
+/* Type of acceleration.
  */
 enum ia_css_acc_type {
-       IA_CSS_ACC_NONE,        /**< Normal binary */
-       IA_CSS_ACC_OUTPUT,      /**< Accelerator stage on output frame */
-       IA_CSS_ACC_VIEWFINDER,  /**< Accelerator stage on viewfinder frame */
-       IA_CSS_ACC_STANDALONE,  /**< Stand-alone acceleration */
+       IA_CSS_ACC_NONE,        /** Normal binary */
+       IA_CSS_ACC_OUTPUT,      /** Accelerator stage on output frame */
+       IA_CSS_ACC_VIEWFINDER,  /** Accelerator stage on viewfinder frame */
+       IA_CSS_ACC_STANDALONE,  /** Stand-alone acceleration */
 };
 
-/** Cells types
+/* Cell types
  */
 enum ia_css_cell_type {
        IA_CSS_SP0 = 0,
@@ -58,45 +58,45 @@ enum ia_css_cell_type {
        MAX_NUM_OF_CELLS
 };
 
-/** Firmware types.
+/* Firmware types.
  */
 enum ia_css_fw_type {
-       ia_css_sp_firmware,             /**< Firmware for the SP */
-       ia_css_isp_firmware,    /**< Firmware for the ISP */
-       ia_css_bootloader_firmware, /**< Firmware for the BootLoader */
-       ia_css_acc_firmware             /**< Firmware for accelrations */
+       ia_css_sp_firmware,             /** Firmware for the SP */
+       ia_css_isp_firmware,    /** Firmware for the ISP */
+       ia_css_bootloader_firmware, /** Firmware for the BootLoader */
+       ia_css_acc_firmware             /** Firmware for accelerations */
 };
 
 struct ia_css_blob_descr;
 
-/** Blob descriptor.
+/* Blob descriptor.
  * This structure describes an SP or ISP blob.
  * It describes the test, data and bss sections as well as position in a
  * firmware file.
  * For convenience, it contains dynamic data after loading.
  */
 struct ia_css_blob_info {
-       /**< Static blob data */
-       uint32_t offset;                /**< Blob offset in fw file */
-       struct ia_css_isp_param_memory_offsets memory_offsets;  /**< offset wrt hdr in bytes */
-       uint32_t prog_name_offset;  /**< offset wrt hdr in bytes */
-       uint32_t size;                  /**< Size of blob */
-       uint32_t padding_size;  /**< total cummulative of bytes added due to section alignment */
-       uint32_t icache_source; /**< Position of icache in blob */
-       uint32_t icache_size;   /**< Size of icache section */
-       uint32_t icache_padding;/**< bytes added due to icache section alignment */
-       uint32_t text_source;   /**< Position of text in blob */
-       uint32_t text_size;             /**< Size of text section */
-       uint32_t text_padding;  /**< bytes added due to text section alignment */
-       uint32_t data_source;   /**< Position of data in blob */
-       uint32_t data_target;   /**< Start of data in SP dmem */
-       uint32_t data_size;             /**< Size of text section */
-       uint32_t data_padding;  /**< bytes added due to data section alignment */
-       uint32_t bss_target;    /**< Start position of bss in SP dmem */
-       uint32_t bss_size;              /**< Size of bss section */
-       /**< Dynamic data filled by loader */
-       CSS_ALIGN(const void  *code, 8);                /**< Code section absolute pointer within fw, code = icache + text */
-       CSS_ALIGN(const void  *data, 8);                /**< Data section absolute pointer within fw, data = data + bss */
+       /** Static blob data */
+       uint32_t offset;                /** Blob offset in fw file */
+       struct ia_css_isp_param_memory_offsets memory_offsets;  /** offset wrt hdr in bytes */
+       uint32_t prog_name_offset;  /** offset wrt hdr in bytes */
+       uint32_t size;                  /** Size of blob */
+       uint32_t padding_size;  /** total cumulative bytes added due to section alignment */
+       uint32_t icache_source; /** Position of icache in blob */
+       uint32_t icache_size;   /** Size of icache section */
+       uint32_t icache_padding;/** bytes added due to icache section alignment */
+       uint32_t text_source;   /** Position of text in blob */
+       uint32_t text_size;             /** Size of text section */
+       uint32_t text_padding;  /** bytes added due to text section alignment */
+       uint32_t data_source;   /** Position of data in blob */
+       uint32_t data_target;   /** Start of data in SP dmem */
+       uint32_t data_size;             /** Size of data section */
+       uint32_t data_padding;  /** bytes added due to data section alignment */
+       uint32_t bss_target;    /** Start position of bss in SP dmem */
+       uint32_t bss_size;              /** Size of bss section */
+       /** Dynamic data filled by loader */
+       CSS_ALIGN(const void  *code, 8);                /** Code section absolute pointer within fw, code = icache + text */
+       CSS_ALIGN(const void  *data, 8);                /** Data section absolute pointer within fw, data = data + bss */
 };
 
 struct ia_css_binary_input_info {
@@ -140,9 +140,9 @@ struct ia_css_binary_s3a_info {
        uint32_t                fixed_s3a_deci_log;
 };
 
-/** DPC related binary info */
+/* DPC related binary info */
 struct ia_css_binary_dpc_info {
-       uint32_t                bnr_lite; /**< bnr lite enable flag */
+       uint32_t                bnr_lite; /** bnr lite enable flag */
 };
 
 struct ia_css_binary_iterator_info {
@@ -193,7 +193,7 @@ struct ia_css_binary_block_info {
        uint32_t        output_block_height;
 };
 
-/** Structure describing an ISP binary.
+/* Structure describing an ISP binary.
  * It describes the capabilities of a binary, like the maximum resolution,
  * support features, dma channels, uds features, etc.
  * This part is to be used by the SP.
@@ -210,7 +210,7 @@ struct ia_css_binary_info {
        struct ia_css_binary_dvs_info           dvs;
        struct ia_css_binary_vf_dec_info        vf_dec;
        struct ia_css_binary_s3a_info           s3a;
-       struct ia_css_binary_dpc_info           dpc_bnr; /**< DPC related binary info */
+       struct ia_css_binary_dpc_info           dpc_bnr; /** DPC related binary info */
        struct ia_css_binary_iterator_info      iterator;
        struct ia_css_binary_address_info       addresses;
        struct ia_css_binary_uds_info           uds;
@@ -269,7 +269,7 @@ struct ia_css_binary_info {
        } dma;
 };
 
-/** Structure describing an ISP binary.
+/* Structure describing an ISP binary.
  * It describes the capabilities of a binary, like the maximum resolution,
  * support features, dma channels, uds features, etc.
  */
@@ -281,8 +281,8 @@ struct ia_css_binary_xinfo {
        enum ia_css_acc_type         type;
        CSS_ALIGN(int32_t            num_output_formats, 8);
        enum ia_css_frame_format     output_formats[IA_CSS_FRAME_FORMAT_NUM];
-       CSS_ALIGN(int32_t            num_vf_formats, 8); /**< number of supported vf formats */
-       enum ia_css_frame_format     vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /**< types of supported vf formats */
+       CSS_ALIGN(int32_t            num_vf_formats, 8); /** number of supported vf formats */
+       enum ia_css_frame_format     vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /** types of supported vf formats */
        uint8_t                      num_output_pins;
        ia_css_ptr                   xmem_addr;
        CSS_ALIGN(const struct ia_css_blob_descr *blob, 8);
@@ -291,55 +291,55 @@ struct ia_css_binary_xinfo {
        CSS_ALIGN(struct ia_css_binary_xinfo *next, 8);
 };
 
-/** Structure describing the Bootloader (an ISP binary).
+/* Structure describing the Bootloader (an ISP binary).
  * It contains several addresses, either in ddr, isp_dmem or
  * the entry function in icache.
  */
 struct ia_css_bl_info {
-       uint32_t num_dma_cmds;  /**< Number of cmds sent by CSS */
-       uint32_t dma_cmd_list;  /**< Dma command list sent by CSS */
-       uint32_t sw_state;      /**< Polled from css */
+       uint32_t num_dma_cmds;  /** Number of cmds sent by CSS */
+       uint32_t dma_cmd_list;  /** Dma command list sent by CSS */
+       uint32_t sw_state;      /** Polled from css */
        /* Entry functions */
-       uint32_t bl_entry;      /**< The SP entry function */
+       uint32_t bl_entry;      /** The SP entry function */
 };
 
-/** Structure describing the SP binary.
+/* Structure describing the SP binary.
  * It contains several addresses, either in ddr, sp_dmem or
  * the entry function in pmem.
  */
 struct ia_css_sp_info {
-       uint32_t init_dmem_data; /**< data sect config, stored to dmem */
-       uint32_t per_frame_data; /**< Per frame data, stored to dmem */
-       uint32_t group;         /**< Per pipeline data, loaded by dma */
-       uint32_t output;                /**< SP output data, loaded by dmem */
-       uint32_t host_sp_queue; /**< Host <-> SP queues */
-       uint32_t host_sp_com;/**< Host <-> SP commands */
-       uint32_t isp_started;   /**< Polled from sensor thread, csim only */
-       uint32_t sw_state;      /**< Polled from css */
-       uint32_t host_sp_queues_initialized; /**< Polled from the SP */
-       uint32_t sleep_mode;  /**< different mode to halt SP */
-       uint32_t invalidate_tlb;                /**< inform SP to invalidate mmu TLB */
+       uint32_t init_dmem_data; /** data sect config, stored to dmem */
+       uint32_t per_frame_data; /** Per frame data, stored to dmem */
+       uint32_t group;         /** Per pipeline data, loaded by dma */
+       uint32_t output;                /** SP output data, loaded by dmem */
+       uint32_t host_sp_queue; /** Host <-> SP queues */
+       uint32_t host_sp_com;/** Host <-> SP commands */
+       uint32_t isp_started;   /** Polled from sensor thread, csim only */
+       uint32_t sw_state;      /** Polled from css */
+       uint32_t host_sp_queues_initialized; /** Polled from the SP */
+       uint32_t sleep_mode;  /** different mode to halt SP */
+       uint32_t invalidate_tlb;                /** inform SP to invalidate mmu TLB */
 #ifndef ISP2401
-       uint32_t stop_copy_preview;       /**< suspend copy and preview pipe when capture */
+       uint32_t stop_copy_preview;       /** suspend copy and preview pipe when capture */
 #endif
-       uint32_t debug_buffer_ddr_address;      /**< inform SP the address
+       uint32_t debug_buffer_ddr_address;      /** inform SP the address
        of DDR debug queue */
-       uint32_t perf_counter_input_system_error; /**< input system perf
+       uint32_t perf_counter_input_system_error; /** input system perf
        counter array */
 #ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
-       uint32_t debug_wait; /**< thread/pipe post mortem debug */
-       uint32_t debug_stage; /**< thread/pipe post mortem debug */
-       uint32_t debug_stripe; /**< thread/pipe post mortem debug */
+       uint32_t debug_wait; /** thread/pipe post mortem debug */
+       uint32_t debug_stage; /** thread/pipe post mortem debug */
+       uint32_t debug_stripe; /** thread/pipe post mortem debug */
 #endif
-       uint32_t threads_stack; /**< sp thread's stack pointers */
-       uint32_t threads_stack_size; /**< sp thread's stack sizes */
-       uint32_t curr_binary_id;        /**< current binary id */
-       uint32_t raw_copy_line_count;   /**< raw copy line counter */
-       uint32_t ddr_parameter_address; /**< acc param ddrptr, sp dmem */
-       uint32_t ddr_parameter_size;    /**< acc param size, sp dmem */
+       uint32_t threads_stack; /** sp thread's stack pointers */
+       uint32_t threads_stack_size; /** sp thread's stack sizes */
+       uint32_t curr_binary_id;        /** current binary id */
+       uint32_t raw_copy_line_count;   /** raw copy line counter */
+       uint32_t ddr_parameter_address; /** acc param ddrptr, sp dmem */
+       uint32_t ddr_parameter_size;    /** acc param size, sp dmem */
        /* Entry functions */
-       uint32_t sp_entry;      /**< The SP entry function */
-       uint32_t tagger_frames_addr;   /**< Base address of tagger state */
+       uint32_t sp_entry;      /** The SP entry function */
+       uint32_t tagger_frames_addr;   /** Base address of tagger state */
 };
 
 /* The following #if is there because this header file is also included
@@ -348,37 +348,37 @@ struct ia_css_sp_info {
    More permanent solution will be to refactor this include.
 */
 #if !defined(__ISP)
-/** Accelerator firmware information.
+/* Accelerator firmware information.
  */
 struct ia_css_acc_info {
-       uint32_t per_frame_data; /**< Dummy for now */
+       uint32_t per_frame_data; /** Dummy for now */
 };
 
-/** Firmware information.
+/* Firmware information.
  */
 union ia_css_fw_union {
-       struct ia_css_binary_xinfo      isp; /**< ISP info */
-       struct ia_css_sp_info           sp;  /**< SP info */
-       struct ia_css_bl_info           bl;  /**< Bootloader info */
-       struct ia_css_acc_info          acc; /**< Accelerator info */
+       struct ia_css_binary_xinfo      isp; /** ISP info */
+       struct ia_css_sp_info           sp;  /** SP info */
+       struct ia_css_bl_info           bl;  /** Bootloader info */
+       struct ia_css_acc_info          acc; /** Accelerator info */
 };
 
-/** Firmware information.
+/* Firmware information.
  */
 struct ia_css_fw_info {
-       size_t                   header_size; /**< size of fw header */
+       size_t                   header_size; /** size of fw header */
        CSS_ALIGN(uint32_t type, 8);
-       union ia_css_fw_union    info; /**< Binary info */
-       struct ia_css_blob_info  blob; /**< Blob info */
+       union ia_css_fw_union    info; /** Binary info */
+       struct ia_css_blob_info  blob; /** Blob info */
        /* Dynamic part */
        struct ia_css_fw_info   *next;
-       CSS_ALIGN(uint32_t       loaded, 8);    /**< Firmware has been loaded */
-       CSS_ALIGN(const uint8_t *isp_code, 8);  /**< ISP pointer to code */
-       /**< Firmware handle between user space and kernel */
+       CSS_ALIGN(uint32_t       loaded, 8);    /** Firmware has been loaded */
+       CSS_ALIGN(const uint8_t *isp_code, 8);  /** ISP pointer to code */
+       /** Firmware handle between user space and kernel */
        CSS_ALIGN(uint32_t      handle, 8);
-       /**< Sections to copy from/to ISP */
+       /** Sections to copy from/to ISP */
        struct ia_css_isp_param_css_segments mem_initializers;
-       /**< Initializer for local ISP memories */
+       /** Initializer for local ISP memories */
 };
 
 struct ia_css_blob_descr {
@@ -390,39 +390,39 @@ struct ia_css_blob_descr {
 
 struct ia_css_acc_fw;
 
-/** Structure describing the SP binary of a stand-alone accelerator.
+/* Structure describing the SP binary of a stand-alone accelerator.
  */
 struct ia_css_acc_sp {
-       void (*init)(struct ia_css_acc_fw *);   /**< init for crun */
-       uint32_t sp_prog_name_offset;           /**< program name offset wrt hdr in bytes */
-       uint32_t sp_blob_offset;                /**< blob offset wrt hdr in bytes */
-       void     *entry;                        /**< Address of sp entry point */
-       uint32_t *css_abort;                    /**< SP dmem abort flag */
-       void     *isp_code;                     /**< SP dmem address holding xmem
+       void (*init)(struct ia_css_acc_fw *);   /** init for crun */
+       uint32_t sp_prog_name_offset;           /** program name offset wrt hdr in bytes */
+       uint32_t sp_blob_offset;                /** blob offset wrt hdr in bytes */
+       void     *entry;                        /** Address of sp entry point */
+       uint32_t *css_abort;                    /** SP dmem abort flag */
+       void     *isp_code;                     /** SP dmem address holding xmem
                                                     address of isp code */
-       struct ia_css_fw_info fw;               /**< SP fw descriptor */
-       const uint8_t *code;                    /**< ISP pointer of allocated SP code */
+       struct ia_css_fw_info fw;               /** SP fw descriptor */
+       const uint8_t *code;                    /** ISP pointer of allocated SP code */
 };
 
-/** Acceleration firmware descriptor.
+/* Acceleration firmware descriptor.
  * This descriptor describes either SP code (stand-alone), or
   * ISP code (a separate pipeline stage).
   */
 struct ia_css_acc_fw_hdr {
-       enum ia_css_acc_type type;      /**< Type of accelerator */
-       uint32_t        isp_prog_name_offset; /**< program name offset wrt
+       enum ia_css_acc_type type;      /** Type of accelerator */
+       uint32_t        isp_prog_name_offset; /** program name offset wrt
                                                   header in bytes */
-       uint32_t        isp_blob_offset;      /**< blob offset wrt header
+       uint32_t        isp_blob_offset;      /** blob offset wrt header
                                                   in bytes */
-       uint32_t        isp_size;             /**< Size of isp blob */
-       const uint8_t  *isp_code;             /**< ISP pointer to code */
-       struct ia_css_acc_sp  sp;  /**< Standalone sp code */
-       /**< Firmware handle between user space and kernel */
+       uint32_t        isp_size;             /** Size of isp blob */
+       const uint8_t  *isp_code;             /** ISP pointer to code */
+       struct ia_css_acc_sp  sp;  /** Standalone sp code */
+       /** Firmware handle between user space and kernel */
        uint32_t        handle;
-       struct ia_css_data parameters; /**< Current SP parameters */
+       struct ia_css_data parameters; /** Current SP parameters */
 };
 
-/** Firmware structure.
+/* Firmware structure.
   * This contains the header and actual blobs.
   * For standalone, it contains SP and ISP blob.
   * For a pipeline stage accelerator, it contains ISP code only.
@@ -430,7 +430,7 @@ struct ia_css_acc_fw_hdr {
   * header and computed using the access macros below.
   */
 struct ia_css_acc_fw {
-       struct ia_css_acc_fw_hdr header; /**< firmware header */
+       struct ia_css_acc_fw_hdr header; /** firmware header */
        /*
        int8_t   isp_progname[];          **< ISP program name
        int8_t   sp_progname[];   **< SP program name, stand-alone only
index b2ecf36..a0058ea 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_BUFFER_H
 #define __IA_CSS_BUFFER_H
 
-/** @file
+/* @file
  * This file contains datastructures and types for buffers used in CSS
  */
 
@@ -23,7 +23,7 @@
 #include "ia_css_types.h"
 #include "ia_css_timer.h"
 
-/** Enumeration of buffer types. Buffers can be queued and de-queued
+/* Enumeration of buffer types. Buffers can be queued and de-queued
  *  to hand them over between IA and ISP.
  */
 enum ia_css_buffer_type {
@@ -48,28 +48,28 @@ enum ia_css_buffer_type {
 
 /* Driver API is not SP/ISP visible, 64 bit types not supported on hivecc */
 #if !defined(__ISP)
-/** Buffer structure. This is a container structure that enables content
+/* Buffer structure. This is a container structure that enables content
  *  independent buffer queues and access functions.
  */
 struct ia_css_buffer {
-       enum ia_css_buffer_type type; /**< Buffer type. */
+       enum ia_css_buffer_type type; /** Buffer type. */
        unsigned int exp_id;
-       /**< exposure id for this buffer; 0 = not available
+       /** exposure id for this buffer; 0 = not available
             see ia_css_event_public.h for more detail. */
        union {
-               struct ia_css_isp_3a_statistics  *stats_3a;    /**< 3A statistics & optionally RGBY statistics. */
-               struct ia_css_isp_dvs_statistics *stats_dvs;   /**< DVS statistics. */
-               struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs;  /**< SKC DVS statistics. */
-               struct ia_css_frame              *frame;       /**< Frame buffer. */
-               struct ia_css_acc_param          *custom_data; /**< Custom buffer. */
-               struct ia_css_metadata           *metadata;    /**< Sensor metadata. */
-       } data; /**< Buffer data pointer. */
-       uint64_t driver_cookie; /**< cookie for the driver */
-       struct ia_css_time_meas timing_data; /**< timing data (readings from the timer) */
-       struct ia_css_clock_tick isys_eof_clock_tick; /**< ISYS's end of frame timer tick*/
+               struct ia_css_isp_3a_statistics  *stats_3a;    /** 3A statistics & optionally RGBY statistics. */
+               struct ia_css_isp_dvs_statistics *stats_dvs;   /** DVS statistics. */
+               struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs;  /** SKC DVS statistics. */
+               struct ia_css_frame              *frame;       /** Frame buffer. */
+               struct ia_css_acc_param          *custom_data; /** Custom buffer. */
+               struct ia_css_metadata           *metadata;    /** Sensor metadata. */
+       } data; /** Buffer data pointer. */
+       uint64_t driver_cookie; /** cookie for the driver */
+       struct ia_css_time_meas timing_data; /** timing data (readings from the timer) */
+       struct ia_css_clock_tick isys_eof_clock_tick; /** ISYS's end of frame timer tick*/
 };
 
-/** @brief Dequeue param buffers from sp2host_queue
+/* @brief Dequeue param buffers from sp2host_queue
  *
  * @return                                       None
  *
index a15d3e3..021a313 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_CONTROL_H
 #define __IA_CSS_CONTROL_H
 
-/** @file
+/* @file
  * This file contains functionality for starting and controlling CSS
  */
 
@@ -24,7 +24,7 @@
 #include <ia_css_firmware.h>
 #include <ia_css_irq.h>
 
-/** @brief Initialize the CSS API.
+/* @brief Initialize the CSS API.
  * @param[in]  env             Environment, provides functions to access the
  *                             environment in which the CSS code runs. This is
  *                             used for host side memory access and message
@@ -51,7 +51,7 @@ enum ia_css_err ia_css_init(
        uint32_t                 l1_base,
        enum ia_css_irq_type     irq_type);
 
-/** @brief Un-initialize the CSS API.
+/* @brief Un-initialize the CSS API.
  * @return     None
  *
  * This function deallocates all memory that has been allocated by the CSS API
@@ -66,7 +66,7 @@ enum ia_css_err ia_css_init(
 void
 ia_css_uninit(void);
 
-/** @brief Suspend CSS API for power down
+/* @brief Suspend CSS API for power down
  * @return     success or failure code
  *
  * suspend shuts down the system by:
@@ -80,7 +80,7 @@ ia_css_uninit(void);
 enum ia_css_err
 ia_css_suspend(void);
 
-/** @brief Resume CSS API from power down
+/* @brief Resume CSS API from power down
  * @return     success or failure code
  *
  * After a power cycle, this function will bring the CSS API back into
@@ -91,7 +91,7 @@ ia_css_suspend(void);
 enum ia_css_err
 ia_css_resume(void);
 
-/** @brief Enable use of a separate queue for ISYS events.
+/* @brief Enable use of a separate queue for ISYS events.
  *
  * @param[in]  enable: enable or disable use of separate ISYS event queues.
  * @return             error if called when SP is running.
@@ -105,7 +105,7 @@ ia_css_resume(void);
 enum ia_css_err
 ia_css_enable_isys_event_queue(bool enable);
 
-/** @brief Test whether the ISP has started.
+/* @brief Test whether the ISP has started.
  *
  * @return     Boolean flag true if the ISP has started or false otherwise.
  *
@@ -114,7 +114,7 @@ ia_css_enable_isys_event_queue(bool enable);
 bool
 ia_css_isp_has_started(void);
 
-/** @brief Test whether the SP has initialized.
+/* @brief Test whether the SP has initialized.
  *
  * @return     Boolean flag true if the SP has initialized or false otherwise.
  *
@@ -123,7 +123,7 @@ ia_css_isp_has_started(void);
 bool
 ia_css_sp_has_initialized(void);
 
-/** @brief Test whether the SP has terminated.
+/* @brief Test whether the SP has terminated.
  *
  * @return     Boolean flag true if the SP has terminated or false otherwise.
  *
@@ -132,7 +132,7 @@ ia_css_sp_has_initialized(void);
 bool
 ia_css_sp_has_terminated(void);
 
-/** @brief start SP hardware
+/* @brief start SP hardware
  *
  * @return                     IA_CSS_SUCCESS or error code upon error.
  *
@@ -144,7 +144,7 @@ enum ia_css_err
 ia_css_start_sp(void);
 
 
-/** @brief stop SP hardware
+/* @brief stop SP hardware
  *
  * @return                     IA_CSS_SUCCESS or error code upon error.
  *
index 59459f7..84a960b 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef _IA_CSS_DEVICE_ACCESS_H
 #define _IA_CSS_DEVICE_ACCESS_H
 
-/** @file
+/* @file
  * File containing internal functions for the CSS-API to access the CSS device.
  */
 
index 147bf81..1f01534 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_DVS_H
 #define __IA_CSS_DVS_H
 
-/** @file
+/* @file
  * This file contains types for DVS statistics
  */
 
@@ -31,7 +31,7 @@ enum dvs_statistics_type {
 };
 
 
-/** Structure that holds DVS statistics in the ISP internal
+/* Structure that holds DVS statistics in the ISP internal
  * format. Use ia_css_get_dvs_statistics() to translate
  * this to the format used on the host (DVS engine).
  * */
@@ -40,12 +40,12 @@ struct ia_css_isp_dvs_statistics {
        ia_css_ptr ver_proj;
        uint32_t   hor_size;
        uint32_t   ver_size;
-       uint32_t   exp_id;   /**< see ia_css_event_public.h for more detail */
+       uint32_t   exp_id;   /** see ia_css_event_public.h for more detail */
        ia_css_ptr data_ptr; /* base pointer containing all memory */
        uint32_t   size;     /* size of allocated memory in data_ptr */
 };
 
-/** Structure that holds SKC DVS statistics in the ISP internal
+/* Structure that holds SKC DVS statistics in the ISP internal
  * format. Use ia_css_dvs_statistics_get() to translate this to
  * the format used on the host.
  * */
@@ -82,7 +82,7 @@ union ia_css_dvs_statistics_host {
        struct ia_css_skc_dvs_statistics *p_skc_dvs_statistics_host;
 };
 
-/** @brief Copy DVS statistics from an ISP buffer to a host buffer.
+/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
  * @param[in]  host_stats Host buffer
  * @param[in]  isp_stats ISP buffer
  * @return     error value if temporary memory cannot be allocated
@@ -100,7 +100,7 @@ enum ia_css_err
 ia_css_get_dvs_statistics(struct ia_css_dvs_statistics *host_stats,
                          const struct ia_css_isp_dvs_statistics *isp_stats);
 
-/** @brief Translate DVS statistics from ISP format to host format
+/* @brief Translate DVS statistics from ISP format to host format
  * @param[in]  host_stats Host buffer
  * @param[in]  isp_stats ISP buffer
  * @return     None
@@ -116,7 +116,7 @@ ia_css_translate_dvs_statistics(
                struct ia_css_dvs_statistics *host_stats,
                const struct ia_css_isp_dvs_statistics_map *isp_stats);
 
-/** @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer.
+/* @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer.
  * @param[in]  host_stats Host buffer
  * @param[in]  isp_stats ISP buffer
  * @return     error value if temporary memory cannot be allocated
@@ -134,7 +134,7 @@ enum ia_css_err
 ia_css_get_dvs2_statistics(struct ia_css_dvs2_statistics *host_stats,
                           const struct ia_css_isp_dvs_statistics *isp_stats);
 
-/** @brief Translate DVS2 statistics from ISP format to host format
+/* @brief Translate DVS2 statistics from ISP format to host format
  * @param[in]  host_stats Host buffer
  * @param[in]  isp_stats ISP buffer
  * @return             None
@@ -150,7 +150,7 @@ ia_css_translate_dvs2_statistics(
                struct ia_css_dvs2_statistics      *host_stats,
                const struct ia_css_isp_dvs_statistics_map *isp_stats);
 
-/** @brief Copy DVS statistics from an ISP buffer to a host buffer.
+/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
  * @param[in] type - DVS statistics type
  * @param[in] host_stats Host buffer
  * @param[in] isp_stats ISP buffer
@@ -161,105 +161,105 @@ ia_css_dvs_statistics_get(enum dvs_statistics_type type,
                          union ia_css_dvs_statistics_host  *host_stats,
                          const union ia_css_dvs_statistics_isp *isp_stats);
 
-/** @brief Allocate the DVS statistics memory on the ISP
+/* @brief Allocate the DVS statistics memory on the ISP
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS statistics buffer on the ISP
 */
 struct ia_css_isp_dvs_statistics *
 ia_css_isp_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS statistics memory on the ISP
+/* @brief Free the DVS statistics memory on the ISP
  * @param[in]  me Pointer to the DVS statistics buffer on the ISP.
  * @return     None
 */
 void
 ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me);
 
-/** @brief Allocate the DVS 2.0 statistics memory
+/* @brief Allocate the DVS 2.0 statistics memory
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS statistics buffer on the ISP
 */
 struct ia_css_isp_dvs_statistics *
 ia_css_isp_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS 2.0 statistics memory
+/* @brief Free the DVS 2.0 statistics memory
  * @param[in]  me Pointer to the DVS statistics buffer on the ISP.
  * @return     None
 */
 void
 ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me);
 
-/** @brief Allocate the DVS statistics memory on the host
+/* @brief Allocate the DVS statistics memory on the host
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS statistics buffer on the host
 */
 struct ia_css_dvs_statistics *
 ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS statistics memory on the host
+/* @brief Free the DVS statistics memory on the host
  * @param[in]  me Pointer to the DVS statistics buffer on the host.
  * @return     None
 */
 void
 ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me);
 
-/** @brief Allocate the DVS coefficients memory
+/* @brief Allocate the DVS coefficients memory
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS coefficients buffer
 */
 struct ia_css_dvs_coefficients *
 ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS coefficients memory
+/* @brief Free the DVS coefficients memory
  * @param[in]  me Pointer to the DVS coefficients buffer.
  * @return     None
  */
 void
 ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me);
 
-/** @brief Allocate the DVS 2.0 statistics memory on the host
+/* @brief Allocate the DVS 2.0 statistics memory on the host
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS 2.0 statistics buffer on the host
  */
 struct ia_css_dvs2_statistics *
 ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS 2.0 statistics memory
+/* @brief Free the DVS 2.0 statistics memory
  * @param[in]  me Pointer to the DVS 2.0 statistics buffer on the host.
  * @return     None
 */
 void
 ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me);
 
-/** @brief Allocate the DVS 2.0 coefficients memory
+/* @brief Allocate the DVS 2.0 coefficients memory
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS 2.0 coefficients buffer
 */
 struct ia_css_dvs2_coefficients *
 ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS 2.0 coefficients memory
+/* @brief Free the DVS 2.0 coefficients memory
  * @param[in]  me Pointer to the DVS 2.0 coefficients buffer.
  * @return     None
 */
 void
 ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me);
 
-/** @brief Allocate the DVS 2.0 6-axis config memory
+/* @brief Allocate the DVS 2.0 6-axis config memory
  * @param[in]  stream The stream.
  * @return     Pointer to the allocated DVS 6axis configuration buffer
 */
 struct ia_css_dvs_6axis_config *
 ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream);
 
-/** @brief Free the DVS 2.0 6-axis config memory
+/* @brief Free the DVS 2.0 6-axis config memory
  * @param[in]  dvs_6axis_config Pointer to the DVS 6axis configuration buffer
  * @return     None
  */
 void
 ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config);
 
-/** @brief Allocate a dvs statistics map structure
+/* @brief Allocate a dvs statistics map structure
  * @param[in]  isp_stats pointer to ISP dvs statistics struct
  * @param[in]  data_ptr  host-side pointer to ISP dvs statistics.
  * @return     Pointer to the allocated dvs statistics map
@@ -280,7 +280,7 @@ ia_css_isp_dvs_statistics_map_allocate(
        const struct ia_css_isp_dvs_statistics *isp_stats,
        void *data_ptr);
 
-/** @brief Free the dvs statistics map
+/* @brief Free the dvs statistics map
  * @param[in]  me Pointer to the dvs statistics map
  * @return     None
  *
@@ -291,7 +291,7 @@ ia_css_isp_dvs_statistics_map_allocate(
 void
 ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me);
 
-/** @brief Allocate memory for the SKC DVS statistics on the ISP
+/* @brief Allocate memory for the SKC DVS statistics on the ISP
  * @return             Pointer to the allocated ACC DVS statistics buffer on the ISP
 */
 struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void);
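
The allocate/get/free entry points above pair an ISP-side statistics buffer with a host-side one. A minimal usage sketch for the DVS 2.0 variants, assuming a populated struct ia_css_dvs_grid_info and that IA_CSS_SUCCESS is the success member of enum ia_css_err (the enum body is outside this hunk):

#include <linux/printk.h>

static void example_fetch_dvs2_stats(const struct ia_css_dvs_grid_info *grid)
{
        struct ia_css_isp_dvs_statistics *isp_stats;
        struct ia_css_dvs2_statistics *host_stats;

        isp_stats  = ia_css_isp_dvs2_statistics_allocate(grid); /* ISP-side layout */
        host_stats = ia_css_dvs2_statistics_allocate(grid);     /* host-side layout */

        if (isp_stats && host_stats &&
            ia_css_get_dvs2_statistics(host_stats, isp_stats) != IA_CSS_SUCCESS)
                pr_warn("DVS2 statistics copy failed\n"); /* temporary memory shortage */

        if (host_stats)
                ia_css_dvs2_statistics_free(host_stats);
        if (isp_stats)
                ia_css_isp_dvs2_statistics_free(isp_stats);
}
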
index 1ae9daf..8b0218e 100644 (file)
 #include "ia_css_types.h"
 #include "ia_css_acc_types.h"
 
-/** @file
+/* @file
  * This file contains prototypes for functions that need to be provided to the
  * CSS-API host-code by the environment in which the CSS-API code runs.
  */
 
-/** Memory allocation attributes, for use in ia_css_css_mem_env. */
+/* Memory allocation attributes, for use in ia_css_css_mem_env. */
 enum ia_css_mem_attr {
        IA_CSS_MEM_ATTR_CACHED = 1 << 0,
        IA_CSS_MEM_ATTR_ZEROED = 1 << 1,
@@ -33,62 +33,62 @@ enum ia_css_mem_attr {
        IA_CSS_MEM_ATTR_CONTIGUOUS = 1 << 3,
 };
 
-/** Environment with function pointers for local IA memory allocation.
+/* Environment with function pointers for local IA memory allocation.
  *  This provides the CSS code with environment specific functionality
  *  for memory allocation of small local buffers such as local data structures.
  *  This is never expected to allocate more than one page of memory (4K bytes).
  */
 struct ia_css_cpu_mem_env {
        void (*flush)(struct ia_css_acc_fw *fw);
-       /**< Flush function to flush the cache for given accelerator. */
+       /** Flush function to flush the cache for given accelerator. */
 };
 
-/** Environment with function pointers to access the CSS hardware. This includes
+/* Environment with function pointers to access the CSS hardware. This includes
  *  registers and local memories.
  */
 struct ia_css_hw_access_env {
        void (*store_8)(hrt_address addr, uint8_t data);
-       /**< Store an 8 bit value into an address in the CSS HW address space.
+       /** Store an 8 bit value into an address in the CSS HW address space.
             The address must be an 8 bit aligned address. */
        void (*store_16)(hrt_address addr, uint16_t data);
-       /**< Store a 16 bit value into an address in the CSS HW address space.
+       /** Store a 16 bit value into an address in the CSS HW address space.
             The address must be a 16 bit aligned address. */
        void (*store_32)(hrt_address addr, uint32_t data);
-       /**< Store a 32 bit value into an address in the CSS HW address space.
+       /** Store a 32 bit value into an address in the CSS HW address space.
             The address must be a 32 bit aligned address. */
        uint8_t (*load_8)(hrt_address addr);
-       /**< Load an 8 bit value from an address in the CSS HW address
+       /** Load an 8 bit value from an address in the CSS HW address
             space. The address must be an 8 bit aligned address. */
        uint16_t (*load_16)(hrt_address addr);
-       /**< Load a 16 bit value from an address in the CSS HW address
+       /** Load a 16 bit value from an address in the CSS HW address
             space. The address must be a 16 bit aligned address. */
        uint32_t (*load_32)(hrt_address addr);
-       /**< Load a 32 bit value from an address in the CSS HW address
+       /** Load a 32 bit value from an address in the CSS HW address
             space. The address must be a 32 bit aligned address. */
        void (*store)(hrt_address addr, const void *data, uint32_t bytes);
-       /**< Store a number of bytes into a byte-aligned address in the CSS HW address space. */
+       /** Store a number of bytes into a byte-aligned address in the CSS HW address space. */
        void (*load)(hrt_address addr, void *data, uint32_t bytes);
-       /**< Load a number of bytes from a byte-aligned address in the CSS HW address space. */
+       /** Load a number of bytes from a byte-aligned address in the CSS HW address space. */
 };
 
-/** Environment with function pointers to print error and debug messages.
+/* Environment with function pointers to print error and debug messages.
  */
 struct ia_css_print_env {
        int (*debug_print)(const char *fmt, va_list args);
-       /**< Print a debug message. */
+       /** Print a debug message. */
        int (*error_print)(const char *fmt, va_list args);
-       /**< Print an error message.*/
+       /** Print an error message.*/
 };
 
-/** Environment structure. This includes function pointers to access several
+/* Environment structure. This includes function pointers to access several
  *  features provided by the environment in which the CSS API is used.
  *  This is used to run the camera IP in multiple platforms such as Linux,
  *  Windows and several simulation environments.
  */
 struct ia_css_env {
-       struct ia_css_cpu_mem_env   cpu_mem_env;   /**< local flush. */
-       struct ia_css_hw_access_env hw_access_env; /**< CSS HW access functions */
-       struct ia_css_print_env     print_env;     /**< Message printing env. */
+       struct ia_css_cpu_mem_env   cpu_mem_env;   /** local flush. */
+       struct ia_css_hw_access_env hw_access_env; /** CSS HW access functions */
+       struct ia_css_print_env     print_env;     /** Message printing env. */
 };
 
 #endif /* __IA_CSS_ENV_H */
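
struct ia_css_env is the host-to-CSS glue: the driver fills in these callbacks before the firmware is loaded. A minimal sketch, assuming the host wraps its own MMIO and logging primitives; the helper names are made up for illustration and only two of the hw_access_env hooks are shown:

#include <linux/printk.h>       /* vprintk() */

static void host_store_8(hrt_address addr, uint8_t data) { /* e.g. writeb() into the CSS BAR */ }
static uint8_t host_load_8(hrt_address addr) { return 0;   /* e.g. readb() */ }
static int host_print(const char *fmt, va_list args) { return vprintk(fmt, args); }

static struct ia_css_env css_env = {
        .hw_access_env = {
                .store_8 = host_store_8,
                .load_8  = host_load_8,
                /* .store_16/.store_32/.load_16/.load_32/.store/.load filled in likewise */
        },
        .print_env = {
                .debug_print = host_print,
                .error_print = host_print,
        },
};
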
index 572e4e5..cf89581 100644 (file)
 #ifndef __IA_CSS_ERR_H
 #define __IA_CSS_ERR_H
 
-/** @file
+/* @file
  * This file contains possible return values for most
  * functions in the CSS-API.
  */
 
-/** Errors, these values are used as the return value for most
+/* Errors, these values are used as the return value for most
  *  functions in this API.
  */
 enum ia_css_err {
@@ -41,22 +41,22 @@ enum ia_css_err {
        IA_CSS_ERR_NOT_SUPPORTED
 };
 
-/** FW warnings. This enum contains a value for each warning that
+/* FW warnings. This enum contains a value for each warning that
  * the SP FW can raise to indicate a potential performance issue
  */
 enum ia_css_fw_warning {
        IA_CSS_FW_WARNING_NONE,
-       IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /** < CSS system delayed because of insufficient space in the ISys queue.
+       IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the ISys queue.
                This warning can be avoided by de-queuing ISYS buffers in a more timely manner. */
-       IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /** < CSS system delayed because of insufficient space in the PSys queue.
+       IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the PSys queue.
                This warning can be avoided by de-queuing PSYS buffers in a more timely manner. */
-       IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /** < CSS system delayed because of insufficient available buffers.
+       IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /* < CSS system delayed because of insufficient available buffers.
                This warning can be avoided by unlocking locked frame-buffers in a more timely manner. */
-       IA_CSS_FW_WARNING_EXP_ID_LOCKED, /** < Exposure ID skipped because the frame associated to it was still locked.
+       IA_CSS_FW_WARNING_EXP_ID_LOCKED, /* < Exposure ID skipped because the frame associated to it was still locked.
                This warning can be avoided by unlocking locked frame-buffers in a more timely manner. */
-       IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /** < Exposure ID cannot be found on the circular buffer.
+       IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /* < Exposure ID cannot be found on the circular buffer.
                This warning can be avoided by unlocking locked frame-buffers in a more timely manner. */
-       IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /** < Frame and param pair mismatched in tagger.
+       IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /* < Frame and param pair mismatched in tagger.
                This warning can be avoided by providing a param set for each frame. */
 };
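
Most CSS-API entry points return enum ia_css_err shown above, so a driver usually folds it into a Linux errno at its boundary. A short sketch; IA_CSS_SUCCESS and IA_CSS_ERR_INVALID_ARGUMENTS are assumed members of the enum, only IA_CSS_ERR_NOT_SUPPORTED being visible in this hunk:

#include <linux/errno.h>

static int css_err_to_errno(enum ia_css_err err)
{
        switch (err) {
        case IA_CSS_SUCCESS:                    return 0;           /* assumed member */
        case IA_CSS_ERR_INVALID_ARGUMENTS:      return -EINVAL;     /* assumed member */
        case IA_CSS_ERR_NOT_SUPPORTED:          return -EOPNOTSUPP;
        default:                                return -EIO;
        }
}
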
 
index aaf3497..036a2f0 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_EVENT_PUBLIC_H
 #define __IA_CSS_EVENT_PUBLIC_H
 
-/** @file
+/* @file
  * This file contains CSS-API events functionality
  */
 
@@ -24,7 +24,7 @@
 #include <ia_css_types.h>      /* ia_css_pipe */
 #include <ia_css_timer.h>      /* ia_css_timer */
 
-/** The event type, distinguishes the kind of events that
+/* The event type, distinguishes the kind of events that
  * can be generated by the CSS system.
  *
  * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
  */
 enum ia_css_event_type {
        IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE             = 1 << 0,
-       /**< Output frame ready. */
+       /** Output frame ready. */
        IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE      = 1 << 1,
-       /**< Second output frame ready. */
+       /** Second output frame ready. */
        IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE          = 1 << 2,
-       /**< Viewfinder Output frame ready. */
+       /** Viewfinder Output frame ready. */
        IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE   = 1 << 3,
-       /**< Second viewfinder Output frame ready. */
+       /** Second viewfinder Output frame ready. */
        IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE            = 1 << 4,
-       /**< Indication that 3A statistics are available. */
+       /** Indication that 3A statistics are available. */
        IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE           = 1 << 5,
-       /**< Indication that DIS statistics are available. */
+       /** Indication that DIS statistics are available. */
        IA_CSS_EVENT_TYPE_PIPELINE_DONE                 = 1 << 6,
-       /**< Pipeline Done event, sent after last pipeline stage. */
+       /** Pipeline Done event, sent after last pipeline stage. */
        IA_CSS_EVENT_TYPE_FRAME_TAGGED                  = 1 << 7,
-       /**< Frame tagged. */
+       /** Frame tagged. */
        IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE              = 1 << 8,
-       /**< Input frame ready. */
+       /** Input frame ready. */
        IA_CSS_EVENT_TYPE_METADATA_DONE                 = 1 << 9,
-       /**< Metadata ready. */
+       /** Metadata ready. */
        IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE          = 1 << 10,
-       /**< Indication that LACE statistics are available. */
+       /** Indication that LACE statistics are available. */
        IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE            = 1 << 11,
-       /**< Extension stage complete. */
+       /** Extension stage complete. */
        IA_CSS_EVENT_TYPE_TIMER                         = 1 << 12,
-       /**< Timer event for measuring the SP side latencies. It contains the
+       /** Timer event for measuring the SP side latencies. It contains the
              32-bit timer value from the SP */
        IA_CSS_EVENT_TYPE_PORT_EOF                      = 1 << 13,
-       /**< End Of Frame event, sent when in buffered sensor mode. */
+       /** End Of Frame event, sent when in buffered sensor mode. */
        IA_CSS_EVENT_TYPE_FW_WARNING                    = 1 << 14,
-       /**< Performance warning encounter by FW */
+       /** Performance warning encountered by FW */
        IA_CSS_EVENT_TYPE_FW_ASSERT                     = 1 << 15,
-       /**< Assertion hit by FW */
+       /** Assertion hit by FW */
 };
 
 #define IA_CSS_EVENT_TYPE_NONE 0
 
-/** IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events.
+/* IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events.
  * The other events (such as PORT_EOF) cannot be enabled/disabled
  * and are hence excluded from this macro.
  */
@@ -89,7 +89,7 @@ enum ia_css_event_type {
         IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE         | \
         IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE)
 
-/** The event struct, container for the event type and its related values.
+/* The event struct, container for the event type and its related values.
  * Depending on the event type, either pipe or port will be filled.
  * Pipeline related events (like buffer/frame events) will return a valid and filled pipe handle.
  * For non-pipeline related events (i.e. stream-specific ones such as the EOF event), the port will be
@@ -97,14 +97,14 @@ enum ia_css_event_type {
  */
 struct ia_css_event {
        struct ia_css_pipe    *pipe;
-       /**< Pipe handle on which event happened, NULL for non pipe related
+       /** Pipe handle on which event happened, NULL for non pipe related
             events. */
        enum ia_css_event_type type;
-       /**< Type of Event, always valid/filled. */
+       /** Type of Event, always valid/filled. */
        uint8_t                port;
-       /**< Port number for EOF event (not valid for other events). */
+       /** Port number for EOF event (not valid for other events). */
        uint8_t                exp_id;
-       /**< Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events)
+       /** Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events)
             The exposure ID is unique only within a logical stream and it is
             only generated on systems that have an input system (such as 2400
             and 2401).
@@ -120,26 +120,26 @@ struct ia_css_event {
             in the exposure IDs. Therefore applications should not use this
             to detect frame drops. */
        uint32_t               fw_handle;
-       /**< Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other
+       /** Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other
             events). */
        enum ia_css_fw_warning fw_warning;
-       /**< Firmware warning code, only for WARNING events. */
+       /** Firmware warning code, only for WARNING events. */
        uint8_t                fw_assert_module_id;
-       /**< Firmware module id, only for ASSERT events, should be logged by driver. */
+       /** Firmware module id, only for ASSERT events, should be logged by driver. */
        uint16_t               fw_assert_line_no;
-       /**< Firmware line number, only for ASSERT events, should be logged by driver. */
+       /** Firmware line number, only for ASSERT events, should be logged by driver. */
        clock_value_t          timer_data;
-       /**< For storing the full 32-bit of the timer value. Valid only for TIMER
+       /** For storing the full 32-bit of the timer value. Valid only for TIMER
             event */
        uint8_t                timer_code;
-       /**< For storing the code of the TIMER event. Valid only for
+       /** For storing the code of the TIMER event. Valid only for
             TIMER event */
        uint8_t                timer_subcode;
-       /**< For storing the subcode of the TIMER event. Valid only
+       /** For storing the subcode of the TIMER event. Valid only
             for TIMER event */
 };
 
-/** @brief Dequeue a PSYS event from the CSS system.
+/* @brief Dequeue a PSYS event from the CSS system.
  *
  * @param[out] event   Pointer to the event struct which will be filled by
  *                      this function if an event is available.
@@ -156,7 +156,7 @@ struct ia_css_event {
 enum ia_css_err
 ia_css_dequeue_psys_event(struct ia_css_event *event);
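
ia_css_dequeue_psys_event() is normally polled from deferred interrupt context until the queue runs dry. A sketch of that loop, assuming IA_CSS_SUCCESS signals that an event was returned:

static void example_drain_psys_events(void)
{
        struct ia_css_event event;

        /* keep dequeuing until the return code says the queue is empty */
        while (ia_css_dequeue_psys_event(&event) == IA_CSS_SUCCESS) {
                switch (event.type) {
                case IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE:
                        /* return the finished output buffer to its owner */
                        break;
                case IA_CSS_EVENT_TYPE_PIPELINE_DONE:
                        /* event.pipe identifies which pipe completed */
                        break;
                default:
                        break;
                }
        }
}
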
 
-/** @brief Dequeue an event from the CSS system.
+/* @brief Dequeue an event from the CSS system.
  *
  * @param[out] event   Pointer to the event struct which will be filled by
  *                      this function if an event is available.
@@ -171,7 +171,7 @@ ia_css_dequeue_psys_event(struct ia_css_event *event);
 enum ia_css_err
 ia_css_dequeue_event(struct ia_css_event *event);
 
-/** @brief Dequeue an ISYS event from the CSS system.
+/* @brief Dequeue an ISYS event from the CSS system.
  *
  * @param[out] event   Pointer to the event struct which will be filled by
  *                      this function if an event is available.
index 06d375a..d7d7f0a 100644 (file)
 #ifndef __IA_CSS_FIRMWARE_H
 #define __IA_CSS_FIRMWARE_H
 
-/** @file
+/* @file
  * This file contains firmware loading/unloading support functionality
  */
 
 #include "ia_css_err.h"
 #include "ia_css_env.h"
 
-/** CSS firmware package structure.
+/* CSS firmware package structure.
  */
 struct ia_css_fw {
-       void        *data;  /**< pointer to the firmware data */
-       unsigned int bytes; /**< length in bytes of firmware data */
+       void        *data;  /** pointer to the firmware data */
+       unsigned int bytes; /** length in bytes of firmware data */
 };
 
-/** @brief Loads the firmware
+/* @brief Loads the firmware
  * @param[in]  env             Environment, provides functions to access the
  *                             environment in which the CSS code runs. This is
  *                             used for host side memory access and message
@@ -51,7 +51,7 @@ enum ia_css_err
 ia_css_load_firmware(const struct ia_css_env *env,
            const struct ia_css_fw  *fw);
 
-/** @brief Unloads the firmware
+/* @brief Unloads the firmware
  * @return     None
  *
  * This function unloads the firmware loaded by ia_css_load_firmware.
@@ -61,7 +61,7 @@ ia_css_load_firmware(const struct ia_css_env *env,
 void
 ia_css_unload_firmware(void);
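
ia_css_load_firmware()/ia_css_unload_firmware() bracket the lifetime of the CSS binaries. A sketch that feeds them from the kernel firmware loader; the firmware file name is made up, and releasing the blob straight away assumes the loader copies what it needs:

#include <linux/firmware.h>
#include <linux/errno.h>

static int example_load_css_fw(struct device *dev, const struct ia_css_env *env)
{
        const struct firmware *blob;
        struct ia_css_fw css_fw;
        int ret;

        ret = request_firmware(&blob, "shisp_example.bin", dev); /* hypothetical name */
        if (ret)
                return ret;

        css_fw.data  = (void *)blob->data;
        css_fw.bytes = blob->size;

        if (ia_css_load_firmware(env, &css_fw) != IA_CSS_SUCCESS)
                ret = -EINVAL;

        release_firmware(blob); /* assumed safe once the load call has returned */
        return ret;
}
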
 
-/** @brief Checks firmware version
+/* @brief Checks firmware version
  * @param[in]  fw      Firmware package containing the firmware for all
  *                     predefined ISP binaries.
  * @return             Returns true when the firmware version matches with the CSS
index da9c601..e5ffc57 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef _IA_CSS_FRAC_H
 #define _IA_CSS_FRAC_H
 
-/** @file
+/* @file
  * This file contains typedefs used for fractional numbers
  */
 
  * NOTE: the 16 bit fixed point types actually occupy 32 bits
  * to save on extension operations in the ISP code.
  */
-/** Unsigned fixed point value, 0 integer bits, 16 fractional bits */
+/* Unsigned fixed point value, 0 integer bits, 16 fractional bits */
 typedef uint32_t ia_css_u0_16;
-/** Unsigned fixed point value, 5 integer bits, 11 fractional bits */
+/* Unsigned fixed point value, 5 integer bits, 11 fractional bits */
 typedef uint32_t ia_css_u5_11;
-/** Unsigned fixed point value, 8 integer bits, 8 fractional bits */
+/* Unsigned fixed point value, 8 integer bits, 8 fractional bits */
 typedef uint32_t ia_css_u8_8;
-/** Signed fixed point value, 0 integer bits, 15 fractional bits */
+/* Signed fixed point value, 0 integer bits, 15 fractional bits */
 typedef int32_t ia_css_s0_15;
 
 #endif /* _IA_CSS_FRAC_H */
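
These typedefs follow the usual Qm.n convention, so converting a host-side quantity means scaling by 2^n: with 8 fractional bits a gain of 1.5 becomes 1.5 * 256 = 384 (0x180). Two illustrative helpers; the names are not part of the CSS API:

static inline ia_css_u8_8 u8_8_from_millis(unsigned int millis)
{
        /* value = millis/1000 in u8.8: scale by 2^8 = 256, round to nearest */
        return (ia_css_u8_8)((millis * 256u + 500u) / 1000u);
}

static inline ia_css_s0_15 s0_15_from_permille(int permille)
{
        /* value in [-1, 1) given in thousandths, scaled by 2^15 = 32768 */
        return (ia_css_s0_15)((permille * 32768) / 1000);
}
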
index d534fbd..2f177ed 100644 (file)
 #ifndef __IA_CSS_FRAME_FORMAT_H
 #define __IA_CSS_FRAME_FORMAT_H
 
-/** @file
+/* @file
  * This file contains information about formats supported in the ISP
  */
 
-/** Frame formats, some of these come from fourcc.org, others are
+/* Frame formats, some of these come from fourcc.org, others are
    better explained by video4linux2. The NV11 seems to be described only
    on MSDN pages, but even those seem to be gone now.
    Frames can come in many forms, the main categories are RAW, RGB and YUV
        - css/bxt_sandbox/isysapi/interface/ia_css_isysapi_fw_types.h
 */
 enum ia_css_frame_format {
-       IA_CSS_FRAME_FORMAT_NV11 = 0,   /**< 12 bit YUV 411, Y, UV plane */
-       IA_CSS_FRAME_FORMAT_NV12,       /**< 12 bit YUV 420, Y, UV plane */
-       IA_CSS_FRAME_FORMAT_NV12_16,    /**< 16 bit YUV 420, Y, UV plane */
-       IA_CSS_FRAME_FORMAT_NV12_TILEY, /**< 12 bit YUV 420, Intel proprietary tiled format, TileY */
-       IA_CSS_FRAME_FORMAT_NV16,       /**< 16 bit YUV 422, Y, UV plane */
-       IA_CSS_FRAME_FORMAT_NV21,       /**< 12 bit YUV 420, Y, VU plane */
-       IA_CSS_FRAME_FORMAT_NV61,       /**< 16 bit YUV 422, Y, VU plane */
-       IA_CSS_FRAME_FORMAT_YV12,       /**< 12 bit YUV 420, Y, V, U plane */
-       IA_CSS_FRAME_FORMAT_YV16,       /**< 16 bit YUV 422, Y, V, U plane */
-       IA_CSS_FRAME_FORMAT_YUV420,     /**< 12 bit YUV 420, Y, U, V plane */
-       IA_CSS_FRAME_FORMAT_YUV420_16,  /**< yuv420, 16 bits per subpixel */
-       IA_CSS_FRAME_FORMAT_YUV422,     /**< 16 bit YUV 422, Y, U, V plane */
-       IA_CSS_FRAME_FORMAT_YUV422_16,  /**< yuv422, 16 bits per subpixel */
-       IA_CSS_FRAME_FORMAT_UYVY,       /**< 16 bit YUV 422, UYVY interleaved */
-       IA_CSS_FRAME_FORMAT_YUYV,       /**< 16 bit YUV 422, YUYV interleaved */
-       IA_CSS_FRAME_FORMAT_YUV444,     /**< 24 bit YUV 444, Y, U, V plane */
-       IA_CSS_FRAME_FORMAT_YUV_LINE,   /**< Internal format, 2 y lines followed
+       IA_CSS_FRAME_FORMAT_NV11 = 0,   /** 12 bit YUV 411, Y, UV plane */
+       IA_CSS_FRAME_FORMAT_NV12,       /** 12 bit YUV 420, Y, UV plane */
+       IA_CSS_FRAME_FORMAT_NV12_16,    /** 16 bit YUV 420, Y, UV plane */
+       IA_CSS_FRAME_FORMAT_NV12_TILEY, /** 12 bit YUV 420, Intel proprietary tiled format, TileY */
+       IA_CSS_FRAME_FORMAT_NV16,       /** 16 bit YUV 422, Y, UV plane */
+       IA_CSS_FRAME_FORMAT_NV21,       /** 12 bit YUV 420, Y, VU plane */
+       IA_CSS_FRAME_FORMAT_NV61,       /** 16 bit YUV 422, Y, VU plane */
+       IA_CSS_FRAME_FORMAT_YV12,       /** 12 bit YUV 420, Y, V, U plane */
+       IA_CSS_FRAME_FORMAT_YV16,       /** 16 bit YUV 422, Y, V, U plane */
+       IA_CSS_FRAME_FORMAT_YUV420,     /** 12 bit YUV 420, Y, U, V plane */
+       IA_CSS_FRAME_FORMAT_YUV420_16,  /** yuv420, 16 bits per subpixel */
+       IA_CSS_FRAME_FORMAT_YUV422,     /** 16 bit YUV 422, Y, U, V plane */
+       IA_CSS_FRAME_FORMAT_YUV422_16,  /** yuv422, 16 bits per subpixel */
+       IA_CSS_FRAME_FORMAT_UYVY,       /** 16 bit YUV 422, UYVY interleaved */
+       IA_CSS_FRAME_FORMAT_YUYV,       /** 16 bit YUV 422, YUYV interleaved */
+       IA_CSS_FRAME_FORMAT_YUV444,     /** 24 bit YUV 444, Y, U, V plane */
+       IA_CSS_FRAME_FORMAT_YUV_LINE,   /** Internal format, 2 y lines followed
                                             by a uvinterleaved line */
-       IA_CSS_FRAME_FORMAT_RAW,        /**< RAW, 1 plane */
-       IA_CSS_FRAME_FORMAT_RGB565,     /**< 16 bit RGB, 1 plane. Each 3 sub
+       IA_CSS_FRAME_FORMAT_RAW,        /** RAW, 1 plane */
+       IA_CSS_FRAME_FORMAT_RGB565,     /** 16 bit RGB, 1 plane. Each 3 sub
                                             pixels are packed into one 16 bit
                                             value, 5 bits for R, 6 bits for G
                                             and 5 bits for B. */
-       IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /**< 24 bit RGB, 3 planes */
-       IA_CSS_FRAME_FORMAT_RGBA888,    /**< 32 bit RGBA, 1 plane, A=Alpha
+       IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /** 24 bit RGB, 3 planes */
+       IA_CSS_FRAME_FORMAT_RGBA888,    /** 32 bit RGBA, 1 plane, A=Alpha
                                             (alpha is unused) */
-       IA_CSS_FRAME_FORMAT_QPLANE6, /**< Internal, for advanced ISP */
-       IA_CSS_FRAME_FORMAT_BINARY_8,   /**< byte stream, used for jpeg. For
+       IA_CSS_FRAME_FORMAT_QPLANE6, /** Internal, for advanced ISP */
+       IA_CSS_FRAME_FORMAT_BINARY_8,   /** byte stream, used for jpeg. For
                                             frames of this type, we set the
                                             height to 1 and the width to the
                                             number of allocated bytes. */
-       IA_CSS_FRAME_FORMAT_MIPI,       /**< MIPI frame, 1 plane */
-       IA_CSS_FRAME_FORMAT_RAW_PACKED, /**< RAW, 1 plane, packed */
-       IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8,        /**< 8 bit per Y/U/V.
+       IA_CSS_FRAME_FORMAT_MIPI,       /** MIPI frame, 1 plane */
+       IA_CSS_FRAME_FORMAT_RAW_PACKED, /** RAW, 1 plane, packed */
+       IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8,        /** 8 bit per Y/U/V.
                                                           Y odd line; UYVY
                                                           interleaved even line */
-       IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /**< Legacy YUV420. UY odd
+       IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /** Legacy YUV420. UY odd
                                                           line; VY even line */
-       IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10       /**< 10 bit per Y/U/V. Y odd
+       IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10       /** 10 bit per Y/U/V. Y odd
                                                           line; UYVY interleaved
                                                           even line */
 };
@@ -95,7 +95,7 @@ enum ia_css_frame_format {
 /*       because of issues this would cause with the Clockwork code checking tool.               */
 #define IA_CSS_FRAME_FORMAT_NUM (IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 + 1)
 
-/** Number of valid output frame formats for ISP **/
+/* Number of valid output frame formats for ISP */
 #define IA_CSS_FRAME_OUT_FORMAT_NUM    (IA_CSS_FRAME_FORMAT_RGBA888 + 1)
 
 #endif /* __IA_CSS_FRAME_FORMAT_H */
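
Several of these formats correspond directly to standard V4L2 pixel formats, which is how a V4L2 driver built on the CSS would typically expose them. An illustrative, deliberately incomplete mapping, not necessarily the table the driver actually uses:

#include <linux/types.h>
#include <linux/videodev2.h>

static u32 css_format_to_v4l2(enum ia_css_frame_format fmt)
{
        switch (fmt) {
        case IA_CSS_FRAME_FORMAT_NV12:   return V4L2_PIX_FMT_NV12;
        case IA_CSS_FRAME_FORMAT_NV21:   return V4L2_PIX_FMT_NV21;
        case IA_CSS_FRAME_FORMAT_YUV420: return V4L2_PIX_FMT_YUV420;
        case IA_CSS_FRAME_FORMAT_UYVY:   return V4L2_PIX_FMT_UYVY;
        case IA_CSS_FRAME_FORMAT_YUYV:   return V4L2_PIX_FMT_YUYV;
        case IA_CSS_FRAME_FORMAT_RGB565: return V4L2_PIX_FMT_RGB565;
        default:                         return 0; /* no 1:1 V4L2 equivalent */
        }
}
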
index 92f2389..ba7a076 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_FRAME_PUBLIC_H
 #define __IA_CSS_FRAME_PUBLIC_H
 
-/** @file
+/* @file
  * This file contains structs to describe various frame-formats supported by the ISP.
  */
 
 #include "ia_css_frame_format.h"
 #include "ia_css_buffer.h"
 
-/** For RAW input, the bayer order needs to be specified separately. There
+/* For RAW input, the bayer order needs to be specified separately. There
  *  are 4 possible orders. The name is constructed by taking the first two
  *  colors on the first line and the first two colors from the second line.
  */
 enum ia_css_bayer_order {
-       IA_CSS_BAYER_ORDER_GRBG, /**< GRGRGRGRGR .. BGBGBGBGBG */
-       IA_CSS_BAYER_ORDER_RGGB, /**< RGRGRGRGRG .. GBGBGBGBGB */
-       IA_CSS_BAYER_ORDER_BGGR, /**< BGBGBGBGBG .. GRGRGRGRGR */
-       IA_CSS_BAYER_ORDER_GBRG, /**< GBGBGBGBGB .. RGRGRGRGRG */
+       IA_CSS_BAYER_ORDER_GRBG, /** GRGRGRGRGR .. BGBGBGBGBG */
+       IA_CSS_BAYER_ORDER_RGGB, /** RGRGRGRGRG .. GBGBGBGBGB */
+       IA_CSS_BAYER_ORDER_BGGR, /** BGBGBGBGBG .. GRGRGRGRGR */
+       IA_CSS_BAYER_ORDER_GBRG, /** GBGBGBGBGB .. RGRGRGRGRG */
 };
 #define IA_CSS_BAYER_ORDER_NUM (IA_CSS_BAYER_ORDER_GBRG + 1)
 
-/** Frame plane structure. This describes one plane in an image
+/* Frame plane structure. This describes one plane in an image
  *  frame buffer.
  */
 struct ia_css_frame_plane {
-       unsigned int height; /**< height of a plane in lines */
-       unsigned int width;  /**< width of a line, in DMA elements, note that
+       unsigned int height; /** height of a plane in lines */
+       unsigned int width;  /** width of a line, in DMA elements, note that
                                  for RGB565 the three subpixels are stored in
                                  one element. For all other formats this is
                                  the number of subpixels per line. */
-       unsigned int stride; /**< stride of a line in bytes */
-       unsigned int offset; /**< offset in bytes to start of frame data.
+       unsigned int stride; /** stride of a line in bytes */
+       unsigned int offset; /** offset in bytes to start of frame data.
                                  offset is wrt data field in ia_css_frame */
 };
 
-/** Binary "plane". This is used to story binary streams such as jpeg
+/* Binary "plane". This is used to story binary streams such as jpeg
  *  images. This is not actually a real plane.
  */
 struct ia_css_frame_binary_plane {
-       unsigned int              size; /**< number of bytes in the stream */
-       struct ia_css_frame_plane data; /**< plane */
+       unsigned int              size; /** number of bytes in the stream */
+       struct ia_css_frame_plane data; /** plane */
 };
 
-/** Container for planar YUV frames. This contains 3 planes.
+/* Container for planar YUV frames. This contains 3 planes.
  */
 struct ia_css_frame_yuv_planes {
-       struct ia_css_frame_plane y; /**< Y plane */
-       struct ia_css_frame_plane u; /**< U plane */
-       struct ia_css_frame_plane v; /**< V plane */
+       struct ia_css_frame_plane y; /** Y plane */
+       struct ia_css_frame_plane u; /** U plane */
+       struct ia_css_frame_plane v; /** V plane */
 };
 
-/** Container for semi-planar YUV frames.
+/* Container for semi-planar YUV frames.
   */
 struct ia_css_frame_nv_planes {
-       struct ia_css_frame_plane y;  /**< Y plane */
-       struct ia_css_frame_plane uv; /**< UV plane */
+       struct ia_css_frame_plane y;  /** Y plane */
+       struct ia_css_frame_plane uv; /** UV plane */
 };
 
-/** Container for planar RGB frames. Each color has its own plane.
+/* Container for planar RGB frames. Each color has its own plane.
  */
 struct ia_css_frame_rgb_planes {
-       struct ia_css_frame_plane r; /**< Red plane */
-       struct ia_css_frame_plane g; /**< Green plane */
-       struct ia_css_frame_plane b; /**< Blue plane */
+       struct ia_css_frame_plane r; /** Red plane */
+       struct ia_css_frame_plane g; /** Green plane */
+       struct ia_css_frame_plane b; /** Blue plane */
 };
 
-/** Container for 6-plane frames. These frames are used internally
+/* Container for 6-plane frames. These frames are used internally
  *  in the advanced ISP only.
  */
 struct ia_css_frame_plane6_planes {
-       struct ia_css_frame_plane r;      /**< Red plane */
-       struct ia_css_frame_plane r_at_b; /**< Red at blue plane */
-       struct ia_css_frame_plane gr;     /**< Red-green plane */
-       struct ia_css_frame_plane gb;     /**< Blue-green plane */
-       struct ia_css_frame_plane b;      /**< Blue plane */
-       struct ia_css_frame_plane b_at_r; /**< Blue at red plane */
+       struct ia_css_frame_plane r;      /** Red plane */
+       struct ia_css_frame_plane r_at_b; /** Red at blue plane */
+       struct ia_css_frame_plane gr;     /** Red-green plane */
+       struct ia_css_frame_plane gb;     /** Blue-green plane */
+       struct ia_css_frame_plane b;      /** Blue plane */
+       struct ia_css_frame_plane b_at_r; /** Blue at red plane */
 };
 
 /* Crop info struct - stores the lines to be cropped in isp */
@@ -103,15 +103,15 @@ struct ia_css_crop_info {
        unsigned int start_line;
 };
 
-/** Frame info struct. This describes the contents of an image frame buffer.
+/* Frame info struct. This describes the contents of an image frame buffer.
   */
 struct ia_css_frame_info {
-       struct ia_css_resolution res; /**< Frame resolution (valid data) */
-       unsigned int padded_width; /**< stride of line in memory (in pixels) */
-       enum ia_css_frame_format format; /**< format of the frame data */
-       unsigned int raw_bit_depth; /**< number of valid bits per pixel,
+       struct ia_css_resolution res; /** Frame resolution (valid data) */
+       unsigned int padded_width; /** stride of line in memory (in pixels) */
+       enum ia_css_frame_format format; /** format of the frame data */
+       unsigned int raw_bit_depth; /** number of valid bits per pixel,
                                         only valid for RAW bayer frames */
-       enum ia_css_bayer_order raw_bayer_order; /**< bayer order, only valid
+       enum ia_css_bayer_order raw_bayer_order; /** bayer order, only valid
                                                      for RAW bayer frames */
        /* the params below are computed based on bayer_order
         * we can remove the raw_bayer_order if it is redundant
@@ -136,9 +136,9 @@ struct ia_css_frame_info {
  *  Specifies the DVS loop delay in "frame periods"
  */
 enum ia_css_frame_delay {
-       IA_CSS_FRAME_DELAY_0, /**< Frame delay = 0 */
-       IA_CSS_FRAME_DELAY_1, /**< Frame delay = 1 */
-       IA_CSS_FRAME_DELAY_2  /**< Frame delay = 2 */
+       IA_CSS_FRAME_DELAY_0, /** Frame delay = 0 */
+       IA_CSS_FRAME_DELAY_1, /** Frame delay = 1 */
+       IA_CSS_FRAME_DELAY_2  /** Frame delay = 2 */
 };
 
 enum ia_css_frame_flash_state {
@@ -147,13 +147,13 @@ enum ia_css_frame_flash_state {
        IA_CSS_FRAME_FLASH_STATE_FULL
 };
 
-/** Frame structure. This structure describes an image buffer or frame.
+/* Frame structure. This structure describes an image buffer or frame.
  *  This is the main structure used for all input and output images.
  */
 struct ia_css_frame {
-       struct ia_css_frame_info info; /**< info struct describing the frame */
-       ia_css_ptr   data;             /**< pointer to start of image data */
-       unsigned int data_bytes;       /**< size of image data in bytes */
+       struct ia_css_frame_info info; /** info struct describing the frame */
+       ia_css_ptr   data;             /** pointer to start of image data */
+       unsigned int data_bytes;       /** size of image data in bytes */
        /* LA: move this to ia_css_buffer */
        /*
         * -1 if data address is static during life time of pipeline
@@ -171,10 +171,10 @@ struct ia_css_frame {
        enum ia_css_buffer_type buf_type;
        enum ia_css_frame_flash_state flash_state;
        unsigned int exp_id;
-       /**< exposure id, see ia_css_event_public.h for more detail */
-       uint32_t isp_config_id; /**< Unique ID to track which config was actually applied to a particular frame */
-       bool valid; /**< First video output frame is not valid */
-       bool contiguous; /**< memory is allocated physically contiguously */
+       /** exposure id, see ia_css_event_public.h for more detail */
+       uint32_t isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */
+       bool valid; /** First video output frame is not valid */
+       bool contiguous; /** memory is allocated physically contiguously */
        union {
                unsigned int    _initialisation_dummy;
                struct ia_css_frame_plane raw;
@@ -185,7 +185,7 @@ struct ia_css_frame {
                struct ia_css_frame_nv_planes nv;
                struct ia_css_frame_plane6_planes plane6;
                struct ia_css_frame_binary_plane binary;
-       } planes; /**< frame planes, select the right one based on
+       } planes; /** frame planes, select the right one based on
                       info.format */
 };
 
@@ -204,7 +204,7 @@ struct ia_css_frame {
        { 0 }                                   /* planes */ \
 }
 
-/** @brief Fill a frame with zeros
+/* @brief Fill a frame with zeros
  *
  * @param      frame           The frame.
  * @return     None
@@ -213,7 +213,7 @@ struct ia_css_frame {
  */
 void ia_css_frame_zero(struct ia_css_frame *frame);
 
-/** @brief Allocate a CSS frame structure
+/* @brief Allocate a CSS frame structure
  *
  * @param      frame           The allocated frame.
  * @param      width           The width (in pixels) of the frame.
@@ -234,7 +234,7 @@ ia_css_frame_allocate(struct ia_css_frame **frame,
                      unsigned int stride,
                      unsigned int raw_bit_depth);
 
-/** @brief Allocate a CSS frame structure using a frame info structure.
+/* @brief Allocate a CSS frame structure using a frame info structure.
  *
  * @param      frame   The allocated frame.
  * @param[in]  info    The frame info structure.
@@ -247,7 +247,7 @@ ia_css_frame_allocate(struct ia_css_frame **frame,
 enum ia_css_err
 ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
                                const struct ia_css_frame_info *info);
-/** @brief Free a CSS frame structure.
+/* @brief Free a CSS frame structure.
  *
  * @param[in]  frame   Pointer to the frame.
  * @return     None
@@ -258,7 +258,7 @@ ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
 void
 ia_css_frame_free(struct ia_css_frame *frame);
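
Allocating from a frame info struct is the usual way to get buffers whose layout matches what a pipe reports. A sketch for an NV12 frame, assuming struct ia_css_resolution exposes width/height members and IA_CSS_SUCCESS as before:

static struct ia_css_frame *example_alloc_nv12_frame(unsigned int w, unsigned int h)
{
        struct ia_css_frame_info info = {
                .res           = { .width = w, .height = h }, /* assumed field names */
                .padded_width  = w,                           /* stride in pixels */
                .format        = IA_CSS_FRAME_FORMAT_NV12,
                .raw_bit_depth = 0,                           /* not a RAW frame */
        };
        struct ia_css_frame *frame = NULL;

        if (ia_css_frame_allocate_from_info(&frame, &info) != IA_CSS_SUCCESS)
                return NULL;

        return frame; /* release later with ia_css_frame_free(frame) */
}
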
 
-/** @brief Allocate a contiguous CSS frame structure
+/* @brief Allocate a contiguous CSS frame structure
  *
  * @param      frame           The allocated frame.
  * @param      width           The width (in pixels) of the frame.
@@ -280,7 +280,7 @@ ia_css_frame_allocate_contiguous(struct ia_css_frame **frame,
                                 unsigned int stride,
                                 unsigned int raw_bit_depth);
 
-/** @brief Allocate a contiguous CSS frame from a frame info structure.
+/* @brief Allocate a contiguous CSS frame from a frame info structure.
  *
  * @param      frame   The allocated frame.
  * @param[in]  info    The frame info structure.
@@ -296,7 +296,7 @@ enum ia_css_err
 ia_css_frame_allocate_contiguous_from_info(struct ia_css_frame **frame,
                                          const struct ia_css_frame_info *info);
 
-/** @brief Allocate a CSS frame structure using a frame info structure.
+/* @brief Allocate a CSS frame structure using a frame info structure.
  *
  * @param      frame   The allocated frame.
  * @param[in]  info    The frame info structure.
@@ -309,7 +309,7 @@ enum ia_css_err
 ia_css_frame_create_from_info(struct ia_css_frame **frame,
        const struct ia_css_frame_info *info);
 
-/** @brief Set a mapped data buffer to a CSS frame
+/* @brief Set a mapped data buffer to a CSS frame
  *
  * @param[in]  frame       Valid CSS frame pointer
  * @param[in]  mapped_data  Mapped data buffer to be assigned to the CSS frame
@@ -327,7 +327,7 @@ ia_css_frame_set_data(struct ia_css_frame *frame,
        const ia_css_ptr   mapped_data,
        size_t data_size_bytes);
 
-/** @brief Map an existing frame data pointer to a CSS frame.
+/* @brief Map an existing frame data pointer to a CSS frame.
  *
  * @param      frame           Pointer to the frame to be initialized
  * @param[in]  info            The frame info.
@@ -350,7 +350,7 @@ ia_css_frame_map(struct ia_css_frame **frame,
                 uint16_t attribute,
                 void *context);
 
-/** @brief Unmap a CSS frame structure.
+/* @brief Unmap a CSS frame structure.
  *
  * @param[in]  frame   Pointer to the CSS frame.
  * @return     None
index 8a17c33..f415570 100644 (file)
 #ifndef __IA_CSS_INPUT_PORT_H
 #define __IA_CSS_INPUT_PORT_H
 
-/** @file
+/* @file
  * This file contains information about the possible input ports for CSS
  */
 
-/** Enumeration of the physical input ports on the CSS hardware.
+/* Enumeration of the physical input ports on the CSS hardware.
  *  There are 3 MIPI CSI-2 ports.
  */
 enum ia_css_csi2_port {
@@ -28,39 +28,39 @@ enum ia_css_csi2_port {
        IA_CSS_CSI2_PORT2  /* Implicitly map to MIPI_PORT2_ID */
 };
 
-/** Backward compatible for CSS API 2.0 only
+/* Backward compatible for CSS API 2.0 only
  *  TO BE REMOVED when all drivers move to CSS API 2.1
  */
 #define        IA_CSS_CSI2_PORT_4LANE IA_CSS_CSI2_PORT0
 #define        IA_CSS_CSI2_PORT_1LANE IA_CSS_CSI2_PORT1
 #define        IA_CSS_CSI2_PORT_2LANE IA_CSS_CSI2_PORT2
 
-/** The CSI2 interface supports 2 types of compression or can
+/* The CSI2 interface supports 2 types of compression or can
  *  be run without compression.
  */
 enum ia_css_csi2_compression_type {
-       IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /**< No compression */
-       IA_CSS_CSI2_COMPRESSION_TYPE_1,    /**< Compression scheme 1 */
-       IA_CSS_CSI2_COMPRESSION_TYPE_2     /**< Compression scheme 2 */
+       IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /** No compression */
+       IA_CSS_CSI2_COMPRESSION_TYPE_1,    /** Compression scheme 1 */
+       IA_CSS_CSI2_COMPRESSION_TYPE_2     /** Compression scheme 2 */
 };
 
 struct ia_css_csi2_compression {
        enum ia_css_csi2_compression_type type;
-       /**< Compression used */
+       /** Compression used */
        unsigned int                      compressed_bits_per_pixel;
-       /**< Compressed bits per pixel (only when compression is enabled) */
+       /** Compressed bits per pixel (only when compression is enabled) */
        unsigned int                      uncompressed_bits_per_pixel;
-       /**< Uncompressed bits per pixel (only when compression is enabled) */
+       /** Uncompressed bits per pixel (only when compression is enabled) */
 };
 
-/** Input port structure.
+/* Input port structure.
  */
 struct ia_css_input_port {
-       enum ia_css_csi2_port port; /**< Physical CSI-2 port */
-       unsigned int num_lanes; /**< Number of lanes used (4-lane port only) */
-       unsigned int timeout;   /**< Timeout value */
-       unsigned int rxcount;   /**< Register value, should include all lanes */
-       struct ia_css_csi2_compression compression; /**< Compression used */
+       enum ia_css_csi2_port port; /** Physical CSI-2 port */
+       unsigned int num_lanes; /** Number of lanes used (4-lane port only) */
+       unsigned int timeout;   /** Timeout value */
+       unsigned int rxcount;   /** Register value, should include all lanes */
+       struct ia_css_csi2_compression compression; /** Compression used */
 };
 
 #endif /* __IA_CSS_INPUT_PORT_H */
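
struct ia_css_input_port is plain configuration data that the driver fills per sensor. A sketch for an uncompressed sensor on the 4-lane port; the timeout and rxcount values are placeholders, not recommendations:

static const struct ia_css_input_port example_port = {
        .port      = IA_CSS_CSI2_PORT0,  /* the 4-lane capable port */
        .num_lanes = 4,
        .timeout   = 0x4000,             /* placeholder */
        .rxcount   = 0x04040404,         /* placeholder, should cover all lanes */
        .compression = {
                .type = IA_CSS_CSI2_COMPRESSION_TYPE_NONE,
        },
};
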
index 416ca4d..10ef611 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_IRQ_H
 #define __IA_CSS_IRQ_H
 
-/** @file
+/* @file
  * This file contains information for Interrupts/IRQs from CSS
  */
 
 #include "ia_css_pipe_public.h"
 #include "ia_css_input_port.h"
 
-/** Interrupt types, these enumerate all supported interrupt types.
+/* Interrupt types, these enumerate all supported interrupt types.
  */
 enum ia_css_irq_type {
-       IA_CSS_IRQ_TYPE_EDGE,  /**< Edge (level) sensitive interrupt */
-       IA_CSS_IRQ_TYPE_PULSE  /**< Pulse-shaped interrupt */
+       IA_CSS_IRQ_TYPE_EDGE,  /** Edge (level) sensitive interrupt */
+       IA_CSS_IRQ_TYPE_PULSE  /** Pulse-shaped interrupt */
 };
 
-/** Interrupt request type.
+/* Interrupt request type.
  *  When the CSS hardware generates an interrupt, a function in this API
  *  needs to be called to retrieve information about the interrupt.
  *  This interrupt type is part of this information and indicates what
@@ -46,55 +46,55 @@ enum ia_css_irq_type {
  */
 enum ia_css_irq_info {
        IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR            = 1 << 0,
-       /**< the css receiver has encountered an error */
+       /** the css receiver has encountered an error */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW    = 1 << 1,
-       /**< the FIFO in the csi receiver has overflown */
+       /** the FIFO in the csi receiver has overflown */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF              = 1 << 2,
-       /**< the css receiver received the start of frame */
+       /** the css receiver received the start of frame */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF              = 1 << 3,
-       /**< the css receiver received the end of frame */
+       /** the css receiver received the end of frame */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_SOL              = 1 << 4,
-       /**< the css receiver received the start of line */
+       /** the css receiver received the start of line */
        IA_CSS_IRQ_INFO_PSYS_EVENTS_READY             = 1 << 5,
-       /**< One or more events are available in the PSYS event queue */
+       /** One or more events are available in the PSYS event queue */
        IA_CSS_IRQ_INFO_EVENTS_READY = IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
-       /**< deprecated{obsolete version of IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
+       /** deprecated{obsolete version of IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
         * same functionality.} */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_EOL              = 1 << 6,
-       /**< the css receiver received the end of line */
+       /** the css receiver received the end of line */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_SIDEBAND_CHANGED = 1 << 7,
-       /**< the css receiver received a change in side band signals */
+       /** the css receiver received a change in side band signals */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_0      = 1 << 8,
-       /**< generic short packets (0) */
+       /** generic short packets (0) */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_1      = 1 << 9,
-       /**< generic short packets (1) */
+       /** generic short packets (1) */
        IA_CSS_IRQ_INFO_IF_PRIM_ERROR                 = 1 << 10,
-       /**< the primary input formatter (A) has encountered an error */
+       /** the primary input formatter (A) has encountered an error */
        IA_CSS_IRQ_INFO_IF_PRIM_B_ERROR               = 1 << 11,
-       /**< the primary input formatter (B) has encountered an error */
+       /** the primary input formatter (B) has encountered an error */
        IA_CSS_IRQ_INFO_IF_SEC_ERROR                  = 1 << 12,
-       /**< the secondary input formatter has encountered an error */
+       /** the secondary input formatter has encountered an error */
        IA_CSS_IRQ_INFO_STREAM_TO_MEM_ERROR           = 1 << 13,
-       /**< the stream-to-memory device has encountered an error */
+       /** the stream-to-memory device has encountered an error */
        IA_CSS_IRQ_INFO_SW_0                          = 1 << 14,
-       /**< software interrupt 0 */
+       /** software interrupt 0 */
        IA_CSS_IRQ_INFO_SW_1                          = 1 << 15,
-       /**< software interrupt 1 */
+       /** software interrupt 1 */
        IA_CSS_IRQ_INFO_SW_2                          = 1 << 16,
-       /**< software interrupt 2 */
+       /** software interrupt 2 */
        IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY   = 1 << 17,
-       /**< ISP binary statistics are ready */
+       /** ISP binary statistics are ready */
        IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR            = 1 << 18,
-       /**< the input system in in error */
+       /** the input system is in error */
        IA_CSS_IRQ_INFO_IF_ERROR                      = 1 << 19,
-       /**< the input formatter in in error */
+       /** the input formatter is in error */
        IA_CSS_IRQ_INFO_DMA_ERROR                     = 1 << 20,
-       /**< the dma in in error */
+       /** the DMA is in error */
        IA_CSS_IRQ_INFO_ISYS_EVENTS_READY             = 1 << 21,
-       /**< end-of-frame events are ready in the isys_event queue */
+       /** end-of-frame events are ready in the isys_event queue */
 };
 
-/** CSS receiver error types. Whenever the CSS receiver has encountered
+/* CSS receiver error types. Whenever the CSS receiver has encountered
  *  an error, this enumeration is used to indicate which errors have occurred.
  *
  *  Note that multiple error flags can be enabled at once and that this is in
@@ -105,39 +105,39 @@ enum ia_css_irq_info {
  * different receiver types, or possibly none in the case of test systems.
  */
 enum ia_css_rx_irq_info {
-       IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN   = 1U << 0, /**< buffer overrun */
-       IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /**< entering sleep mode */
-       IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE  = 1U << 2, /**< exited sleep mode */
-       IA_CSS_RX_IRQ_INFO_ECC_CORRECTED    = 1U << 3, /**< ECC corrected */
+       IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN   = 1U << 0, /** buffer overrun */
+       IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /** entering sleep mode */
+       IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE  = 1U << 2, /** exited sleep mode */
+       IA_CSS_RX_IRQ_INFO_ECC_CORRECTED    = 1U << 3, /** ECC corrected */
        IA_CSS_RX_IRQ_INFO_ERR_SOT          = 1U << 4,
-                                               /**< Start of transmission */
-       IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC     = 1U << 5, /**< SOT sync (??) */
-       IA_CSS_RX_IRQ_INFO_ERR_CONTROL      = 1U << 6, /**< Control (??) */
-       IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE   = 1U << 7, /**< Double ECC */
-       IA_CSS_RX_IRQ_INFO_ERR_CRC          = 1U << 8, /**< CRC error */
-       IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID   = 1U << 9, /**< Unknown ID */
-       IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC   = 1U << 10,/**< Frame sync error */
-       IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA   = 1U << 11,/**< Frame data error */
-       IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/**< Timeout occurred */
-       IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC  = 1U << 13,/**< Unknown escape seq. */
-       IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC    = 1U << 14,/**< Line Sync error */
+                                               /** Start of transmission */
+       IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC     = 1U << 5, /** SOT sync (??) */
+       IA_CSS_RX_IRQ_INFO_ERR_CONTROL      = 1U << 6, /** Control (??) */
+       IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE   = 1U << 7, /** Double ECC */
+       IA_CSS_RX_IRQ_INFO_ERR_CRC          = 1U << 8, /** CRC error */
+       IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID   = 1U << 9, /** Unknown ID */
+       IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC   = 1U << 10,/** Frame sync error */
+       IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA   = 1U << 11,/** Frame data error */
+       IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/** Timeout occurred */
+       IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC  = 1U << 13,/** Unknown escape seq. */
+       IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC    = 1U << 14,/** Line Sync error */
        IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT     = 1U << 15,
 };
 
-/** Interrupt info structure. This structure contains information about an
+/* Interrupt info structure. This structure contains information about an
  *  interrupt. This needs to be used after an interrupt is received on the IA
  *  to perform the correct action.
  */
 struct ia_css_irq {
-       enum ia_css_irq_info type; /**< Interrupt type. */
-       unsigned int sw_irq_0_val; /**< In case of SW interrupt 0, value. */
-       unsigned int sw_irq_1_val; /**< In case of SW interrupt 1, value. */
-       unsigned int sw_irq_2_val; /**< In case of SW interrupt 2, value. */
+       enum ia_css_irq_info type; /** Interrupt type. */
+       unsigned int sw_irq_0_val; /** In case of SW interrupt 0, value. */
+       unsigned int sw_irq_1_val; /** In case of SW interrupt 1, value. */
+       unsigned int sw_irq_2_val; /** In case of SW interrupt 2, value. */
        struct ia_css_pipe *pipe;
-       /**< The image pipe that generated the interrupt. */
+       /** The image pipe that generated the interrupt. */
 };
 
-/** @brief Obtain interrupt information.
+/* @brief Obtain interrupt information.
  *
  * @param[out] info    Pointer to the interrupt info. The interrupt
  *                     information will be written to this info.
@@ -154,7 +154,7 @@ struct ia_css_irq {
 enum ia_css_err
 ia_css_irq_translate(unsigned int *info);
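
ia_css_irq_translate() is what the interrupt handler calls to learn which of the bits above fired. A sketch of that pattern, assuming IA_CSS_SUCCESS as before; the deferred work is only indicated by comments:

#include <linux/interrupt.h>

static irqreturn_t example_css_isr(int irq, void *dev_id)
{
        unsigned int info = 0;

        if (ia_css_irq_translate(&info) != IA_CSS_SUCCESS)
                return IRQ_NONE;

        if (info & IA_CSS_IRQ_INFO_PSYS_EVENTS_READY) {
                /* wake the bottom half that drains the PSYS event queue */
        }
        if (info & IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR) {
                unsigned int rx_bits;

                ia_css_rx_get_irq_info(&rx_bits);  /* which receiver error? */
                ia_css_rx_clear_irq_info(rx_bits);
        }
        return IRQ_HANDLED;
}
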
 
-/** @brief Get CSI receiver error info.
+/* @brief Get CSI receiver error info.
  *
  * @param[out] irq_bits        Pointer to the interrupt bits. The interrupt
  *                     bits will be written to this info.
@@ -172,7 +172,7 @@ ia_css_irq_translate(unsigned int *info);
 void
 ia_css_rx_get_irq_info(unsigned int *irq_bits);
 
-/** @brief Get CSI receiver error info.
+/* @brief Get CSI receiver error info.
  *
  * @param[in]  port     Input port identifier.
  * @param[out] irq_bits        Pointer to the interrupt bits. The interrupt
@@ -188,7 +188,7 @@ ia_css_rx_get_irq_info(unsigned int *irq_bits);
 void
 ia_css_rx_port_get_irq_info(enum ia_css_csi2_port port, unsigned int *irq_bits);
 
-/** @brief Clear CSI receiver error info.
+/* @brief Clear CSI receiver error info.
  *
  * @param[in] irq_bits The bits that should be cleared from the CSI receiver
  *                     interrupt bits register.
@@ -205,7 +205,7 @@ ia_css_rx_port_get_irq_info(enum ia_css_csi2_port port, unsigned int *irq_bits);
 void
 ia_css_rx_clear_irq_info(unsigned int irq_bits);
 
-/** @brief Clear CSI receiver error info.
+/* @brief Clear CSI receiver error info.
  *
  * @param[in] port      Input port identifier.
  * @param[in] irq_bits The bits that should be cleared from the CSI receiver
@@ -220,7 +220,7 @@ ia_css_rx_clear_irq_info(unsigned int irq_bits);
 void
 ia_css_rx_port_clear_irq_info(enum ia_css_csi2_port port, unsigned int irq_bits);
 
-/** @brief Enable or disable specific interrupts.
+/* @brief Enable or disable specific interrupts.
  *
  * @param[in] type     The interrupt type that will be enabled/disabled.
  * @param[in] enable   enable or disable.
index c40c5a1..8b674c9 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_METADATA_H
 #define __IA_CSS_METADATA_H
 
-/** @file
+/* @file
  * This file contains structure for processing sensor metadata.
  */
 
 #include "ia_css_types.h"
 #include "ia_css_stream_format.h"
 
-/** Metadata configuration. This data structure contains necessary info
+/* Metadata configuration. This data structure contains necessary info
  *  to process sensor metadata.
  */
 struct ia_css_metadata_config {
-       enum ia_css_stream_format data_type; /**< Data type of CSI-2 embedded
+       enum ia_css_stream_format data_type; /** Data type of CSI-2 embedded
                        data. The default value is IA_CSS_STREAM_FORMAT_EMBEDDED. For
                        certain sensors, user can choose non-default data type for embedded
                        data. */
-       struct ia_css_resolution  resolution; /**< Resolution */
+       struct ia_css_resolution  resolution; /** Resolution */
 };
 
 struct ia_css_metadata_info {
-       struct ia_css_resolution resolution; /**< Resolution */
-       uint32_t                 stride;     /**< Stride in bytes */
-       uint32_t                 size;       /**< Total size in bytes */
+       struct ia_css_resolution resolution; /** Resolution */
+       uint32_t                 stride;     /** Stride in bytes */
+       uint32_t                 size;       /** Total size in bytes */
 };
 
 struct ia_css_metadata {
-       struct ia_css_metadata_info info;    /**< Layout info */
-       ia_css_ptr                  address; /**< CSS virtual address */
+       struct ia_css_metadata_info info;    /** Layout info */
+       ia_css_ptr                  address; /** CSS virtual address */
        uint32_t                    exp_id;
-       /**< Exposure ID, see ia_css_event_public.h for more detail */
+       /** Exposure ID, see ia_css_event_public.h for more detail */
 };
 #define SIZE_OF_IA_CSS_METADATA_STRUCT sizeof(struct ia_css_metadata)
 
-/** @brief Allocate a metadata buffer.
+/* @brief Allocate a metadata buffer.
  * @param[in]   metadata_info Metadata info struct, contains details on metadata buffers.
  * @return      Pointer of metadata buffer or NULL (if error)
  *
@@ -58,7 +58,7 @@ struct ia_css_metadata {
 struct ia_css_metadata *
 ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info);
 
-/** @brief Free a metadata buffer.
+/* @brief Free a metadata buffer.
  *
  * @param[in]  metadata        Pointer of metadata buffer.
  * @return     None
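A hypothetical allocation flow built on ia_css_metadata_allocate(); the matching release helper described by the "Free a metadata buffer" brief above has its prototype outside the shown hunk, so the name ia_css_metadata_free() below is an assumption.

/* md_info would normally come from ia_css_stream_info::metadata_info. */
static struct ia_css_metadata *grab_metadata(const struct ia_css_metadata_info *md_info)
{
	struct ia_css_metadata *md;

	md = ia_css_metadata_allocate(md_info); /* returns NULL on error */
	if (!md)
		return NULL;

	/* ... enqueue md as a CSS buffer, read md->exp_id on dequeue ... */
	/* ia_css_metadata_free(md); -- assumed name of the free routine */
	return md;
}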
index fd2c01b..f9c9cd7 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_MIPI_H
 #define __IA_CSS_MIPI_H
 
-/** @file
+/* @file
  * This file contains MIPI support functionality
  */
 
 #include "ia_css_stream_format.h"
 #include "ia_css_input_port.h"
 
-/** Backward compatible for CSS API 2.0 only
+/* Backward compatible for CSS API 2.0 only
  * TO BE REMOVED when all drivers move to CSS API 2.1.
  */
-/** @brief Specify a CSS MIPI frame buffer.
+/* @brief Specify a CSS MIPI frame buffer.
  *
  * @param[in]  size_mem_words  The frame size in memory words (32B).
  * @param[in]  contiguous      Allocate memory physically contiguously or not.
@@ -42,7 +42,7 @@ ia_css_mipi_frame_specify(const unsigned int  size_mem_words,
                                const bool contiguous);
 
 #if !defined(HAS_NO_INPUT_SYSTEM)
-/** @brief Register size of a CSS MIPI frame for check during capturing.
+/* @brief Register size of a CSS MIPI frame for check during capturing.
  *
  * @param[in]  port    CSI-2 port this check is registered.
  * @param[in]  size_mem_words  The frame size in memory words (32B).
@@ -59,7 +59,7 @@ ia_css_mipi_frame_enable_check_on_size(const enum ia_css_csi2_port port,
                                const unsigned int      size_mem_words);
 #endif
 
-/** @brief Calculate the size of a mipi frame.
+/* @brief Calculate the size of a mipi frame.
  *
  * @param[in]  width           The width (in pixels) of the frame.
  * @param[in]  height          The height (in lines) of the frame.
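A sketch of how the two MIPI helpers above could be driven. The 32-byte memory-word arithmetic is an assumption (only the "memory words (32B)" wording appears here), the port value is taken as a parameter because the ia_css_csi2_port enumerators are not shown, and the helpers' return values are ignored for brevity.

/* Roughly size a 1920x1080 RAW10 frame in 32-byte memory words (assumed math). */
static void setup_mipi_frame_check(enum ia_css_csi2_port port)
{
	const unsigned int bytes = 1920 * 1080 * 10 / 8;
	const unsigned int size_mem_words = (bytes + 31) / 32;

	ia_css_mipi_frame_specify(size_mem_words, false);
#if !defined(HAS_NO_INPUT_SYSTEM)
	ia_css_mipi_frame_enable_check_on_size(port, size_mem_words);
#endif
}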
index 48f8855..13c2105 100644 (file)
 #ifndef __IA_CSS_MMU_H
 #define __IA_CSS_MMU_H
 
-/** @file
+/* @file
  * This file contains one support function for invalidating the CSS MMU cache
  */
 
-/** @brief Invalidate the MMU internal cache.
+/* @brief Invalidate the MMU internal cache.
  * @return     None
  *
  * This function triggers an invalidation of the translate-look-aside
index 969840d..de40963 100644 (file)
 #ifndef __IA_CSS_MORPH_H
 #define __IA_CSS_MORPH_H
 
-/** @file
+/* @file
 * This file contains support for the morphing table
  */
 
 #include <ia_css_types.h>
 
-/** @brief Morphing table
+/* @brief Morphing table
  * @param[in]  width Width of the morphing table.
  * @param[in]  height Height of the morphing table.
  * @return             Pointer to the morphing table
@@ -29,7 +29,7 @@
 struct ia_css_morph_table *
 ia_css_morph_table_allocate(unsigned int width, unsigned int height);
 
-/** @brief Free the morph table
+/* @brief Free the morph table
  * @param[in]  me Pointer to the morph table.
  * @return             None
 */
index 733e0ef..df0aad9 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_PIPE_PUBLIC_H
 #define __IA_CSS_PIPE_PUBLIC_H
 
-/** @file
+/* @file
  * This file contains the public interface for CSS pipes.
  */
 
@@ -34,7 +34,7 @@ enum {
        IA_CSS_PIPE_MAX_OUTPUT_STAGE,
 };
 
-/** Enumeration of pipe modes. This mode can be used to create
+/* Enumeration of pipe modes. This mode can be used to create
  *  an image pipe for this mode. These pipes can be combined
  *  to configure and run streams on the ISP.
  *
@@ -42,12 +42,12 @@ enum {
  *  create a continuous capture stream.
  */
 enum ia_css_pipe_mode {
-       IA_CSS_PIPE_MODE_PREVIEW,       /**< Preview pipe */
-       IA_CSS_PIPE_MODE_VIDEO,         /**< Video pipe */
-       IA_CSS_PIPE_MODE_CAPTURE,       /**< Still capture pipe */
-       IA_CSS_PIPE_MODE_ACC,           /**< Accelerated pipe */
-       IA_CSS_PIPE_MODE_COPY,          /**< Copy pipe, only used for embedded/image data copying */
-       IA_CSS_PIPE_MODE_YUVPP,         /**< YUV post processing pipe, used for all use cases with YUV input,
+       IA_CSS_PIPE_MODE_PREVIEW,       /** Preview pipe */
+       IA_CSS_PIPE_MODE_VIDEO,         /** Video pipe */
+       IA_CSS_PIPE_MODE_CAPTURE,       /** Still capture pipe */
+       IA_CSS_PIPE_MODE_ACC,           /** Accelerated pipe */
+       IA_CSS_PIPE_MODE_COPY,          /** Copy pipe, only used for embedded/image data copying */
+       IA_CSS_PIPE_MODE_YUVPP,         /** YUV post processing pipe, used for all use cases with YUV input,
                                                                        for SoC sensor and external ISP */
 };
 /* Temporary define  */
@@ -58,10 +58,10 @@ enum ia_css_pipe_mode {
  * the order should match with definition in sh_css_defs.h
  */
 enum ia_css_pipe_version {
-       IA_CSS_PIPE_VERSION_1 = 1,              /**< ISP1.0 pipe */
-       IA_CSS_PIPE_VERSION_2_2 = 2,            /**< ISP2.2 pipe */
-       IA_CSS_PIPE_VERSION_2_6_1 = 3,          /**< ISP2.6.1 pipe */
-       IA_CSS_PIPE_VERSION_2_7 = 4             /**< ISP2.7 pipe */
+       IA_CSS_PIPE_VERSION_1 = 1,              /** ISP1.0 pipe */
+       IA_CSS_PIPE_VERSION_2_2 = 2,            /** ISP2.2 pipe */
+       IA_CSS_PIPE_VERSION_2_6_1 = 3,          /** ISP2.6.1 pipe */
+       IA_CSS_PIPE_VERSION_2_7 = 4             /** ISP2.7 pipe */
 };
 
 /**
@@ -71,79 +71,79 @@ enum ia_css_pipe_version {
  */
 struct ia_css_pipe_config {
        enum ia_css_pipe_mode mode;
-       /**< mode, indicates which mode the pipe should use. */
+       /** mode, indicates which mode the pipe should use. */
        enum ia_css_pipe_version isp_pipe_version;
-       /**< pipe version, indicates which imaging pipeline the pipe should use. */
+       /** pipe version, indicates which imaging pipeline the pipe should use. */
        struct ia_css_resolution input_effective_res;
-       /**< input effective resolution */
+       /** input effective resolution */
        struct ia_css_resolution bayer_ds_out_res;
-       /**< bayer down scaling */
+       /** bayer down scaling */
        struct ia_css_resolution capt_pp_in_res;
 #ifndef ISP2401
-       /**< bayer down scaling */
+       /** bayer down scaling */
 #else
-       /**< capture post processing input resolution */
+       /** capture post processing input resolution */
 #endif
        struct ia_css_resolution vf_pp_in_res;
 #ifndef ISP2401
-       /**< bayer down scaling */
+       /** bayer down scaling */
 #else
-       /**< view finder post processing input resolution */
+       /** view finder post processing input resolution */
        struct ia_css_resolution output_system_in_res;
-       /**< For IPU3 only: use output_system_in_res to specify what input resolution
+       /** For IPU3 only: use output_system_in_res to specify what input resolution
             will OSYS receive, this resolution is equal to the output resolution of GDC
             if not determined CSS will set output_system_in_res with main osys output pin resolution
             All other IPUs may ignore this property */
 #endif
        struct ia_css_resolution dvs_crop_out_res;
-       /**< dvs crop, video only, not in use yet. Use dvs_envelope below. */
+       /** dvs crop, video only, not in use yet. Use dvs_envelope below. */
        struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
-       /**< output of YUV scaling */
+       /** output of YUV scaling */
        struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
-       /**< output of VF YUV scaling */
+       /** output of VF YUV scaling */
        struct ia_css_fw_info *acc_extension;
-       /**< Pipeline extension accelerator */
+       /** Pipeline extension accelerator */
        struct ia_css_fw_info **acc_stages;
-       /**< Standalone accelerator stages */
+       /** Standalone accelerator stages */
        uint32_t num_acc_stages;
-       /**< Number of standalone accelerator stages */
+       /** Number of standalone accelerator stages */
        struct ia_css_capture_config default_capture_config;
-       /**< Default capture config for initial capture pipe configuration. */
-       struct ia_css_resolution dvs_envelope; /**< temporary */
+       /** Default capture config for initial capture pipe configuration. */
+       struct ia_css_resolution dvs_envelope; /** temporary */
        enum ia_css_frame_delay dvs_frame_delay;
-       /**< indicates the DVS loop delay in frame periods */
+       /** indicates the DVS loop delay in frame periods */
        int acc_num_execs;
-       /**< For acceleration pipes only: determine how many times the pipe
+       /** For acceleration pipes only: determine how many times the pipe
             should be run. Setting this to -1 means it will run until
             stopped. */
        bool enable_dz;
-       /**< Disabling digital zoom for a pipeline, if this is set to false,
+       /** Disabling digital zoom for a pipeline, if this is set to false,
             then setting a zoom factor will have no effect.
             In some use cases this provides better performance. */
        bool enable_dpc;
-       /**< Disabling "Defect Pixel Correction" for a pipeline, if this is set
+       /** Disabling "Defect Pixel Correction" for a pipeline, if this is set
             to false. In some use cases this provides better performance. */
        bool enable_vfpp_bci;
-       /**< Enabling BCI mode will cause yuv_scale binary to be picked up
+       /** Enabling BCI mode will cause yuv_scale binary to be picked up
             instead of vf_pp. This only applies to viewfinder post
             processing stages. */
 #ifdef ISP2401
        bool enable_luma_only;
-       /**< Enabling of monochrome mode for a pipeline. If enabled only luma processing
+       /** Enabling of monochrome mode for a pipeline. If enabled only luma processing
             will be done. */
        bool enable_tnr;
-       /**< Enabling of TNR (temporal noise reduction). This is only applicable to video
+       /** Enabling of TNR (temporal noise reduction). This is only applicable to video
             pipes. Non video-pipes should always set this parameter to false. */
 #endif
        struct ia_css_isp_config *p_isp_config;
-       /**< Pointer to ISP configuration */
+       /** Pointer to ISP configuration */
        struct ia_css_resolution gdc_in_buffer_res;
-       /**< GDC in buffer resolution. */
+       /** GDC in buffer resolution. */
        struct ia_css_point gdc_in_buffer_offset;
-       /**< GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */
+       /** GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */
 #ifdef ISP2401
        struct ia_css_coordinate internal_frame_origin_bqs_on_sctbl;
-       /**< Origin of internal frame positioned on shading table at shading correction in ISP.
+       /** Origin of internal frame positioned on shading table at shading correction in ISP.
             NOTE: Shading table is larger than or equal to internal frame.
                   Shading table has shading gains and internal frame has bayer data.
                   The origin of internal frame is used in shading correction in ISP
@@ -228,20 +228,20 @@ struct ia_css_pipe_config {
 
 #endif
 
-/** Pipe info, this struct describes properties of a pipe after its stream has
+/* Pipe info, this struct describes properties of a pipe after its stream has
  * been created.
  * ~~~** DO NOT ADD NEW FIELD **~~~ This structure will be deprecated.
  *           - On the Behalf of CSS-API Committee.
  */
 struct ia_css_pipe_info {
        struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
-       /**< Info about output resolution. This contains the stride which
+       /** Info about output resolution. This contains the stride which
             should be used for memory allocation. */
        struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
-       /**< Info about viewfinder output resolution (optional). This contains
+       /** Info about viewfinder output resolution (optional). This contains
             the stride that should be used for memory allocation. */
        struct ia_css_frame_info raw_output_info;
-       /**< Raw output resolution. This indicates the resolution of the
+       /** Raw output resolution. This indicates the resolution of the
             RAW bayer output for pipes that support this. Currently, only the
             still capture pipes support this feature. When this resolution is
             smaller than the input resolution, cropping will be performed by
@@ -252,17 +252,17 @@ struct ia_css_pipe_info {
             the input resolution - 8x8. */
 #ifdef ISP2401
        struct ia_css_resolution output_system_in_res_info;
-       /**< For IPU3 only. Info about output system in resolution which is considered
+       /** For IPU3 only. Info about output system in resolution which is considered
             as gdc out resolution. */
 #endif
        struct ia_css_shading_info shading_info;
-       /**< After an image pipe is created, this field will contain the info
+       /** After an image pipe is created, this field will contain the info
             for the shading correction. */
        struct ia_css_grid_info  grid_info;
-       /**< After an image pipe is created, this field will contain the grid
+       /** After an image pipe is created, this field will contain the grid
             info for 3A and DVS. */
        int num_invalid_frames;
-       /**< The very first frames in a started stream do not contain valid data.
+       /** The very first frames in a started stream do not contain valid data.
             In this field, the CSS-firmware communicates to the host-driver how
             many initial frames will contain invalid data; this allows the
             host-driver to discard those initial invalid frames and start its
@@ -299,7 +299,7 @@ struct ia_css_pipe_info {
 
 #endif
 
-/** @brief Load default pipe configuration
+/* @brief Load default pipe configuration
  * @param[out] pipe_config The pipe configuration.
  * @return     None
  *
@@ -334,7 +334,7 @@ struct ia_css_pipe_info {
  */
 void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config);
 
-/** @brief Create a pipe
+/* @brief Create a pipe
  * @param[in]  config The pipe configuration.
  * @param[out] pipe The pipe.
  * @return     IA_CSS_SUCCESS or the error code.
@@ -346,7 +346,7 @@ enum ia_css_err
 ia_css_pipe_create(const struct ia_css_pipe_config *config,
                   struct ia_css_pipe **pipe);
 
-/** @brief Destroy a pipe
+/* @brief Destroy a pipe
  * @param[in]  pipe The pipe.
  * @return     IA_CSS_SUCCESS or the error code.
  *
@@ -355,7 +355,7 @@ ia_css_pipe_create(const struct ia_css_pipe_config *config,
 enum ia_css_err
 ia_css_pipe_destroy(struct ia_css_pipe *pipe);
 
-/** @brief Provides information about a pipe
+/* @brief Provides information about a pipe
  * @param[in]  pipe The pipe.
  * @param[out] pipe_info The pipe information.
  * @return     IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS.
@@ -366,7 +366,7 @@ enum ia_css_err
 ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
                     struct ia_css_pipe_info *pipe_info);
 
-/** @brief Configure a pipe with filter coefficients.
+/* @brief Configure a pipe with filter coefficients.
  * @param[in]  pipe    The pipe.
  * @param[in]  config  The pointer to ISP configuration.
  * @return             IA_CSS_SUCCESS or error code upon error.
@@ -378,7 +378,7 @@ enum ia_css_err
 ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
                                                   struct ia_css_isp_config *config);
 
-/** @brief Controls when the Event generator raises an IRQ to the Host.
+/* @brief Controls when the Event generator raises an IRQ to the Host.
  *
  * @param[in]  pipe    The pipe.
  * @param[in]  or_mask Binary or of enum ia_css_event_irq_mask_type. Each pipe
@@ -455,7 +455,7 @@ ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
                         unsigned int or_mask,
                         unsigned int and_mask);
 
-/** @brief Reads the current event IRQ mask from the CSS.
+/* @brief Reads the current event IRQ mask from the CSS.
  *
  * @param[in]  pipe The pipe.
  * @param[out] or_mask Current or_mask. The bits in this mask are a binary or
@@ -476,7 +476,7 @@ ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
                          unsigned int *or_mask,
                          unsigned int *and_mask);
 
-/** @brief Queue a buffer for an image pipe.
+/* @brief Queue a buffer for an image pipe.
  *
  * @param[in] pipe     The pipe that will own the buffer.
  * @param[in] buffer   Pointer to the buffer.
@@ -498,7 +498,7 @@ enum ia_css_err
 ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
                           const struct ia_css_buffer *buffer);
 
-/** @brief Dequeue a buffer from an image pipe.
+/* @brief Dequeue a buffer from an image pipe.
  *
  * @param[in]    pipe   The pipeline that the buffer queue belongs to.
  * @param[in,out] buffer The buffer is used to lookup the type which determines
@@ -519,7 +519,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
                           struct ia_css_buffer *buffer);
 
 
-/** @brief  Set the state (Enable or Disable) of the Extension stage in the
+/* @brief  Set the state (Enable or Disable) of the Extension stage in the
  *          given pipe.
  * @param[in] pipe         Pipe handle.
  * @param[in] fw_handle    Extension firmware Handle (ia_css_fw_info.handle)
@@ -546,7 +546,7 @@ ia_css_pipe_set_qos_ext_state (struct ia_css_pipe *pipe,
                            uint32_t fw_handle,
                            bool  enable);
 
-/** @brief  Get the state (Enable or Disable) of the Extension stage in the
+/* @brief  Get the state (Enable or Disable) of the Extension stage in the
  *          given pipe.
  * @param[in]  pipe        Pipe handle.
  * @param[in]  fw_handle   Extension firmware Handle (ia_css_fw_info.handle)
@@ -573,7 +573,7 @@ ia_css_pipe_get_qos_ext_state (struct ia_css_pipe *pipe,
                            bool * enable);
 
 #ifdef ISP2401
-/** @brief  Update mapped CSS and ISP arguments for QoS pipe during SP runtime.
+/* @brief  Update mapped CSS and ISP arguments for QoS pipe during SP runtime.
  * @param[in] pipe             Pipe handle.
  * @param[in] fw_handle        Extension firmware Handle (ia_css_fw_info.handle).
  * @param[in] css_seg          Parameter memory descriptors for CSS segments.
@@ -595,7 +595,7 @@ ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe, uint32_t fw_hand
                        struct ia_css_isp_param_isp_segments *isp_seg);
 
 #endif
-/** @brief Get selected configuration settings
+/* @brief Get selected configuration settings
  * @param[in]  pipe    The pipe.
  * @param[out] config  Configuration settings.
  * @return             None
@@ -604,7 +604,7 @@ void
 ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
                             struct ia_css_isp_config *config);
 
-/** @brief Set the scaler lut on this pipe. A copy of lut is made in the inuit
+/* @brief Set the scaler lut on this pipe. A copy of lut is made in the inuit
  *         address space. So the LUT can be freed by caller.
  * @param[in]  pipe        Pipe handle.
 * @param[in]  lut         Look up table
@@ -623,7 +623,7 @@ ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
 enum ia_css_err
 ia_css_pipe_set_bci_scaler_lut( struct ia_css_pipe *pipe,
                                const void *lut);
-/** @brief Checking of DVS statistics ability
+/* @brief Checking of DVS statistics ability
  * @param[in]  pipe_info       The pipe info.
  * @return             true - has DVS statistics ability
  *                     false - otherwise
@@ -631,7 +631,7 @@ ia_css_pipe_set_bci_scaler_lut( struct ia_css_pipe *pipe,
 bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info);
 
 #ifdef ISP2401
-/** @brief Override the frameformat set on the output pins.
+/* @brief Override the frameformat set on the output pins.
  * @param[in]  pipe        Pipe handle.
  * @param[in]  output_pin  Pin index to set the format on
  *                         0 - main output pin
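The pipe lifecycle documented in this header fits in a few calls; a minimal sketch using only the prototypes and enumerators visible above.

static enum ia_css_err create_preview_pipe(struct ia_css_pipe **pipe)
{
	struct ia_css_pipe_config cfg;
	struct ia_css_pipe_info info;
	enum ia_css_err err;

	ia_css_pipe_config_defaults(&cfg);
	cfg.mode = IA_CSS_PIPE_MODE_PREVIEW;

	err = ia_css_pipe_create(&cfg, pipe);
	if (err != IA_CSS_SUCCESS)
		return err;

	/* output_info[0] carries the stride the host must use for buffer allocation. */
	return ia_css_pipe_get_info(*pipe, &info);
}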
index 9b0eeb0..6f24656 100644 (file)
 #ifndef __IA_CSS_PRBS_H
 #define __IA_CSS_PRBS_H
 
-/** @file
+/* @file
  * This file contains support for Pseudo Random Bit Sequence (PRBS) inputs
  */
 
-/** Enumerate the PRBS IDs.
+/* Enumerate the PRBS IDs.
  */
 enum ia_css_prbs_id {
        IA_CSS_PRBS_ID0,
@@ -44,10 +44,10 @@ enum ia_css_prbs_id {
  */
 struct ia_css_prbs_config {
        enum ia_css_prbs_id     id;
-       unsigned int            h_blank;        /**< horizontal blank */
-       unsigned int            v_blank;        /**< vertical blank */
-       int                     seed;   /**< random seed for the 1st 2-pixel-components/clock */
-       int                     seed1;  /**< random seed for the 2nd 2-pixel-components/clock */
+       unsigned int            h_blank;        /** horizontal blank */
+       unsigned int            v_blank;        /** vertical blank */
+       int                     seed;   /** random seed for the 1st 2-pixel-components/clock */
+       int                     seed1;  /** random seed for the 2nd 2-pixel-components/clock */
 };
 
 #endif /* __IA_CSS_PRBS_H */
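An illustrative initializer for the configuration structure above; the blank and seed values are arbitrary. Selecting the PRBS source for a stream (via the input-mode enum and the source union declared further below in the stream-public header) is where this would typically be plugged in.

/* Arbitrary example values; only the field layout comes from the header. */
static const struct ia_css_prbs_config example_prbs = {
	.id      = IA_CSS_PRBS_ID0,
	.h_blank = 100,
	.v_blank = 100,
	.seed    = 0x1234,
	.seed1   = 0x5678,
};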
index 19af402..9a16730 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_PROPERTIES_H
 #define __IA_CSS_PROPERTIES_H
 
-/** @file
+/* @file
 * This file contains support for retrieving properties of some hardware in the CSS system
  */
 
 
 struct ia_css_properties {
        int  gdc_coord_one;
-       bool l1_base_is_index; /**< Indicate whether the L1 page base
+       bool l1_base_is_index; /** Indicate whether the L1 page base
                                    is a page index or a byte address. */
        enum ia_css_vamem_type vamem_type;
 };
 
-/** @brief Get hardware properties
+/* @brief Get hardware properties
  * @param[in,out]      properties The hardware properties
  * @return     None
  *
index cb0f249..588f53d 100644 (file)
 #ifndef __IA_CSS_SHADING_H
 #define __IA_CSS_SHADING_H
 
-/** @file
+/* @file
  * This file contains support for setting the shading table for CSS
  */
 
 #include <ia_css_types.h>
 
-/** @brief Shading table
+/* @brief Shading table
  * @param[in]  width Width of the shading table.
  * @param[in]  height Height of the shading table.
  * @return             Pointer to the shading table
@@ -30,7 +30,7 @@ struct ia_css_shading_table *
 ia_css_shading_table_alloc(unsigned int width,
                           unsigned int height);
 
-/** @brief Free shading table
+/* @brief Free shading table
  * @param[in]  table Pointer to the shading table.
  * @return             None
 */
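As with the morphing table, allocation is a single call; a tiny sketch (the matching free routine documented above is declared below the shown hunk, so its name is not repeated here, and the 97x73 dimensions are purely illustrative).

static struct ia_css_shading_table *example_shading_table(void)
{
	/* Dimensions are normally derived from sensor and grid geometry. */
	return ia_css_shading_table_alloc(97, 73);
}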
index 453fe4d..fb6e8c2 100644 (file)
@@ -48,7 +48,7 @@ struct ia_css_stream {
        bool                           started;
 };
 
-/** @brief Get the binary in the stream that performs the shading correction.
+/* @brief Get the binary in the stream that performs the shading correction.
  *
  * @param[in] stream: The stream.
  * @return     The binary which has the shading correction.
@@ -76,7 +76,7 @@ sh_css_invalidate_params(struct ia_css_stream *stream);
 const struct ia_css_fpn_table *
 ia_css_get_fpn_table(struct ia_css_stream *stream);
 
-/** @brief Get a pointer to the shading table.
+/* @brief Get a pointer to the shading table.
  *
  * @param[in] stream: The stream.
  * @return     The pointer to the shading table.
index ae608a9..f7e9020 100644 (file)
 #ifndef __IA_CSS_STREAM_FORMAT_H
 #define __IA_CSS_STREAM_FORMAT_H
 
-/** @file
+/* @file
  * This file contains formats usable for ISP streaming input
  */
 
 #include <type_support.h> /* bool */
 
-/** The ISP streaming input interface supports the following formats.
+/* The ISP streaming input interface supports the following formats.
  *  These match the corresponding MIPI formats.
  */
 enum ia_css_stream_format {
-       IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY,    /**< 8 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV420_8,  /**< 8 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV420_10, /**< 10 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV420_16, /**< 16 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV422_8,  /**< UYVY..UYVY, 8 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV422_10, /**< UYVY..UYVY, 10 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV422_16, /**< UYVY..UYVY, 16 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_RGB_444,  /**< BGR..BGR, 4 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_RGB_555,  /**< BGR..BGR, 5 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_RGB_565,  /**< BGR..BGR, 5 bits B and R, 6 bits G */
-       IA_CSS_STREAM_FORMAT_RGB_666,  /**< BGR..BGR, 6 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_RGB_888,  /**< BGR..BGR, 8 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_RAW_6,    /**< RAW data, 6 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_7,    /**< RAW data, 7 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_8,    /**< RAW data, 8 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_10,   /**< RAW data, 10 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_12,   /**< RAW data, 12 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_14,   /**< RAW data, 14 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_16,   /**< RAW data, 16 bits per pixel, which is
+       IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY,    /** 8 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV420_8,  /** 8 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV420_10, /** 10 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV420_16, /** 16 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV422_8,  /** UYVY..UYVY, 8 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV422_10, /** UYVY..UYVY, 10 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV422_16, /** UYVY..UYVY, 16 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_RGB_444,  /** BGR..BGR, 4 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_RGB_555,  /** BGR..BGR, 5 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_RGB_565,  /** BGR..BGR, 5 bits B and R, 6 bits G */
+       IA_CSS_STREAM_FORMAT_RGB_666,  /** BGR..BGR, 6 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_RGB_888,  /** BGR..BGR, 8 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_RAW_6,    /** RAW data, 6 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_7,    /** RAW data, 7 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_8,    /** RAW data, 8 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_10,   /** RAW data, 10 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_12,   /** RAW data, 12 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_14,   /** RAW data, 14 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_16,   /** RAW data, 16 bits per pixel, which is
                                            not specified in CSI-MIPI standard*/
-       IA_CSS_STREAM_FORMAT_BINARY_8, /**< Binary byte stream, which is target at
+       IA_CSS_STREAM_FORMAT_BINARY_8, /** Binary byte stream, which is target at
                                            JPEG. */
 
-       /** CSI2-MIPI specific format: Generic short packet data. It is used to
+       /* CSI2-MIPI specific format: Generic short packet data. It is used to
         *  keep the timing information for the opening/closing of shutters,
         *  triggering of flashes and etc.
         */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT1,  /**< Generic Short Packet Code 1 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT2,  /**< Generic Short Packet Code 2 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT3,  /**< Generic Short Packet Code 3 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT4,  /**< Generic Short Packet Code 4 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT5,  /**< Generic Short Packet Code 5 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT6,  /**< Generic Short Packet Code 6 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT7,  /**< Generic Short Packet Code 7 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT8,  /**< Generic Short Packet Code 8 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT1,  /** Generic Short Packet Code 1 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT2,  /** Generic Short Packet Code 2 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT3,  /** Generic Short Packet Code 3 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT4,  /** Generic Short Packet Code 4 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT5,  /** Generic Short Packet Code 5 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT6,  /** Generic Short Packet Code 6 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT7,  /** Generic Short Packet Code 7 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT8,  /** Generic Short Packet Code 8 */
 
-       /** CSI2-MIPI specific format: YUV data.
+       /* CSI2-MIPI specific format: YUV data.
         */
-       IA_CSS_STREAM_FORMAT_YUV420_8_SHIFT,  /**< YUV420 8-bit (Chroma Shifted Pixel Sampling) */
-       IA_CSS_STREAM_FORMAT_YUV420_10_SHIFT, /**< YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+       IA_CSS_STREAM_FORMAT_YUV420_8_SHIFT,  /** YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+       IA_CSS_STREAM_FORMAT_YUV420_10_SHIFT, /** YUV420 10-bit (Chroma Shifted Pixel Sampling) */
 
-       /** CSI2-MIPI specific format: Generic long packet data
+       /* CSI2-MIPI specific format: Generic long packet data
         */
-       IA_CSS_STREAM_FORMAT_EMBEDDED, /**< Embedded 8-bit non Image Data */
+       IA_CSS_STREAM_FORMAT_EMBEDDED, /** Embedded 8-bit non Image Data */
 
-       /** CSI2-MIPI specific format: User defined byte-based data. For example,
+       /* CSI2-MIPI specific format: User defined byte-based data. For example,
         *  the data transmitter (e.g. the SoC sensor) can keep the JPEG data as
         *  the User Defined Data Type 4 and the MPEG data as the
         *  User Defined Data Type 7.
         */
-       IA_CSS_STREAM_FORMAT_USER_DEF1,  /**< User defined 8-bit data type 1 */
-       IA_CSS_STREAM_FORMAT_USER_DEF2,  /**< User defined 8-bit data type 2 */
-       IA_CSS_STREAM_FORMAT_USER_DEF3,  /**< User defined 8-bit data type 3 */
-       IA_CSS_STREAM_FORMAT_USER_DEF4,  /**< User defined 8-bit data type 4 */
-       IA_CSS_STREAM_FORMAT_USER_DEF5,  /**< User defined 8-bit data type 5 */
-       IA_CSS_STREAM_FORMAT_USER_DEF6,  /**< User defined 8-bit data type 6 */
-       IA_CSS_STREAM_FORMAT_USER_DEF7,  /**< User defined 8-bit data type 7 */
-       IA_CSS_STREAM_FORMAT_USER_DEF8,  /**< User defined 8-bit data type 8 */
+       IA_CSS_STREAM_FORMAT_USER_DEF1,  /** User defined 8-bit data type 1 */
+       IA_CSS_STREAM_FORMAT_USER_DEF2,  /** User defined 8-bit data type 2 */
+       IA_CSS_STREAM_FORMAT_USER_DEF3,  /** User defined 8-bit data type 3 */
+       IA_CSS_STREAM_FORMAT_USER_DEF4,  /** User defined 8-bit data type 4 */
+       IA_CSS_STREAM_FORMAT_USER_DEF5,  /** User defined 8-bit data type 5 */
+       IA_CSS_STREAM_FORMAT_USER_DEF6,  /** User defined 8-bit data type 6 */
+       IA_CSS_STREAM_FORMAT_USER_DEF7,  /** User defined 8-bit data type 7 */
+       IA_CSS_STREAM_FORMAT_USER_DEF8,  /** User defined 8-bit data type 8 */
 };
 
 #define        IA_CSS_STREAM_FORMAT_NUM        IA_CSS_STREAM_FORMAT_USER_DEF8
index 2c8d9de..ca32033 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_STREAM_PUBLIC_H
 #define __IA_CSS_STREAM_PUBLIC_H
 
-/** @file
+/* @file
  * This file contains support for configuring and controlling streams
  */
 
 #include "ia_css_prbs.h"
 #include "ia_css_input_port.h"
 
-/** Input modes, these enumerate all supported input modes.
+/* Input modes, these enumerate all supported input modes.
  *  Note that not all ISP modes support all input modes.
  */
 enum ia_css_input_mode {
-       IA_CSS_INPUT_MODE_SENSOR, /**< data from sensor */
-       IA_CSS_INPUT_MODE_FIFO,   /**< data from input-fifo */
-       IA_CSS_INPUT_MODE_TPG,    /**< data from test-pattern generator */
-       IA_CSS_INPUT_MODE_PRBS,   /**< data from pseudo-random bit stream */
-       IA_CSS_INPUT_MODE_MEMORY, /**< data from a frame in memory */
-       IA_CSS_INPUT_MODE_BUFFERED_SENSOR /**< data is sent through mipi buffer */
+       IA_CSS_INPUT_MODE_SENSOR, /** data from sensor */
+       IA_CSS_INPUT_MODE_FIFO,   /** data from input-fifo */
+       IA_CSS_INPUT_MODE_TPG,    /** data from test-pattern generator */
+       IA_CSS_INPUT_MODE_PRBS,   /** data from pseudo-random bit stream */
+       IA_CSS_INPUT_MODE_MEMORY, /** data from a frame in memory */
+       IA_CSS_INPUT_MODE_BUFFERED_SENSOR /** data is sent through mipi buffer */
 };
 
-/** Structure of the MIPI buffer configuration
+/* Structure of the MIPI buffer configuration
  */
 struct ia_css_mipi_buffer_config {
-       unsigned int size_mem_words; /**< The frame size in the system memory
+       unsigned int size_mem_words; /** The frame size in the system memory
                                          words (32B) */
-       bool contiguous;             /**< Allocated memory physically
+       bool contiguous;             /** Allocated memory physically
                                          contiguously or not. \deprecated{Will be false always.}*/
-       unsigned int nof_mipi_buffers; /**< The number of MIPI buffers required for this
+       unsigned int nof_mipi_buffers; /** The number of MIPI buffers required for this
                                        stream */
 };
 
@@ -57,44 +57,44 @@ enum {
        IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH
 };
 
-/** This is input data configuration for one MIPI data type. We can have
+/* This is input data configuration for one MIPI data type. We can have
  *  multiple of this in one virtual channel.
  */
 struct ia_css_stream_isys_stream_config {
-       struct ia_css_resolution  input_res; /**< Resolution of input data */
-       enum ia_css_stream_format format; /**< Format of input stream. This data
+       struct ia_css_resolution  input_res; /** Resolution of input data */
+       enum ia_css_stream_format format; /** Format of input stream. This data
                                               format will be mapped to MIPI data
                                               type internally. */
-       int linked_isys_stream_id; /**< default value is -1, other value means
+       int linked_isys_stream_id; /** default value is -1, other value means
                                                        current isys_stream shares the same buffer with
                                                        indicated isys_stream*/
-       bool valid; /**< indicate whether other fields have valid value */
+       bool valid; /** indicate whether other fields have valid value */
 };
 
 struct ia_css_stream_input_config {
-       struct ia_css_resolution  input_res; /**< Resolution of input data */
-       struct ia_css_resolution  effective_res; /**< Resolution of input data.
+       struct ia_css_resolution  input_res; /** Resolution of input data */
+       struct ia_css_resolution  effective_res; /** Resolution of input data.
                                                        Used for CSS 2400/1 System and deprecated for other
                                                        systems (replaced by input_effective_res in
                                                        ia_css_pipe_config) */
-       enum ia_css_stream_format format; /**< Format of input stream. This data
+       enum ia_css_stream_format format; /** Format of input stream. This data
                                               format will be mapped to MIPI data
                                               type internally. */
-       enum ia_css_bayer_order bayer_order; /**< Bayer order for RAW streams */
+       enum ia_css_bayer_order bayer_order; /** Bayer order for RAW streams */
 };
 
 
-/** Input stream description. This describes how input will flow into the
+/* Input stream description. This describes how input will flow into the
  *  CSS. This is used to program the CSS hardware.
  */
 struct ia_css_stream_config {
-       enum ia_css_input_mode    mode; /**< Input mode */
+       enum ia_css_input_mode    mode; /** Input mode */
        union {
-               struct ia_css_input_port  port; /**< Port, for sensor only. */
-               struct ia_css_tpg_config  tpg;  /**< TPG configuration */
-               struct ia_css_prbs_config prbs; /**< PRBS configuration */
-       } source; /**< Source of input data */
-       unsigned int          channel_id; /**< Channel on which input data
+               struct ia_css_input_port  port; /** Port, for sensor only. */
+               struct ia_css_tpg_config  tpg;  /** TPG configuration */
+               struct ia_css_prbs_config prbs; /** PRBS configuration */
+       } source; /** Source of input data */
+       unsigned int          channel_id; /** Channel on which input data
                                                   will arrive. Use this field
                                                   to specify virtual channel id.
                                                   Valid values are: 0, 1, 2, 3 */
@@ -110,29 +110,29 @@ struct ia_css_stream_config {
         * and will be deprecated. In the future, all platforms will use the N*N method
         */
 #endif
-       unsigned int sensor_binning_factor; /**< Binning factor used by sensor
+       unsigned int sensor_binning_factor; /** Binning factor used by sensor
                                                 to produce image data. This is
                                                 used for shading correction. */
-       unsigned int pixels_per_clock; /**< Number of pixels per clock, which can be
+       unsigned int pixels_per_clock; /** Number of pixels per clock, which can be
                                            1, 2 or 4. */
-       bool online; /**< offline will activate RAW copy on SP, use this for
+       bool online; /** offline will activate RAW copy on SP, use this for
                          continuous capture. */
                /* ISYS2401 usage: ISP receives data directly from sensor, no copy. */
-       unsigned init_num_cont_raw_buf; /**< initial number of raw buffers to
+       unsigned init_num_cont_raw_buf; /** initial number of raw buffers to
                                             allocate */
-       unsigned target_num_cont_raw_buf; /**< total number of raw buffers to
+       unsigned target_num_cont_raw_buf; /** total number of raw buffers to
                                             allocate */
-       bool pack_raw_pixels; /**< Pack pixels in the raw buffers */
-       bool continuous; /**< Use SP copy feature to continuously capture frames
+       bool pack_raw_pixels; /** Pack pixels in the raw buffers */
+       bool continuous; /** Use SP copy feature to continuously capture frames
                              to system memory and run pipes in offline mode */
-       bool disable_cont_viewfinder; /**< disable continuous viewfinder for ZSL use case */
-       int32_t flash_gpio_pin; /**< pin on which the flash is connected, -1 for no flash */
-       int left_padding; /**< The number of input-formatter left-paddings, -1 for default from binary.*/
-       struct ia_css_mipi_buffer_config mipi_buffer_config; /**< mipi buffer configuration */
-       struct ia_css_metadata_config   metadata_config;     /**< Metadata configuration. */
-       bool ia_css_enable_raw_buffer_locking; /**< Enable Raw Buffer Locking for HALv3 Support */
+       bool disable_cont_viewfinder; /** disable continuous viewfinder for ZSL use case */
+       int32_t flash_gpio_pin; /** pin on which the flash is connected, -1 for no flash */
+       int left_padding; /** The number of input-formatter left-paddings, -1 for default from binary.*/
+       struct ia_css_mipi_buffer_config mipi_buffer_config; /** mipi buffer configuration */
+       struct ia_css_metadata_config   metadata_config;     /** Metadata configuration. */
+       bool ia_css_enable_raw_buffer_locking; /** Enable Raw Buffer Locking for HALv3 Support */
        bool lock_all;
-       /**< Lock all RAW buffers (true) or lock only buffers processed by
+       /** Lock all RAW buffers (true) or lock only buffers processed by
             video or preview pipe (false).
             This setting needs to be enabled to allow raw buffer locking
             without continuous viewfinder. */
@@ -140,15 +140,15 @@ struct ia_css_stream_config {
 
 struct ia_css_stream;
 
-/** Stream info, this struct describes properties of a stream after it has been
+/* Stream info, this struct describes properties of a stream after it has been
  *  created.
  */
 struct ia_css_stream_info {
        struct ia_css_metadata_info metadata_info;
-       /**< Info about the metadata layout, this contains the stride. */
+       /** Info about the metadata layout, this contains the stride. */
 };
 
-/** @brief Load default stream configuration
+/* @brief Load default stream configuration
  * @param[in,out]      stream_config The stream configuration.
  * @return     None
  *
@@ -165,7 +165,7 @@ void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config);
  * create the internal structures and fill in the configuration data and pipes
  */
 
- /** @brief Creates a stream
+ /* @brief Creates a stream
  * @param[in]  stream_config The stream configuration.
  * @param[in]  num_pipes The number of pipes to incorporate in the stream.
  * @param[in]  pipes The pipes.
@@ -180,7 +180,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
                                         struct ia_css_pipe *pipes[],
                                         struct ia_css_stream **stream);
 
-/** @brief Destroys a stream
+/* @brief Destroys a stream
  * @param[in]  stream The stream.
  * @return     IA_CSS_SUCCESS or the error code.
  *
@@ -189,7 +189,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
 enum ia_css_err
 ia_css_stream_destroy(struct ia_css_stream *stream);
 
-/** @brief Provides information about a stream
+/* @brief Provides information about a stream
  * @param[in]  stream The stream.
  * @param[out] stream_info The information about the stream.
  * @return     IA_CSS_SUCCESS or the error code.
@@ -200,7 +200,7 @@ enum ia_css_err
 ia_css_stream_get_info(const struct ia_css_stream *stream,
                       struct ia_css_stream_info *stream_info);
 
-/** @brief load (rebuild) a stream that was unloaded.
+/* @brief load (rebuild) a stream that was unloaded.
  * @param[in]  stream The stream
  * @return             IA_CSS_SUCCESS or the error code
  *
@@ -210,7 +210,7 @@ ia_css_stream_get_info(const struct ia_css_stream *stream,
 enum ia_css_err
 ia_css_stream_load(struct ia_css_stream *stream);
 
-/** @brief Starts the stream.
+/* @brief Starts the stream.
  * @param[in]  stream The stream.
  * @return IA_CSS_SUCCESS or the error code.
  *
@@ -223,7 +223,7 @@ ia_css_stream_load(struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_stream_start(struct ia_css_stream *stream);
 
-/** @brief Stop the stream.
+/* @brief Stop the stream.
  * @param[in]  stream The stream.
  * @return     IA_CSS_SUCCESS or the error code.
  *
@@ -233,7 +233,7 @@ ia_css_stream_start(struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_stream_stop(struct ia_css_stream *stream);
 
-/** @brief Check if a stream has stopped
+/* @brief Check if a stream has stopped
  * @param[in]  stream The stream.
  * @return     boolean flag
  *
@@ -242,7 +242,7 @@ ia_css_stream_stop(struct ia_css_stream *stream);
 bool
 ia_css_stream_has_stopped(struct ia_css_stream *stream);
 
-/** @brief     Destroy a stream according to the stream seed previously saved in the seed array.
+/* @brief      Destroy a stream according to the stream seed previously saved in the seed array.
  * @param[in]  stream The stream.
  * @return     IA_CSS_SUCCESS (no other errors are generated now)
  *
@@ -251,7 +251,7 @@ ia_css_stream_has_stopped(struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_stream_unload(struct ia_css_stream *stream);
 
-/** @brief Returns stream format
+/* @brief Returns stream format
  * @param[in]  stream The stream.
 * @return     format of the stream
  *
@@ -260,7 +260,7 @@ ia_css_stream_unload(struct ia_css_stream *stream);
 enum ia_css_stream_format
 ia_css_stream_get_format(const struct ia_css_stream *stream);
 
-/** @brief Check if the stream is configured for 2 pixels per clock
+/* @brief Check if the stream is configured for 2 pixels per clock
  * @param[in]  stream The stream.
  * @return     boolean flag
  *
@@ -270,7 +270,7 @@ ia_css_stream_get_format(const struct ia_css_stream *stream);
 bool
 ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream);
 
-/** @brief Sets the output frame stride (at the last pipe)
+/* @brief Sets the output frame stride (at the last pipe)
  * @param[in]  stream The stream
  * @param[in]  output_padded_width - the output buffer stride.
  * @return     ia_css_err
@@ -280,7 +280,7 @@ ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int output_padded_width);
 
-/** @brief Return max number of continuous RAW frames.
+/* @brief Return max number of continuous RAW frames.
  * @param[in]  stream The stream.
  * @param[out] buffer_depth The maximum number of continuous RAW frames.
  * @return     IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
@@ -291,7 +291,7 @@ ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int
 enum ia_css_err
 ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
 
-/** @brief Set nr of continuous RAW frames to use.
+/* @brief Set nr of continuous RAW frames to use.
  *
  * @param[in]  stream The stream.
  * @param[in]  buffer_depth    Number of frames to set.
@@ -302,7 +302,7 @@ ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_dep
 enum ia_css_err
 ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth);
 
-/** @brief Get number of continuous RAW frames to use.
+/* @brief Get number of continuous RAW frames to use.
  * @param[in]  stream The stream.
  * @param[out] buffer_depth The number of frames to use
  * @return     IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
@@ -315,7 +315,7 @@ ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
 
 /* ===== CAPTURE ===== */
 
-/** @brief Configure the continuous capture
+/* @brief Configure the continuous capture
  *
  * @param[in]  stream          The stream.
  * @param[in]  num_captures    The number of RAW frames to be processed to
@@ -347,7 +347,7 @@ ia_css_stream_capture(struct ia_css_stream *stream,
                        unsigned int skip,
                        int offset);
 
-/** @brief Specify which raw frame to tag based on exp_id found in frame info
+/* @brief Specify which raw frame to tag based on exp_id found in frame info
  *
  * @param[in]  stream The stream.
  * @param[in]  exp_id  The exposure id of the raw frame to tag.
@@ -363,7 +363,7 @@ ia_css_stream_capture_frame(struct ia_css_stream *stream,
 
 /* ===== VIDEO ===== */
 
-/** @brief Send streaming data into the css input FIFO
+/* @brief Send streaming data into the css input FIFO
  *
  * @param[in]  stream  The stream.
 * @param[in]  data    Pointer to the pixels to be sent.
@@ -395,7 +395,7 @@ ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
                               unsigned int width,
                               unsigned int height);
 
-/** @brief Start an input frame on the CSS input FIFO.
+/* @brief Start an input frame on the CSS input FIFO.
  *
  * @param[in]  stream The stream.
  * @return     None
@@ -411,7 +411,7 @@ ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
 void
 ia_css_stream_start_input_frame(const struct ia_css_stream *stream);
 
-/** @brief Send a line of input data into the CSS input FIFO.
+/* @brief Send a line of input data into the CSS input FIFO.
  *
  * @param[in]  stream          The stream.
  * @param[in]  data    Array of the first line of image data.
@@ -435,7 +435,7 @@ ia_css_stream_send_input_line(const struct ia_css_stream *stream,
                              const unsigned short *data2,
                              unsigned int width2);
 
-/** @brief Send a line of input embedded data into the CSS input FIFO.
+/* @brief Send a line of input embedded data into the CSS input FIFO.
  *
  * @param[in]  stream     Pointer of the stream.
  * @param[in]  format     Format of the embedded data.
@@ -457,7 +457,7 @@ ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
                              const unsigned short *data,
                              unsigned int width);
 
-/** @brief End an input frame on the CSS input FIFO.
+/* @brief End an input frame on the CSS input FIFO.
  *
  * @param[in]  stream  The stream.
  * @return     None
@@ -467,7 +467,7 @@ ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
 void
 ia_css_stream_end_input_frame(const struct ia_css_stream *stream);
 
-/** @brief send a request flash command to SP
+/* @brief send a request flash command to SP
  *
  * @param[in]  stream The stream.
  * @return     None
@@ -481,7 +481,7 @@ ia_css_stream_end_input_frame(const struct ia_css_stream *stream);
 void
 ia_css_stream_request_flash(struct ia_css_stream *stream);
 
-/** @brief Configure a stream with filter coefficients.
+/* @brief Configure a stream with filter coefficients.
  *        @deprecated {Replaced by
  *                                ia_css_pipe_set_isp_config_on_pipe()}
  *
@@ -503,7 +503,7 @@ ia_css_stream_set_isp_config_on_pipe(struct ia_css_stream *stream,
                             const struct ia_css_isp_config *config,
                             struct ia_css_pipe *pipe);
 
-/** @brief Configure a stream with filter coefficients.
+/* @brief Configure a stream with filter coefficients.
  *        @deprecated {Replaced by
  *                                ia_css_pipe_set_isp_config()}
  * @param[in]  stream  The stream.
@@ -523,7 +523,7 @@ ia_css_stream_set_isp_config(
        struct ia_css_stream *stream,
        const struct ia_css_isp_config *config);
 
-/** @brief Get selected configuration settings
+/* @brief Get selected configuration settings
  * @param[in]  stream  The stream.
  * @param[out] config  Configuration settings.
  * @return             None
@@ -532,7 +532,7 @@ void
 ia_css_stream_get_isp_config(const struct ia_css_stream *stream,
                             struct ia_css_isp_config *config);
 
-/** @brief allocate continuous raw frames for continuous capture
+/* @brief allocate continuous raw frames for continuous capture
  * @param[in]  stream The stream.
  * @return IA_CSS_SUCCESS or error code.
  *
@@ -544,7 +544,7 @@ ia_css_stream_get_isp_config(const struct ia_css_stream *stream,
 enum ia_css_err
 ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream);
 
-/** @brief allocate continuous raw frames for continuous capture
+/* @brief allocate continuous raw frames for continuous capture
  * @param[in]  stream The stream.
  * @return     IA_CSS_SUCCESS or error code.
  *
@@ -555,7 +555,7 @@ ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_update_continuous_frames(struct ia_css_stream *stream);
 
-/** @brief ia_css_unlock_raw_frame . unlock a raw frame (HALv3 Support)
+/* @brief ia_css_unlock_raw_frame . unlock a raw frame (HALv3 Support)
  * @param[in]  stream The stream.
  * @param[in]   exp_id exposure id that uniquely identifies the locked Raw Frame Buffer
  * @return      ia_css_err IA_CSS_SUCCESS or error code
@@ -567,7 +567,7 @@ ia_css_update_continuous_frames(struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id);
 
-/** @brief ia_css_en_dz_capt_pipe . Enable/Disable digital zoom for capture pipe
+/* @brief ia_css_en_dz_capt_pipe . Enable/Disable digital zoom for capture pipe
  * @param[in]   stream The stream.
  * @param[in]   enable - true, disable - false
  * @return      None
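Pulling the stream calls above together, a hedged end-to-end sketch: the type of the num_pipes argument to ia_css_stream_create() is not visible in these hunks (only its @param entry is), so the plain int below is an assumption, and a real driver would wait on events rather than busy-poll for the stopped state.

static enum ia_css_err run_stream(struct ia_css_pipe *pipe)
{
	struct ia_css_stream_config cfg;
	struct ia_css_pipe *pipes[1] = { pipe };
	struct ia_css_stream *stream;
	enum ia_css_err err;

	ia_css_stream_config_defaults(&cfg);
	cfg.mode = IA_CSS_INPUT_MODE_SENSOR;
	/* ... fill in the source port and input format fields here ... */

	err = ia_css_stream_create(&cfg, 1, pipes, &stream);
	if (err != IA_CSS_SUCCESS)
		return err;

	err = ia_css_stream_start(stream);
	if (err == IA_CSS_SUCCESS) {
		/* ... enqueue/dequeue buffers on the pipe while streaming ... */
		err = ia_css_stream_stop(stream);
		while (err == IA_CSS_SUCCESS && !ia_css_stream_has_stopped(stream))
			; /* busy-poll only for illustration */
	}

	ia_css_stream_destroy(stream);
	return err;
}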
index 575bb28..b256d7c 100644 (file)
@@ -31,47 +31,47 @@ more details.
 #ifndef __IA_CSS_TIMER_H
 #define __IA_CSS_TIMER_H
 
-/** @file
+/* @file
  * Timer interface definitions
  */
 #include <type_support.h>              /* for uint32_t */
 #include "ia_css_err.h"
 
-/** @brief timer reading definition */
+/* @brief timer reading definition */
 typedef uint32_t clock_value_t;
 
-/** @brief 32 bit clock tick (timestamp based on timer-value of CSS-internal timer) */
+/* @brief 32 bit clock tick (timestamp based on timer-value of CSS-internal timer) */
 struct ia_css_clock_tick {
-       clock_value_t ticks; /**< measured time in ticks.*/
+       clock_value_t ticks; /** measured time in ticks.*/
 };
 
-/** @brief TIMER event codes */
+/* @brief TIMER event codes */
 enum ia_css_tm_event {
        IA_CSS_TM_EVENT_AFTER_INIT,
-       /**< Timer Event after Initialization */
+       /** Timer Event after Initialization */
        IA_CSS_TM_EVENT_MAIN_END,
-       /**< Timer Event after end of Main */
+       /** Timer Event after end of Main */
        IA_CSS_TM_EVENT_THREAD_START,
-       /**< Timer Event after thread start */
+       /** Timer Event after thread start */
        IA_CSS_TM_EVENT_FRAME_PROC_START,
-       /**< Timer Event after Frame Process Start */
+       /** Timer Event after Frame Process Start */
        IA_CSS_TM_EVENT_FRAME_PROC_END
-       /**< Timer Event after Frame Process End */
+       /** Timer Event after Frame Process End */
 };
 
-/** @brief code measurement common struct */
+/* @brief code measurement common struct */
 struct ia_css_time_meas {
-       clock_value_t   start_timer_value;      /**< measured time in ticks */
-       clock_value_t   end_timer_value;        /**< measured time in ticks */
+       clock_value_t   start_timer_value;      /** measured time in ticks */
+       clock_value_t   end_timer_value;        /** measured time in ticks */
 };
 
 /**@brief SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT checks to ensure correct alignment for struct ia_css_clock_tick. */
 #define SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT sizeof(clock_value_t)
-/** @brief checks to ensure correct alignment for ia_css_time_meas. */
+/* @brief checks to ensure correct alignment for ia_css_time_meas. */
 #define SIZE_OF_IA_CSS_TIME_MEAS_STRUCT (sizeof(clock_value_t) \
                                        + sizeof(clock_value_t))
 
-/** @brief API to fetch timer count directly
+/* @brief API to fetch timer count directly
 *
 * @param curr_ts [out] measured count value
 * @return IA_CSS_SUCCESS if success
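A small helper showing how the measurement struct above is typically consumed; wrap-around of the 32-bit tick counter is deliberately ignored in this sketch.

static clock_value_t elapsed_ticks(const struct ia_css_time_meas *m)
{
	/* Both fields are raw CSS timer readings in ticks. */
	return m->end_timer_value - m->start_timer_value;
}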
index 9238a33..81498bd 100644 (file)
 #ifndef __IA_CSS_TPG_H
 #define __IA_CSS_TPG_H
 
-/** @file
+/* @file
  * This file contains support for the test pattern generator (TPG)
  */
 
-/** Enumerate the TPG IDs.
+/* Enumerate the TPG IDs.
  */
 enum ia_css_tpg_id {
        IA_CSS_TPG_ID0,
@@ -35,7 +35,7 @@ enum ia_css_tpg_id {
  */
 #define N_CSS_TPG_IDS (IA_CSS_TPG_ID2+1)
 
-/** Enumerate the TPG modes.
+/* Enumerate the TPG modes.
  */
 enum ia_css_tpg_mode {
        IA_CSS_TPG_MODE_RAMP,
@@ -44,7 +44,7 @@ enum ia_css_tpg_mode {
        IA_CSS_TPG_MODE_MONO
 };
 
-/** @brief Configure the test pattern generator.
+/* @brief Configure the test pattern generator.
  *
  * Configure the Test Pattern Generator, the way these values are used to
  * generate the pattern can be seen in the HRT extension for the test pattern
index 5fec3d5..725b900 100644 (file)
@@ -16,7 +16,7 @@
 #ifndef _IA_CSS_TYPES_H
 #define _IA_CSS_TYPES_H
 
-/** @file
+/* @file
  * This file contains types used for the ia_css parameters.
  * These types are in a separate file because they are expected
  * to be used in software layers that do not access the CSS API
@@ -58,7 +58,7 @@
 #include "isp/kernels/output/output_1.0/ia_css_output_types.h"
 
 #define IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED
-/**< Should be removed after Driver adaptation will be done */
+/** Should be removed after Driver adaptation will be done */
 
 #define IA_CSS_VERSION_MAJOR    2
 #define IA_CSS_VERSION_MINOR    0
@@ -69,8 +69,8 @@
 /* Min and max exposure IDs. These macros are here to allow
  * the drivers to get this information. Changing these macros
  * constitutes a CSS API change. */
-#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1   /**< Minimum exposure ID */
-#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /**< Maximum exposure ID */
+#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1   /** Minimum exposure ID */
+#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /** Maximum exposure ID */
 
 /* opaque types */
 struct ia_css_isp_parameters;
@@ -79,72 +79,72 @@ struct ia_css_memory_offsets;
 struct ia_css_config_memory_offsets;
 struct ia_css_state_memory_offsets;
 
-/** Virtual address within the CSS address space. */
+/* Virtual address within the CSS address space. */
 typedef uint32_t ia_css_ptr;
 
-/** Generic resolution structure.
+/* Generic resolution structure.
  */
 struct ia_css_resolution {
-       uint32_t width;  /**< Width */
-       uint32_t height; /**< Height */
+       uint32_t width;  /** Width */
+       uint32_t height; /** Height */
 };
 
-/** Generic coordinate structure.
+/* Generic coordinate structure.
  */
 struct ia_css_coordinate {
-       int32_t x;      /**< Value of a coordinate on the horizontal axis */
-       int32_t y;      /**< Value of a coordinate on the vertical axis */
+       int32_t x;      /** Value of a coordinate on the horizontal axis */
+       int32_t y;      /** Value of a coordinate on the vertical axis */
 };
 
-/** Vector with signed values. This is used to indicate motion for
+/* Vector with signed values. This is used to indicate motion for
  * Digital Image Stabilization.
  */
 struct ia_css_vector {
-       int32_t x; /**< horizontal motion (in pixels) */
-       int32_t y; /**< vertical motion (in pixels) */
+       int32_t x; /** horizontal motion (in pixels) */
+       int32_t y; /** vertical motion (in pixels) */
 };
 
 /* Short hands */
 #define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0
 #define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0
 
-/** CSS data descriptor */
+/* CSS data descriptor */
 struct ia_css_data {
-       ia_css_ptr address; /**< CSS virtual address */
-       uint32_t   size;    /**< Disabled if 0 */
+       ia_css_ptr address; /** CSS virtual address */
+       uint32_t   size;    /** Disabled if 0 */
 };
 
-/** Host data descriptor */
+/* Host data descriptor */
 struct ia_css_host_data {
-       char      *address; /**< Host address */
-       uint32_t   size;    /**< Disabled if 0 */
+       char      *address; /** Host address */
+       uint32_t   size;    /** Disabled if 0 */
 };
 
-/** ISP data descriptor */
+/* ISP data descriptor */
 struct ia_css_isp_data {
-       uint32_t   address; /**< ISP address */
-       uint32_t   size;    /**< Disabled if 0 */
+       uint32_t   address; /** ISP address */
+       uint32_t   size;    /** Disabled if 0 */
 };
 
-/** Shading Correction types. */
+/* Shading Correction types. */
 enum ia_css_shading_correction_type {
 #ifndef ISP2401
-       IA_CSS_SHADING_CORRECTION_TYPE_1 /**< Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400) */
+       IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400) */
 #else
-       IA_CSS_SHADING_CORRECTION_NONE,  /**< Shading Correction is not processed in the pipe. */
-       IA_CSS_SHADING_CORRECTION_TYPE_1 /**< Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */
+       IA_CSS_SHADING_CORRECTION_NONE,  /** Shading Correction is not processed in the pipe. */
+       IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */
 #endif
 
-       /**< More shading correction types can be added in the future. */
+       /** More shading correction types can be added in the future. */
 };
 
-/** Shading Correction information. */
+/* Shading Correction information. */
 struct ia_css_shading_info {
-       enum ia_css_shading_correction_type type; /**< Shading Correction type. */
+       enum ia_css_shading_correction_type type; /** Shading Correction type. */
 
-       union { /** Shading Correction information of each Shading Correction types. */
+       union { /* Shading Correction information for each Shading Correction type. */
 
-               /** Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1.
+               /* Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1.
                 *
                 *  This structure contains the information necessary to generate
                 *  the shading table required in the isp.
@@ -288,20 +288,20 @@ struct ia_css_shading_info {
                 */
                struct {
 #ifndef ISP2401
-                       uint32_t enable;        /**< Shading correction enabled.
+                       uint32_t enable;        /** Shading correction enabled.
                                                     0:disabled, 1:enabled */
-                       uint32_t num_hor_grids; /**< Number of data points per line
+                       uint32_t num_hor_grids; /** Number of data points per line
                                                     per color on shading table. */
-                       uint32_t num_ver_grids; /**< Number of lines of data points
+                       uint32_t num_ver_grids; /** Number of lines of data points
                                                     per color on shading table. */
-                       uint32_t bqs_per_grid_cell; /**< Grid cell size
+                       uint32_t bqs_per_grid_cell; /** Grid cell size
                                                in BQ(Bayer Quad) unit.
                                                (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
                                                Valid values are 8,16,32,64. */
 #else
-                       uint32_t num_hor_grids; /**< Number of data points per line per color on shading table. */
-                       uint32_t num_ver_grids; /**< Number of lines of data points per color on shading table. */
-                       uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ unit.
+                       uint32_t num_hor_grids; /** Number of data points per line per color on shading table. */
+                       uint32_t num_ver_grids; /** Number of lines of data points per color on shading table. */
+                       uint32_t bqs_per_grid_cell; /** Grid cell size in BQ unit.
                                                         NOTE: bqs = size in BQ(Bayer Quad) unit.
                                                               1BQ means {Gr,R,B,Gb} (2x2 pixels).
                                                               Horizontal 1 bqs corresponds to horizontal 2 pixels.
@@ -310,13 +310,13 @@ struct ia_css_shading_info {
                        uint32_t bayer_scale_hor_ratio_in;
                        uint32_t bayer_scale_hor_ratio_out;
 #ifndef ISP2401
-                       /**< Horizontal ratio of bayer scaling
+                       /** Horizontal ratio of bayer scaling
                        between input width and output width, for the scaling
                        which should be done before shading correction.
                          output_width = input_width * bayer_scale_hor_ratio_out
                                                / bayer_scale_hor_ratio_in */
 #else
-                               /**< Horizontal ratio of bayer scaling between input width and output width,
+                               /** Horizontal ratio of bayer scaling between input width and output width,
                                     for the scaling which should be done before shading correction.
                                        output_width = input_width * bayer_scale_hor_ratio_out
                                                                        / bayer_scale_hor_ratio_in + 0.5 */
@@ -324,30 +324,30 @@ struct ia_css_shading_info {
                        uint32_t bayer_scale_ver_ratio_in;
                        uint32_t bayer_scale_ver_ratio_out;
 #ifndef ISP2401
-                       /**< Vertical ratio of bayer scaling
+                       /** Vertical ratio of bayer scaling
                        between input height and output height, for the scaling
                        which should be done before shading correction.
                          output_height = input_height * bayer_scale_ver_ratio_out
                                                / bayer_scale_ver_ratio_in */
                        uint32_t sc_bayer_origin_x_bqs_on_shading_table;
-                       /**< X coordinate (in bqs) of bayer origin on shading table.
+                       /** X coordinate (in bqs) of bayer origin on shading table.
                        This indicates the left-most pixel of bayer
                        (not include margin) inputted to the shading correction.
                        This corresponds to the left-most pixel of bayer
                        inputted to isp from sensor. */
                        uint32_t sc_bayer_origin_y_bqs_on_shading_table;
-                       /**< Y coordinate (in bqs) of bayer origin on shading table.
+                       /** Y coordinate (in bqs) of bayer origin on shading table.
                        This indicates the top pixel of bayer
                        (not include margin) inputted to the shading correction.
                        This corresponds to the top pixel of bayer
                        inputted to isp from sensor. */
 #else
-                               /**< Vertical ratio of bayer scaling between input height and output height,
+                               /** Vertical ratio of bayer scaling between input height and output height,
                                     for the scaling which should be done before shading correction.
                                        output_height = input_height * bayer_scale_ver_ratio_out
                                                                        / bayer_scale_ver_ratio_in + 0.5 */
                        struct ia_css_resolution isp_input_sensor_data_res_bqs;
-                               /**< Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling.
+                               /** Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling.
                                     NOTE: This is NOT the size of the physical sensor size.
                                           CSS requests the driver that ISP inputs sensor data
                                           by the size of isp_input_sensor_data_res_bqs.
@@ -357,22 +357,22 @@ struct ia_css_shading_info {
                                           ISP assumes the area of isp_input_sensor_data_res_bqs
                                           is centered on the physical sensor. */
                        struct ia_css_resolution sensor_data_res_bqs;
-                               /**< Sensor data size (in bqs) at shading correction.
+                               /** Sensor data size (in bqs) at shading correction.
                                     This is the size AFTER bayer scaling. */
                        struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl;
-                               /**< Origin of sensor data area positioned on shading table at shading correction.
+                               /** Origin of sensor data area positioned on shading table at shading correction.
                                     The coordinate x,y should be positive values. */
 #endif
                } type_1;
 
-               /**< More structures can be added here when more shading correction types will be added
+               /** More structures can be added here as more shading correction types are added
                     in the future. */
        } info;
 };
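
As an aside, a small standalone sketch of the bayer-scaling relation quoted in the comments above (output = input * ratio_out / ratio_in, with the ISP2401 variant rounding to nearest). The function name is illustrative; the "+ 0.5" rounding is approximated by adding half the divisor before the integer division. The same expression applies to widths and heights with the horizontal and vertical ratio pairs respectively:

#include <stdint.h>

static uint32_t bayer_scaled_size(uint32_t input, uint32_t ratio_out,
                                  uint32_t ratio_in, int round_to_nearest)
{
        uint64_t num = (uint64_t)input * ratio_out;

        if (round_to_nearest)           /* ISP2401-style "+ 0.5" */
                num += ratio_in / 2;

        return (uint32_t)(num / ratio_in);      /* ISP2400 style simply truncates */
}
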
 
 #ifndef ISP2401
 
-/** Default Shading Correction information of Shading Correction Type 1. */
+/* Default Shading Correction information of Shading Correction Type 1. */
 #define DEFAULT_SHADING_INFO_TYPE_1 \
 { \
        IA_CSS_SHADING_CORRECTION_TYPE_1,       /* type */ \
@@ -394,7 +394,7 @@ struct ia_css_shading_info {
 
 #else
 
-/** Default Shading Correction information of Shading Correction Type 1. */
+/* Default Shading Correction information of Shading Correction Type 1. */
 #define DEFAULT_SHADING_INFO_TYPE_1 \
 { \
        IA_CSS_SHADING_CORRECTION_TYPE_1,       /* type */ \
@@ -416,27 +416,27 @@ struct ia_css_shading_info {
 
 #endif
 
-/** Default Shading Correction information. */
+/* Default Shading Correction information. */
 #define DEFAULT_SHADING_INFO   DEFAULT_SHADING_INFO_TYPE_1
 
-/** structure that describes the 3A and DIS grids */
+/* structure that describes the 3A and DIS grids */
 struct ia_css_grid_info {
-       /** \name ISP input size
+       /* \name ISP input size
          * that is visible for user
          * @{
          */
        uint32_t isp_in_width;
        uint32_t isp_in_height;
-       /** @}*/
+       /* @}*/
 
-       struct ia_css_3a_grid_info  s3a_grid; /**< 3A grid info */
+       struct ia_css_3a_grid_info  s3a_grid; /** 3A grid info */
        union ia_css_dvs_grid_u dvs_grid;
-               /**< All types of DVS statistics grid info union */
+               /** All types of DVS statistics grid info union */
 
        enum ia_css_vamem_type vamem_type;
 };
 
-/** defaults for ia_css_grid_info structs */
+/* defaults for ia_css_grid_info structs */
 #define DEFAULT_GRID_INFO \
 { \
        0,                              /* isp_in_width */ \
@@ -446,25 +446,25 @@ struct ia_css_grid_info {
        IA_CSS_VAMEM_TYPE_1             /* vamem_type */ \
 }
 
-/** Morphing table, used for geometric distortion and chromatic abberration
+/* Morphing table, used for geometric distortion and chromatic aberration
  *  correction (GDCAC, also called GDC).
  *  This table describes the imperfections introduced by the lens, the
  *  advanced ISP can correct for these imperfections using this table.
  */
 struct ia_css_morph_table {
-       uint32_t enable; /**< To disable GDC, set this field to false. The
+       uint32_t enable; /** To disable GDC, set this field to false. The
                          coordinates fields can be set to NULL in this case. */
-       uint32_t height; /**< Table height */
-       uint32_t width;  /**< Table width */
+       uint32_t height; /** Table height */
+       uint32_t width;  /** Table width */
        uint16_t *coordinates_x[IA_CSS_MORPH_TABLE_NUM_PLANES];
-       /**< X coordinates that describe the sensor imperfection */
+       /** X coordinates that describe the sensor imperfection */
        uint16_t *coordinates_y[IA_CSS_MORPH_TABLE_NUM_PLANES];
-       /**< Y coordinates that describe the sensor imperfection */
+       /** Y coordinates that describe the sensor imperfection */
 };
 
 struct ia_css_dvs_6axis_config {
        unsigned int exp_id;
-       /**< Exposure ID, see ia_css_event_public.h for more detail */
+       /** Exposure ID, see ia_css_event_public.h for more detail */
        uint32_t width_y;
        uint32_t height_y;
        uint32_t width_uv;
@@ -479,16 +479,16 @@ struct ia_css_dvs_6axis_config {
  * This specifies the coordinates (x,y)
  */
 struct ia_css_point {
-       int32_t x; /**< x coordinate */
-       int32_t y; /**< y coordinate */
+       int32_t x; /** x coordinate */
+       int32_t y; /** y coordinate */
 };
 
 /**
  * This specifies the region
  */
 struct ia_css_region {
-       struct ia_css_point origin; /**< Starting point coordinates for the region */
-       struct ia_css_resolution resolution; /**< Region resolution */
+       struct ia_css_point origin; /** Starting point coordinates for the region */
+       struct ia_css_resolution resolution; /** Region resolution */
 };
 
 /**
@@ -509,30 +509,30 @@ struct ia_css_region {
  * y + height <= effective input height
  */
 struct ia_css_dz_config {
-       uint32_t dx; /**< Horizontal zoom factor */
-       uint32_t dy; /**< Vertical zoom factor */
-       struct ia_css_region zoom_region; /**< region for zoom */
+       uint32_t dx; /** Horizontal zoom factor */
+       uint32_t dy; /** Vertical zoom factor */
+       struct ia_css_region zoom_region; /** region for zoom */
 };
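
A sanity-check sketch for the zoom-region constraints described above (x + width and y + height must not exceed the effective input size). The helper is ours and assumes the ia_css_point, ia_css_region and ia_css_resolution types from this header are in scope:

#include <stdbool.h>
#include <stdint.h>

static bool zoom_region_fits(const struct ia_css_region *r,
                             const struct ia_css_resolution *effective_in)
{
        if (r->origin.x < 0 || r->origin.y < 0)
                return false;

        return (uint64_t)r->origin.x + r->resolution.width  <= effective_in->width &&
               (uint64_t)r->origin.y + r->resolution.height <= effective_in->height;
}
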
 
-/** The still capture mode, this can be RAW (simply copy sensor input to DDR),
+/* The still capture mode, this can be RAW (simply copy sensor input to DDR),
  *  Primary ISP, the Advanced ISP (GDC) or the low-light ISP (ANR).
  */
 enum ia_css_capture_mode {
-       IA_CSS_CAPTURE_MODE_RAW,      /**< no processing, copy data only */
-       IA_CSS_CAPTURE_MODE_BAYER,    /**< bayer processing, up to demosaic */
-       IA_CSS_CAPTURE_MODE_PRIMARY,  /**< primary ISP */
-       IA_CSS_CAPTURE_MODE_ADVANCED, /**< advanced ISP (GDC) */
-       IA_CSS_CAPTURE_MODE_LOW_LIGHT /**< low light ISP (ANR) */
+       IA_CSS_CAPTURE_MODE_RAW,      /** no processing, copy data only */
+       IA_CSS_CAPTURE_MODE_BAYER,    /** bayer processing, up to demosaic */
+       IA_CSS_CAPTURE_MODE_PRIMARY,  /** primary ISP */
+       IA_CSS_CAPTURE_MODE_ADVANCED, /** advanced ISP (GDC) */
+       IA_CSS_CAPTURE_MODE_LOW_LIGHT /** low light ISP (ANR) */
 };
 
 struct ia_css_capture_config {
-       enum ia_css_capture_mode mode; /**< Still capture mode */
-       uint32_t enable_xnr;           /**< Enable/disable XNR */
+       enum ia_css_capture_mode mode; /** Still capture mode */
+       uint32_t enable_xnr;           /** Enable/disable XNR */
        uint32_t enable_raw_output;
-       bool enable_capture_pp_bli;    /**< Enable capture_pp_bli mode */
+       bool enable_capture_pp_bli;    /** Enable capture_pp_bli mode */
 };
 
-/** default settings for ia_css_capture_config structs */
+/* default settings for ia_css_capture_config structs */
 #define DEFAULT_CAPTURE_CONFIG \
 { \
        IA_CSS_CAPTURE_MODE_PRIMARY,    /* mode (capture) */ \
@@ -542,7 +542,7 @@ struct ia_css_capture_config {
 }
 
 
-/** ISP filter configuration. This is a collection of configurations
+/* ISP filter configuration. This is a collection of configurations
  *  for each of the ISP filters (modules).
  *
  *  NOTE! The contents of all pointers is copied when get or set with the
@@ -557,98 +557,98 @@ struct ia_css_capture_config {
  *    ["ISP block", 2only] : ISP block is used only for ISP2.
  */
 struct ia_css_isp_config {
-       struct ia_css_wb_config   *wb_config;   /**< White Balance
+       struct ia_css_wb_config   *wb_config;   /** White Balance
                                                        [WB1, 1&2] */
-       struct ia_css_cc_config   *cc_config;   /**< Color Correction
+       struct ia_css_cc_config   *cc_config;   /** Color Correction
                                                        [CSC1, 1only] */
-       struct ia_css_tnr_config  *tnr_config;  /**< Temporal Noise Reduction
+       struct ia_css_tnr_config  *tnr_config;  /** Temporal Noise Reduction
                                                        [TNR1, 1&2] */
-       struct ia_css_ecd_config  *ecd_config;  /**< Eigen Color Demosaicing
+       struct ia_css_ecd_config  *ecd_config;  /** Eigen Color Demosaicing
                                                        [DE2, 2only] */
-       struct ia_css_ynr_config  *ynr_config;  /**< Y(Luma) Noise Reduction
+       struct ia_css_ynr_config  *ynr_config;  /** Y(Luma) Noise Reduction
                                                        [YNR2&YEE2, 2only] */
-       struct ia_css_fc_config   *fc_config;   /**< Fringe Control
+       struct ia_css_fc_config   *fc_config;   /** Fringe Control
                                                        [FC2, 2only] */
-       struct ia_css_formats_config   *formats_config; /**< Formats Control for main output
+       struct ia_css_formats_config   *formats_config; /** Formats Control for main output
                                                        [FORMATS, 1&2] */
-       struct ia_css_cnr_config  *cnr_config;  /**< Chroma Noise Reduction
+       struct ia_css_cnr_config  *cnr_config;  /** Chroma Noise Reduction
                                                        [CNR2, 2only] */
-       struct ia_css_macc_config *macc_config; /**< MACC
+       struct ia_css_macc_config *macc_config; /** MACC
                                                        [MACC2, 2only] */
-       struct ia_css_ctc_config  *ctc_config;  /**< Chroma Tone Control
+       struct ia_css_ctc_config  *ctc_config;  /** Chroma Tone Control
                                                        [CTC2, 2only] */
-       struct ia_css_aa_config   *aa_config;   /**< YUV Anti-Aliasing
+       struct ia_css_aa_config   *aa_config;   /** YUV Anti-Aliasing
                                                        [AA2, 2only]
                                                        (not used currently) */
-       struct ia_css_aa_config   *baa_config;  /**< Bayer Anti-Aliasing
+       struct ia_css_aa_config   *baa_config;  /** Bayer Anti-Aliasing
                                                        [BAA2, 1&2] */
-       struct ia_css_ce_config   *ce_config;   /**< Chroma Enhancement
+       struct ia_css_ce_config   *ce_config;   /** Chroma Enhancement
                                                        [CE1, 1only] */
        struct ia_css_dvs_6axis_config *dvs_6axis_config;
-       struct ia_css_ob_config   *ob_config;  /**< Objective Black
+       struct ia_css_ob_config   *ob_config;  /** Objective Black
                                                        [OB1, 1&2] */
-       struct ia_css_dp_config   *dp_config;  /**< Defect Pixel Correction
+       struct ia_css_dp_config   *dp_config;  /** Defect Pixel Correction
                                                        [DPC1/DPC2, 1&2] */
-       struct ia_css_nr_config   *nr_config;  /**< Noise Reduction
+       struct ia_css_nr_config   *nr_config;  /** Noise Reduction
                                                        [BNR1&YNR1&CNR1, 1&2]*/
-       struct ia_css_ee_config   *ee_config;  /**< Edge Enhancement
+       struct ia_css_ee_config   *ee_config;  /** Edge Enhancement
                                                        [YEE1, 1&2] */
-       struct ia_css_de_config   *de_config;  /**< Demosaic
+       struct ia_css_de_config   *de_config;  /** Demosaic
                                                        [DE1, 1only] */
-       struct ia_css_gc_config   *gc_config;  /**< Gamma Correction (for YUV)
+       struct ia_css_gc_config   *gc_config;  /** Gamma Correction (for YUV)
                                                        [GC1, 1only] */
-       struct ia_css_anr_config  *anr_config; /**< Advanced Noise Reduction */
-       struct ia_css_3a_config   *s3a_config; /**< 3A Statistics config */
-       struct ia_css_xnr_config  *xnr_config; /**< eXtra Noise Reduction */
-       struct ia_css_dz_config   *dz_config;  /**< Digital Zoom */
-       struct ia_css_cc_config *yuv2rgb_cc_config; /**< Color Correction
+       struct ia_css_anr_config  *anr_config; /** Advanced Noise Reduction */
+       struct ia_css_3a_config   *s3a_config; /** 3A Statistics config */
+       struct ia_css_xnr_config  *xnr_config; /** eXtra Noise Reduction */
+       struct ia_css_dz_config   *dz_config;  /** Digital Zoom */
+       struct ia_css_cc_config *yuv2rgb_cc_config; /** Color Correction
                                                        [CCM2, 2only] */
-       struct ia_css_cc_config *rgb2yuv_cc_config; /**< Color Correction
+       struct ia_css_cc_config *rgb2yuv_cc_config; /** Color Correction
                                                        [CSC2, 2only] */
-       struct ia_css_macc_table  *macc_table;  /**< MACC
+       struct ia_css_macc_table  *macc_table;  /** MACC
                                                        [MACC1/MACC2, 1&2]*/
-       struct ia_css_gamma_table *gamma_table; /**< Gamma Correction (for YUV)
+       struct ia_css_gamma_table *gamma_table; /** Gamma Correction (for YUV)
                                                        [GC1, 1only] */
-       struct ia_css_ctc_table   *ctc_table;   /**< Chroma Tone Control
+       struct ia_css_ctc_table   *ctc_table;   /** Chroma Tone Control
                                                        [CTC1, 1only] */
 
-       /** \deprecated */
-       struct ia_css_xnr_table   *xnr_table;   /**< eXtra Noise Reduction
+       /* \deprecated */
+       struct ia_css_xnr_table   *xnr_table;   /** eXtra Noise Reduction
                                                        [XNR1, 1&2] */
-       struct ia_css_rgb_gamma_table *r_gamma_table;/**< sRGB Gamma Correction
+       struct ia_css_rgb_gamma_table *r_gamma_table;/** sRGB Gamma Correction
                                                        [GC2, 2only] */
-       struct ia_css_rgb_gamma_table *g_gamma_table;/**< sRGB Gamma Correction
+       struct ia_css_rgb_gamma_table *g_gamma_table;/** sRGB Gamma Correction
                                                        [GC2, 2only] */
-       struct ia_css_rgb_gamma_table *b_gamma_table;/**< sRGB Gamma Correction
+       struct ia_css_rgb_gamma_table *b_gamma_table;/** sRGB Gamma Correction
                                                        [GC2, 2only] */
-       struct ia_css_vector      *motion_vector; /**< For 2-axis DVS */
+       struct ia_css_vector      *motion_vector; /** For 2-axis DVS */
        struct ia_css_shading_table *shading_table;
        struct ia_css_morph_table   *morph_table;
-       struct ia_css_dvs_coefficients *dvs_coefs; /**< DVS 1.0 coefficients */
-       struct ia_css_dvs2_coefficients *dvs2_coefs; /**< DVS 2.0 coefficients */
+       struct ia_css_dvs_coefficients *dvs_coefs; /** DVS 1.0 coefficients */
+       struct ia_css_dvs2_coefficients *dvs2_coefs; /** DVS 2.0 coefficients */
        struct ia_css_capture_config   *capture_config;
        struct ia_css_anr_thres   *anr_thres;
-       /** @deprecated{Old shading settings, see bugzilla bz675 for details} */
+       /* @deprecated{Old shading settings, see bugzilla bz675 for details} */
        struct ia_css_shading_settings *shading_settings;
-       struct ia_css_xnr3_config *xnr3_config; /**< eXtreme Noise Reduction v3 */
-       /** comment from Lasse: Be aware how this feature will affect coordinate
+       struct ia_css_xnr3_config *xnr3_config; /** eXtreme Noise Reduction v3 */
+       /* comment from Lasse: Be aware how this feature will affect coordinate
         *  normalization in different parts of the system. (e.g. face detection,
         *  touch focus, 3A statistics and windows of interest, shading correction,
         *  DVS, GDC) from IQ tool level and application level down-to ISP FW level.
         *  the risk for regression is not in the individual blocks, but how they
         *  integrate together. */
-       struct ia_css_output_config   *output_config;   /**< Main Output Mirroring, flipping */
+       struct ia_css_output_config   *output_config;   /** Main Output Mirroring, flipping */
 
 #ifdef ISP2401
-       struct ia_css_tnr3_kernel_config         *tnr3_config;           /**< TNR3 config */
+       struct ia_css_tnr3_kernel_config         *tnr3_config;           /** TNR3 config */
 #endif
-       struct ia_css_scaler_config              *scaler_config;         /**< Skylake: scaler config (optional) */
-       struct ia_css_formats_config             *formats_config_display;/**< Formats control for viewfinder/display output (optional)
+       struct ia_css_scaler_config              *scaler_config;         /** Skylake: scaler config (optional) */
+       struct ia_css_formats_config             *formats_config_display;/** Formats control for viewfinder/display output (optional)
                                                                                [OSYS, n/a] */
-       struct ia_css_output_config              *output_config_display; /**< Viewfinder/display output mirroring, flipping (optional) */
+       struct ia_css_output_config              *output_config_display; /** Viewfinder/display output mirroring, flipping (optional) */
 
-       struct ia_css_frame                      *output_frame;          /**< Output frame the config is to be applied to (optional) */
-       uint32_t                        isp_config_id;  /**< Unique ID to track which config was actually applied to a particular frame */
+       struct ia_css_frame                      *output_frame;          /** Output frame the config is to be applied to (optional) */
+       uint32_t                        isp_config_id;  /** Unique ID to track which config was actually applied to a particular frame */
 };
 
 #endif /* _IA_CSS_TYPES_H */
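
To illustrate how ia_css_isp_config is meant to act as a sparse overlay (only non-NULL members are applied, and the pointed-to contents are copied when the config is set, per the NOTE above), a hedged sketch; the function name is ours and the caller is assumed to pass a zoom config it has already filled in:

#include <string.h>

static void example_override_zoom_only(struct ia_css_isp_config *cfg,
                                       struct ia_css_dz_config *dz)
{
        memset(cfg, 0, sizeof(*cfg));   /* all other blocks stay unchanged */
        cfg->dz_config = dz;            /* only digital zoom is overridden */
}

Because the contents are copied on set, the zoom struct does not need to outlive the call that consumes this config.
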
index 48c5989..1e88901 100644 (file)
 #ifndef __IA_CSS_VERSION_H
 #define __IA_CSS_VERSION_H
 
-/** @file
+/* @file
  * This file contains functions to retrieve CSS-API version information
  */
 
 #include <ia_css_err.h>
 
-/** a common size for the version arrays */
+/* a common size for the version arrays */
 #define MAX_VERSION_SIZE       500
 
-/** @brief Retrieves the current CSS version
+/* @brief Retrieves the current CSS version
  * @param[out] version         A pointer to a buffer where to put the generated
  *                             version string. NULL is ignored.
  * @param[in]  max_size        Size of the version buffer. If version string
index 834eedb..0b95bf9 100644 (file)
 #ifndef __IA_CSS_AA2_TYPES_H
 #define __IA_CSS_AA2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Anti-Aliasing parameters.
 */
 
 
-/** Anti-Aliasing configuration.
+/* Anti-Aliasing configuration.
  *
  *  This structure is used both for YUV AA and Bayer AA.
  *
@@ -39,7 +39,7 @@
  *     ISP2: BAA2 is used.
  */
 struct ia_css_aa_config {
-       uint16_t strength;      /**< Strength of the filter.
+       uint16_t strength;      /** Strength of the filter.
                                        u0.13, [0,8191],
                                        default/ineffective 0 */
 };
index e205574..dc317a8 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_ANR_TYPES_H
 #define __IA_CSS_ANR_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Advanced Noise Reduction kernel v1
 */
 
 #define ANR_BPP                 10
 #define ANR_ELEMENT_BITS        ((CEIL_DIV(ANR_BPP, 8))*8)
 
-/** Advanced Noise Reduction configuration.
+/* Advanced Noise Reduction configuration.
  *  This is also known as Low-Light.
  */
 struct ia_css_anr_config {
-       int32_t threshold; /**< Threshold */
+       int32_t threshold; /** Threshold */
        int32_t thresholds[4*4*4];
        int32_t factors[3];
 };
index 3832ada..9b61131 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_ANR2_TYPES_H
 #define __IA_CSS_ANR2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Advanced Noise Reduction kernel v2
 */
 
@@ -23,7 +23,7 @@
 
 #define ANR_PARAM_SIZE          13
 
-/** Advanced Noise Reduction (ANR) thresholds */
+/* Advanced Noise Reduction (ANR) thresholds */
 struct ia_css_anr_thres {
        int16_t data[13*64];
 };
index 4a28985..3121417 100644 (file)
@@ -18,7 +18,7 @@
 #include "vmem.h"
 #include "ia_css_anr2_types.h"
 
-/** Advanced Noise Reduction (ANR) thresholds */
+/* Advanced Noise Reduction (ANR) thresholds */
 
 struct ia_css_isp_anr2_params {
        VMEM_ARRAY(data, ANR_PARAM_SIZE*ISP_VEC_NELEMS);
index 75ca760..a0d3554 100644 (file)
@@ -27,7 +27,7 @@
 #define BAYER_QUAD_HEIGHT 2
 #define NOF_BAYER_VECTORS 4
 
-/** bayer load/store */
+/* bayer load/store */
 struct sh_css_isp_bayer_ls_isp_config {
        uint32_t base_address[NUM_BAYER_LS];
        uint32_t width[NUM_BAYER_LS];
index 9ae27a9..ec1688e 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_BH_TYPES_H
 #define __IA_CSS_BH_TYPES_H
 
-/** Number of elements in the BH table.
+/* Number of elements in the BH table.
   * Should be consistent with hmem.h
   */
 #define IA_CSS_HMEM_BH_TABLE_SIZE      ISP_HIST_DEPTH
@@ -27,7 +27,7 @@
 #define BH_COLOR_Y     (3)
 #define BH_COLOR_NUM   (4)
 
-/** BH table */
+/* BH table */
 struct ia_css_bh_table {
        uint32_t hmem[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE];
 };
index 219fb83..87e0f19 100644 (file)
 #ifndef __IA_CSS_BNLM_TYPES_H
 #define __IA_CSS_BNLM_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Bayer Non-Linear Mean parameters.
 */
 
 #include "type_support.h" /* int32_t */
 
-/** Bayer Non-Linear Mean configuration
+/* Bayer Non-Linear Mean configuration
  *
  * \brief BNLM public parameters.
  * \details Struct with all parameters for the BNLM kernel that can be set
  * ISP2.6.1: BNLM is used.
  */
 struct ia_css_bnlm_config {
-       bool            rad_enable;     /**< Enable a radial dependency in a weight calculation */
-       int32_t         rad_x_origin;   /**< Initial x coordinate for a radius calculation */
-       int32_t         rad_y_origin;   /**< Initial x coordinate for a radius calculation */
+       bool            rad_enable;     /** Enable a radial dependency in a weight calculation */
+       int32_t         rad_x_origin;   /** Initial x coordinate for a radius calculation */
+       int32_t         rad_y_origin;   /** Initial y coordinate for a radius calculation */
        /* a threshold for average of weights if this < Th, do not denoise pixel */
        int32_t         avg_min_th;
        /* minimum weight for denoising if max < th, do not denoise pixel */
        int32_t         max_min_th;
 
        /**@{*/
-       /** Coefficient for approximation, in the form of (1 + x / N)^N,
+       /* Coefficient for approximation, in the form of (1 + x / N)^N,
         * that fits the first-order exp() to default exp_lut in BNLM sheet
         * */
        int32_t         exp_coeff_a;
@@ -48,55 +48,55 @@ struct ia_css_bnlm_config {
        uint32_t        exp_exponent;
        /**@}*/
 
-       int32_t nl_th[3];       /**< Detail thresholds */
+       int32_t nl_th[3];       /** Detail thresholds */
 
-       /** Index for n-th maximum candidate weight for each detail group */
+       /* Index for n-th maximum candidate weight for each detail group */
        int32_t match_quality_max_idx[4];
 
        /**@{*/
-       /** A lookup table for 1/sqrt(1+mu) approximation */
+       /* A lookup table for 1/sqrt(1+mu) approximation */
        int32_t mu_root_lut_thr[15];
        int32_t mu_root_lut_val[16];
        /**@}*/
        /**@{*/
-       /** A lookup table for SAD normalization */
+       /* A lookup table for SAD normalization */
        int32_t sad_norm_lut_thr[15];
        int32_t sad_norm_lut_val[16];
        /**@}*/
        /**@{*/
-       /** A lookup table that models a weight's dependency on textures */
+       /* A lookup table that models a weight's dependency on textures */
        int32_t sig_detail_lut_thr[15];
        int32_t sig_detail_lut_val[16];
        /**@}*/
        /**@{*/
-       /** A lookup table that models a weight's dependency on a pixel's radial distance */
+       /* A lookup table that models a weight's dependency on a pixel's radial distance */
        int32_t sig_rad_lut_thr[15];
        int32_t sig_rad_lut_val[16];
        /**@}*/
        /**@{*/
-       /** A lookup table to control denoise power depending on a pixel's radial distance */
+       /* A lookup table to control denoise power depending on a pixel's radial distance */
        int32_t rad_pow_lut_thr[15];
        int32_t rad_pow_lut_val[16];
        /**@}*/
        /**@{*/
-       /** Non linear transfer functions to calculate the blending coefficient depending on detail group */
-       /** detail group 0 */
+       /* Non linear transfer functions to calculate the blending coefficient depending on detail group */
+       /* detail group 0 */
        /**@{*/
        int32_t nl_0_lut_thr[15];
        int32_t nl_0_lut_val[16];
        /**@}*/
        /**@{*/
-       /** detail group 1 */
+       /* detail group 1 */
        int32_t nl_1_lut_thr[15];
        int32_t nl_1_lut_val[16];
        /**@}*/
        /**@{*/
-       /** detail group 2 */
+       /* detail group 2 */
        int32_t nl_2_lut_thr[15];
        int32_t nl_2_lut_val[16];
        /**@}*/
        /**@{*/
-       /** detail group 3 */
+       /* detail group 3 */
        int32_t nl_3_lut_thr[15];
        int32_t nl_3_lut_val[16];
        /**@}*/
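
A standalone numeric aside on the (1 + x / N)^N form mentioned for the exp_coeff_* fields above: it is the classic approximation that converges to exp(x) as N grows, which a few lines of C can confirm (values shown are for x = 1):

#include <math.h>
#include <stdio.h>

int main(void)
{
        double x = 1.0;
        int n;

        for (n = 8; n <= 64; n *= 2)
                printf("N = %2d: (1 + x/N)^N = %.4f\n", n, pow(1.0 + x / n, n));
        printf("exp(x)               = %.4f\n", exp(x));        /* 2.7183 */
        return 0;
}

/* Prints roughly 2.5658 (N=8), 2.6379 (N=16), 2.6770 (N=32), 2.6973 (N=64). */
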
index be80f70..551bd0e 100644 (file)
 #ifndef __IA_CSS_BNR2_2_TYPES_H
 #define __IA_CSS_BNR2_2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Bayer Noise Reduction parameters.
 */
 
 #include "type_support.h" /* int32_t */
 
-/** Bayer Noise Reduction 2.2 configuration
+/* Bayer Noise Reduction 2.2 configuration
  *
  * \brief BNR2_2 public parameters.
  * \details Struct with all parameters for the BNR2.2 kernel that can be set
  */
 struct ia_css_bnr2_2_config {
        /**@{*/
-       /** Directional variance gain for R/G/B components in dark region */
+       /* Directional variance gain for R/G/B components in dark region */
        int32_t d_var_gain_r;
        int32_t d_var_gain_g;
        int32_t d_var_gain_b;
        /**@}*/
        /**@{*/
-       /** Slope of Directional variance gain between dark and bright region */
+       /* Slope of Directional variance gain between dark and bright region */
        int32_t d_var_gain_slope_r;
        int32_t d_var_gain_slope_g;
        int32_t d_var_gain_slope_b;
        /**@}*/
        /**@{*/
-       /** Non-Directional variance gain for R/G/B components in dark region */
+       /* Non-Directional variance gain for R/G/B components in dark region */
        int32_t n_var_gain_r;
        int32_t n_var_gain_g;
        int32_t n_var_gain_b;
        /**@}*/
        /**@{*/
-       /** Slope of Non-Directional variance gain between dark and bright region */
+       /* Slope of Non-Directional variance gain between dark and bright region */
        int32_t n_var_gain_slope_r;
        int32_t n_var_gain_slope_g;
        int32_t n_var_gain_slope_b;
        /**@}*/
 
-       int32_t dir_thres;              /**< Threshold for directional filtering */
-       int32_t dir_thres_w;            /**< Threshold width for directional filtering */
-       int32_t var_offset_coef;        /**< Variance offset coefficient */
-       int32_t dir_gain;               /**< Gain for directional coefficient */
-       int32_t detail_gain;            /**< Gain for low contrast texture control */
-       int32_t detail_gain_divisor;    /**< Gain divisor for low contrast texture control */
-       int32_t detail_level_offset;    /**< Bias value for low contrast texture control */
-       int32_t d_var_th_min;           /**< Minimum clipping value for directional variance*/
-       int32_t d_var_th_max;           /**< Maximum clipping value for diretional variance*/
-       int32_t n_var_th_min;           /**< Minimum clipping value for non-directional variance*/
-       int32_t n_var_th_max;           /**< Maximum clipping value for non-directional variance*/
+       int32_t dir_thres;              /** Threshold for directional filtering */
+       int32_t dir_thres_w;            /** Threshold width for directional filtering */
+       int32_t var_offset_coef;        /** Variance offset coefficient */
+       int32_t dir_gain;               /** Gain for directional coefficient */
+       int32_t detail_gain;            /** Gain for low contrast texture control */
+       int32_t detail_gain_divisor;    /** Gain divisor for low contrast texture control */
+       int32_t detail_level_offset;    /** Bias value for low contrast texture control */
+       int32_t d_var_th_min;           /** Minimum clipping value for directional variance*/
+       int32_t d_var_th_max;           /** Maximum clipping value for directional variance*/
+       int32_t n_var_th_min;           /** Minimum clipping value for non-directional variance*/
+       int32_t n_var_th_max;           /** Maximum clipping value for non-directional variance*/
 };
 
 #endif /* __IA_CSS_BNR2_2_TYPES_H */
index 6df6c2b..3ebc069 100644 (file)
 #ifndef __IA_CSS_CNR2_TYPES_H
 #define __IA_CSS_CNR2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Chroma Noise Reduction (CNR) parameters
 */
 
-/** Chroma Noise Reduction configuration.
+/* Chroma Noise Reduction configuration.
  *
  *  Small sensitivity of edge means strong smoothness and NR performance.
  *  If you see blurred color on vertical edges,
  *  ISP2: CNR2 is used for Still.
  */
 struct ia_css_cnr_config {
-       uint16_t coring_u;      /**< Coring level of U.
+       uint16_t coring_u;      /** Coring level of U.
                                u0.13, [0,8191], default/ineffective 0 */
-       uint16_t coring_v;      /**< Coring level of V.
+       uint16_t coring_v;      /** Coring level of V.
                                u0.13, [0,8191], default/ineffective 0 */
-       uint16_t sense_gain_vy; /**< Sensitivity of horizontal edge of Y.
+       uint16_t sense_gain_vy; /** Sensitivity of horizontal edge of Y.
                                u13.0, [0,8191], default 100, ineffective 8191 */
-       uint16_t sense_gain_vu; /**< Sensitivity of horizontal edge of U.
+       uint16_t sense_gain_vu; /** Sensitivity of horizontal edge of U.
                                u13.0, [0,8191], default 100, ineffective 8191 */
-       uint16_t sense_gain_vv; /**< Sensitivity of horizontal edge of V.
+       uint16_t sense_gain_vv; /** Sensitivity of horizontal edge of V.
                                u13.0, [0,8191], default 100, ineffective 8191 */
-       uint16_t sense_gain_hy; /**< Sensitivity of vertical edge of Y.
+       uint16_t sense_gain_hy; /** Sensitivity of vertical edge of Y.
                                u13.0, [0,8191], default 50, ineffective 8191 */
-       uint16_t sense_gain_hu; /**< Sensitivity of vertical edge of U.
+       uint16_t sense_gain_hu; /** Sensitivity of vertical edge of U.
                                u13.0, [0,8191], default 50, ineffective 8191 */
-       uint16_t sense_gain_hv; /**< Sensitivity of vertical edge of V.
+       uint16_t sense_gain_hv; /** Sensitivity of vertical edge of V.
                                u13.0, [0,8191], default 50, ineffective 8191 */
 };
 
index 3f11442..47a38fd 100644 (file)
  *
  */
 struct ia_css_conversion_config {
-       uint32_t en;     /**< en parameter */
-       uint32_t dummy0; /**< dummy0 dummy parameter 0 */
-       uint32_t dummy1; /**< dummy1 dummy parameter 1 */
-       uint32_t dummy2; /**< dummy2 dummy parameter 2 */
+       uint32_t en;     /** en parameter */
+       uint32_t dummy0; /** dummy0 dummy parameter 0 */
+       uint32_t dummy1; /** dummy1 dummy parameter 1 */
+       uint32_t dummy2; /** dummy2 dummy parameter 2 */
 };
 
 #endif /* __IA_CSS_CONVERSION_TYPES_H */
index 8bfc8da..0f1812c 100644 (file)
@@ -19,7 +19,7 @@
 #include "dma.h"
 #include "sh_css_internal.h" /* sh_css_crop_pos */
 
-/** Crop frame */
+/* Crop frame */
 struct sh_css_isp_crop_isp_config {
        uint32_t width_a_over_b;
        struct dma_port_config port_b;
index 54ced07..1040438 100644 (file)
 #ifndef __IA_CSS_CSC_TYPES_H
 #define __IA_CSS_CSC_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Color Space Conversion parameters.
 */
 
-/** Color Correction configuration.
+/* Color Correction configuration.
  *
  *  This structure is used for 3 cases.
  *  ("YCgCo" is the output format of Demosaic.)
@@ -68,9 +68,9 @@
  *     4096    -3430   -666
  */
 struct ia_css_cc_config {
-       uint32_t fraction_bits;/**< Fractional bits of matrix.
+       uint32_t fraction_bits;/** Fractional bits of matrix.
                                        u8.0, [0,13] */
-       int32_t matrix[3 * 3]; /**< Conversion matrix.
+       int32_t matrix[3 * 3]; /** Conversion matrix.
                                        s[13-fraction_bits].[fraction_bits],
                                        [-8192,8191] */
 };
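
For illustration, a small conversion helper (ours, not part of the API) from a floating-point matrix entry to the fixed-point format documented above, i.e. a signed value with fraction_bits fractional bits, clamped to the stated [-8192, 8191] range:

#include <stdint.h>

static int32_t cc_to_fixed(double v, uint32_t fraction_bits)    /* fraction_bits in [0,13] */
{
        double scaled = v * (double)(1u << fraction_bits);
        int32_t q = (int32_t)(scaled >= 0.0 ? scaled + 0.5 : scaled - 0.5);

        if (q > 8191)
                q = 8191;
        if (q < -8192)
                q = -8192;
        return q;
}

/* e.g. with fraction_bits = 8, a gain of 1.0 encodes as 256, so the
 * identity matrix becomes {256, 0, 0,  0, 256, 0,  0, 0, 256}. */
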
index c66e823..ad7040c 100644 (file)
 
 /*VMEM Luma params*/
 struct ia_css_isp_ctc2_vmem_params {
-       /**< Gains by Y(Luma) at Y = 0.0,Y_X1, Y_X2, Y_X3, Y_X4*/
+       /** Gains by Y(Luma) at Y = 0.0,Y_X1, Y_X2, Y_X3, Y_X4*/
        VMEM_ARRAY(y_x, ISP_VEC_NELEMS);
-       /** kneepoints by Y(Luma) 0.0, y_x1, y_x2, y _x3, y_x4*/
+       /* kneepoints by Y(Luma) 0.0, y_x1, y_x2, y_x3, y_x4*/
        VMEM_ARRAY(y_y, ISP_VEC_NELEMS);
-       /** Slopes of lines interconnecting
+       /* Slopes of lines interconnecting
         *  0.0 -> y_x1 -> y_x2 -> y_x3 -> y_x4 -> 1.0*/
        VMEM_ARRAY(e_y_slope, ISP_VEC_NELEMS);
 };
@@ -34,15 +34,15 @@ struct ia_css_isp_ctc2_vmem_params {
 /*DMEM Chroma params*/
 struct ia_css_isp_ctc2_dmem_params {
 
-       /** Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/
+       /* Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/
        int32_t uv_y0;
        int32_t uv_y1;
 
-       /** Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/
+       /* Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/
        int32_t uv_x0;
        int32_t uv_x1;
 
-       /** Slope of line interconnecting uv_x0 -> uv_x1*/
+       /* Slope of line interconnecting uv_x0 -> uv_x1*/
        int32_t uv_dydx;
 
 };
index 7b75f01..1222cf3 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_CTC2_TYPES_H
 #define __IA_CSS_CTC2_TYPES_H
 
-/** Chroma Tone Control configuration.
+/* Chroma Tone Control configuration.
 *
 *  ISP block: CTC2 (CTC by polygonal approximation)
 * (ISP1: CTC1 (CTC by look-up table) is used.)
@@ -24,7 +24,7 @@
 */
 struct ia_css_ctc2_config {
 
-       /**< Gains by Y(Luma) at Y =0.0,Y_X1, Y_X2, Y_X3, Y_X4 and Y_X5
+       /** Gains by Y(Luma) at Y =0.0,Y_X1, Y_X2, Y_X3, Y_X4 and Y_X5
        *   --default/ineffective value: 4096(0.5f)
        */
        int32_t y_y0;
@@ -33,19 +33,19 @@ struct ia_css_ctc2_config {
        int32_t y_y3;
        int32_t y_y4;
        int32_t y_y5;
-       /** 1st-4th  kneepoints by Y(Luma) --default/ineffective value:n/a
+       /* 1st-4th  kneepoints by Y(Luma) --default/ineffective value:n/a
        *   requirement: 0.0 < y_x1 < y_x2 < y_x3 < y_x4 < 1.0
        */
        int32_t y_x1;
        int32_t y_x2;
        int32_t y_x3;
        int32_t y_x4;
-       /** Gains by UV(Chroma) under threholds uv_x0 and uv_x1
+       /* Gains by UV(Chroma) under thresholds uv_x0 and uv_x1
        *   --default/ineffective value: 4096(0.5f)
        */
        int32_t uv_y0;
        int32_t uv_y1;
-       /** Minimum and Maximum Thresholds by UV(Chroma)- uv_x0 and uv_x1
+       /* Minimum and Maximum Thresholds by UV(Chroma)- uv_x0 and uv_x1
        *   --default/ineffective value: n/a
        */
        int32_t uv_x0;
index 1da215b..4ac47ce 100644 (file)
 #ifndef __IA_CSS_CTC_TYPES_H
 #define __IA_CSS_CTC_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Chroma Tone Control parameters.
 */
 
-/** Fractional bits for CTC gain (used only for ISP1).
+/* Fractional bits for CTC gain (used only for ISP1).
  *
  *  IA_CSS_CTC_COEF_SHIFT(=13) includes not only the fractional bits
  *  of gain(=8), but also the bits(=5) to convert chroma
  */
 #define IA_CSS_CTC_COEF_SHIFT          13
 
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
 #define IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2      10
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
 #define IA_CSS_VAMEM_1_CTC_TABLE_SIZE           (1U<<IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2)
 
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
 #define IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2      8
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
 #define IA_CSS_VAMEM_2_CTC_TABLE_SIZE           ((1U<<IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2) + 1)
 
 enum ia_css_vamem_type {
@@ -47,44 +47,44 @@ enum ia_css_vamem_type {
        IA_CSS_VAMEM_TYPE_2
 };
 
-/** Chroma Tone Control configuration.
+/* Chroma Tone Control configuration.
  *
  *  ISP block: CTC2 (CTC by polygonal line approximation)
  * (ISP1: CTC1 (CTC by look-up table) is used.)
  *  ISP2: CTC2 is used.
  */
 struct ia_css_ctc_config {
-       uint16_t y0;    /**< 1st kneepoint gain.
+       uint16_t y0;    /** 1st kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t y1;    /**< 2nd kneepoint gain.
+       uint16_t y1;    /** 2nd kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t y2;    /**< 3rd kneepoint gain.
+       uint16_t y2;    /** 3rd kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t y3;    /**< 4th kneepoint gain.
+       uint16_t y3;    /** 4th kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t y4;    /**< 5th kneepoint gain.
+       uint16_t y4;    /** 5th kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t y5;    /**< 6th kneepoint gain.
+       uint16_t y5;    /** 6th kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t ce_gain_exp;   /**< Common exponent of y-axis gain.
+       uint16_t ce_gain_exp;   /** Common exponent of y-axis gain.
                                u8.0, [0,13],
                                default/ineffective 1 */
-       uint16_t x1;    /**< 2nd kneepoint luma.
+       uint16_t x1;    /** 2nd kneepoint luma.
                                u0.13, [0,8191], constraints: 0<x1<x2,
                                default/ineffective 1024 */
-       uint16_t x2;    /**< 3rd kneepoint luma.
+       uint16_t x2;    /** 3rd kneepoint luma.
                                u0.13, [0,8191], constraints: x1<x2<x3,
                                default/ineffective 2048 */
-       uint16_t x3;    /**< 4th kneepoint luma.
+       uint16_t x3;    /** 4th kneepoint luma.
                                u0.13, [0,8191], constraints: x2<x3<x4,
                                default/ineffective 6144 */
-       uint16_t x4;    /**< 5tn kneepoint luma.
+       uint16_t x4;    /** 5th kneepoint luma.
                                u0.13, [0,8191], constraints: x3<x4<8191,
                                default/ineffective 7168 */
 };
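
A sketch of a caller-side sanity check (ours, not part of the API) for the kneepoint ordering constraints spelled out in the x1..x4 comments above; it assumes struct ia_css_ctc_config from this header is in scope:

#include <stdbool.h>

static bool ctc_kneepoints_valid(const struct ia_css_ctc_config *c)
{
        return 0 < c->x1 && c->x1 < c->x2 && c->x2 < c->x3 &&
               c->x3 < c->x4 && c->x4 < 8191;
}

/* The documented defaults 1024 < 2048 < 6144 < 7168 satisfy this. */
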
@@ -94,7 +94,7 @@ union ia_css_ctc_data {
        uint16_t vamem_2[IA_CSS_VAMEM_2_CTC_TABLE_SIZE];
 };
 
-/** CTC table, used for Chroma Tone Control.
+/* CTC table, used for Chroma Tone Control.
  *
  *  ISP block: CTC1 (CTC by look-up table)
  *  ISP1: CTC1 is used.
index 525c838..803be68 100644 (file)
 #ifndef __IA_CSS_DE_TYPES_H
 #define __IA_CSS_DE_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Demosaic (bayer-to-YCgCo) parameters.
 */
 
-/** Demosaic (bayer-to-YCgCo) configuration.
+/* Demosaic (bayer-to-YCgCo) configuration.
  *
  *  ISP block: DE1
  *  ISP1: DE1 is used.
  * (ISP2: DE2 is used.)
  */
 struct ia_css_de_config {
-       ia_css_u0_16 pixelnoise; /**< Pixel noise used in moire elimination.
+       ia_css_u0_16 pixelnoise; /** Pixel noise used in moire elimination.
                                u0.16, [0,65535],
                                default 0, ineffective 0 */
-       ia_css_u0_16 c1_coring_threshold; /**< Coring threshold for C1.
+       ia_css_u0_16 c1_coring_threshold; /** Coring threshold for C1.
                                This is the same as nr_config.threshold_cb.
                                u0.16, [0,65535],
                                default 128(0.001953125), ineffective 0 */
-       ia_css_u0_16 c2_coring_threshold; /**< Coring threshold for C2.
+       ia_css_u0_16 c2_coring_threshold; /** Coring threshold for C2.
                                This is the same as nr_config.threshold_cr.
                                u0.16, [0,65535],
                                default 128(0.001953125), ineffective 0 */
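
A short numeric aside on the u0.16 defaults quoted above: the raw value decodes as value / 2^16, so 128 corresponds to 128 / 65536 = 0.001953125 (a quick check in C):

#include <stdio.h>

int main(void)
{
        printf("%.9f\n", 128.0 / 65536.0);      /* prints 0.001953125 */
        return 0;
}
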
index eac1b27..50bdde4 100644 (file)
 #ifndef __IA_CSS_DE2_TYPES_H
 #define __IA_CSS_DE2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Demosaicing parameters.
 */
 
-/** Eigen Color Demosaicing configuration.
+/* Eigen Color Demosaicing configuration.
  *
  *  ISP block: DE2
  * (ISP1: DE1 is used.)
  *  ISP2: DE2 is used.
  */
 struct ia_css_ecd_config {
-       uint16_t zip_strength;  /**< Strength of zipper reduction.
+       uint16_t zip_strength;  /** Strength of zipper reduction.
                                u0.13, [0,8191],
                                default 5489(0.67), ineffective 0 */
-       uint16_t fc_strength;   /**< Strength of false color reduction.
+       uint16_t fc_strength;   /** Strength of false color reduction.
                                u0.13, [0,8191],
                                default 8191(almost 1.0), ineffective 0 */
-       uint16_t fc_debias;     /**< Prevent color change
+       uint16_t fc_debias;     /** Prevent color change
                                     on noise or Gr/Gb imbalance.
                                u0.13, [0,8191],
                                default 0, ineffective 0 */
index b5d7b6b..1bf6dce 100644 (file)
 #ifndef __IA_CSS_DP_TYPES_H
 #define __IA_CSS_DP_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Defect Pixel Correction (DPC) parameters.
 */
 
 
-/** Defect Pixel Correction configuration.
+/* Defect Pixel Correction configuration.
  *
  *  ISP block: DPC1 (DPC after WB)
  *             DPC2 (DPC before WB)
  *  ISP2: DPC2 is used.
  */
 struct ia_css_dp_config {
-       ia_css_u0_16 threshold; /**< The threshold of defect pixel correction,
+       ia_css_u0_16 threshold; /** The threshold of defect pixel correction,
                              representing the permissible difference of
                              intensity between one pixel and its
                              surrounding pixels. Smaller values result
                                in more frequent pixel corrections.
                                u0.16, [0,65535],
                                default 8192, ineffective 65535 */
-       ia_css_u8_8 gain;        /**< The sensitivity of mis-correction. ISP will
+       ia_css_u8_8 gain;        /** The sensitivity of mis-correction. ISP will
                              miss a lot of defects if the value is set
                                too large.
                                u8.8, [0,65535],
index b2c9741..6727682 100644 (file)
 #ifndef __IA_CSS_DPC2_TYPES_H
 #define __IA_CSS_DPC2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Defect Pixel Correction 2 (DPC2) parameters.
 */
 
 #include "type_support.h"
 
 /**@{*/
-/** Floating point constants for different metrics. */
+/* Floating point constants for different metrics. */
 #define METRIC1_ONE_FP (1<<12)
 #define METRIC2_ONE_FP (1<<5)
 #define METRIC3_ONE_FP (1<<12)
@@ -30,7 +30,7 @@
 /**@}*/
 
 /**@{*/
-/** Defect Pixel Correction 2 configuration.
+/* Defect Pixel Correction 2 configuration.
  *
  * \brief DPC2 public parameters.
  * \details Struct with all parameters for the Defect Pixel Correction 2
index 4d0abfe..66a7e58 100644 (file)
@@ -30,7 +30,7 @@
 #ifdef ISP2401
 
 #endif
-/** dvserence frame */
+/* DVS reference frame */
 struct sh_css_isp_dvs_isp_config {
        uint32_t num_horizontal_blocks;
        uint32_t num_vertical_blocks;
index 07651f0..32e9182 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_EED1_8_TYPES_H
 #define __IA_CSS_EED1_8_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Edge Enhanced Demosaic parameters.
 */
 
  */
 #define IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS  9
 
-/** Edge Enhanced Demosaic configuration
+/* Edge Enhanced Demosaic configuration
  *
  * ISP2.6.1: EED1_8 is used.
  */
 
 struct ia_css_eed1_8_config {
-       int32_t rbzp_strength;  /**< Strength of zipper reduction. */
-
-       int32_t fcstrength;     /**< Strength of false color reduction. */
-       int32_t fcthres_0;      /**< Threshold to prevent chroma coring due to noise or green disparity in dark region. */
-       int32_t fcthres_1;      /**< Threshold to prevent chroma coring due to noise or green disparity in bright region. */
-       int32_t fc_sat_coef;    /**< How much color saturation to maintain in high color saturation region. */
-       int32_t fc_coring_prm;  /**< Chroma coring coefficient for tint color suppression. */
-
-       int32_t aerel_thres0;   /**< Threshold for Non-Directional Reliability at dark region. */
-       int32_t aerel_gain0;    /**< Gain for Non-Directional Reliability at dark region. */
-       int32_t aerel_thres1;   /**< Threshold for Non-Directional Reliability at bright region. */
-       int32_t aerel_gain1;    /**< Gain for Non-Directional Reliability at bright region. */
-
-       int32_t derel_thres0;   /**< Threshold for Directional Reliability at dark region. */
-       int32_t derel_gain0;    /**< Gain for Directional Reliability at dark region. */
-       int32_t derel_thres1;   /**< Threshold for Directional Reliability at bright region. */
-       int32_t derel_gain1;    /**< Gain for Directional Reliability at bright region. */
-
-       int32_t coring_pos0;    /**< Positive Edge Coring Threshold in dark region. */
-       int32_t coring_pos1;    /**< Positive Edge Coring Threshold in bright region. */
-       int32_t coring_neg0;    /**< Negative Edge Coring Threshold in dark region. */
-       int32_t coring_neg1;    /**< Negative Edge Coring Threshold in bright region. */
-
-       int32_t gain_exp;       /**< Common Exponent of Gain. */
-       int32_t gain_pos0;      /**< Gain for Positive Edge in dark region. */
-       int32_t gain_pos1;      /**< Gain for Positive Edge in bright region. */
-       int32_t gain_neg0;      /**< Gain for Negative Edge in dark region. */
-       int32_t gain_neg1;      /**< Gain for Negative Edge in bright region. */
-
-       int32_t pos_margin0;    /**< Margin for Positive Edge in dark region. */
-       int32_t pos_margin1;    /**< Margin for Positive Edge in bright region. */
-       int32_t neg_margin0;    /**< Margin for Negative Edge in dark region. */
-       int32_t neg_margin1;    /**< Margin for Negative Edge in bright region. */
-
-       int32_t dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS];               /**< Segment data for directional edge weight: X. */
-       int32_t dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS];               /**< Segment data for directional edge weight: Y. */
-       int32_t dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)];     /**< Segment data for directional edge weight: Slope. */
-       int32_t dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)];       /**< Segment data for directional edge weight: Exponent. */
-       int32_t dedgew_max;     /**< Max Weight for Directional Edge. */
+       int32_t rbzp_strength;  /** Strength of zipper reduction. */
+
+       int32_t fcstrength;     /** Strength of false color reduction. */
+       int32_t fcthres_0;      /** Threshold to prevent chroma coring due to noise or green disparity in dark region. */
+       int32_t fcthres_1;      /** Threshold to prevent chroma coring due to noise or green disparity in bright region. */
+       int32_t fc_sat_coef;    /** How much color saturation to maintain in high color saturation region. */
+       int32_t fc_coring_prm;  /** Chroma coring coefficient for tint color suppression. */
+
+       int32_t aerel_thres0;   /** Threshold for Non-Directional Reliability at dark region. */
+       int32_t aerel_gain0;    /** Gain for Non-Directional Reliability at dark region. */
+       int32_t aerel_thres1;   /** Threshold for Non-Directional Reliability at bright region. */
+       int32_t aerel_gain1;    /** Gain for Non-Directional Reliability at bright region. */
+
+       int32_t derel_thres0;   /** Threshold for Directional Reliability at dark region. */
+       int32_t derel_gain0;    /** Gain for Directional Reliability at dark region. */
+       int32_t derel_thres1;   /** Threshold for Directional Reliability at bright region. */
+       int32_t derel_gain1;    /** Gain for Directional Reliability at bright region. */
+
+       int32_t coring_pos0;    /** Positive Edge Coring Threshold in dark region. */
+       int32_t coring_pos1;    /** Positive Edge Coring Threshold in bright region. */
+       int32_t coring_neg0;    /** Negative Edge Coring Threshold in dark region. */
+       int32_t coring_neg1;    /** Negative Edge Coring Threshold in bright region. */
+
+       int32_t gain_exp;       /** Common Exponent of Gain. */
+       int32_t gain_pos0;      /** Gain for Positive Edge in dark region. */
+       int32_t gain_pos1;      /** Gain for Positive Edge in bright region. */
+       int32_t gain_neg0;      /** Gain for Negative Edge in dark region. */
+       int32_t gain_neg1;      /** Gain for Negative Edge in bright region. */
+
+       int32_t pos_margin0;    /** Margin for Positive Edge in dark region. */
+       int32_t pos_margin1;    /** Margin for Positive Edge in bright region. */
+       int32_t neg_margin0;    /** Margin for Negative Edge in dark region. */
+       int32_t neg_margin1;    /** Margin for Negative Edge in bright region. */
+
+       int32_t dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS];               /** Segment data for directional edge weight: X. */
+       int32_t dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS];               /** Segment data for directional edge weight: Y. */
+       int32_t dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)];     /** Segment data for directional edge weight: Slope. */
+       int32_t dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)];       /** Segment data for directional edge weight: Exponent. */
+       int32_t dedgew_max;     /** Max Weight for Directional Edge. */
 };
 
 #endif /* __IA_CSS_EED1_8_TYPES_H */
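
Editor's note on the array sizes above: dew_enhance_seg_x/y carry IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS (9) control points, while the slope and exponent arrays carry one entry per interval, hence the "- 1" in their dimensions. The sketch below only illustrates that relationship; fill_dew_segments() is hypothetical and the slope/exponent encoding is an assumption, the real tuning pipeline may compute these differently:

static void fill_dew_segments(struct ia_css_eed1_8_config *cfg)
{
        int i;

        /* One slope/exponent entry per interval between adjacent control points. */
        for (i = 0; i < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1; i++) {
                int32_t dx = cfg->dew_enhance_seg_x[i + 1] - cfg->dew_enhance_seg_x[i];
                int32_t dy = cfg->dew_enhance_seg_y[i + 1] - cfg->dew_enhance_seg_y[i];

                /* Assumed fixed-point slope scaled by a per-segment exponent. */
                cfg->dew_enhance_seg_slope[i] = dx ? (dy << cfg->dew_enhance_seg_exp[i]) / dx : 0;
        }
}
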
index df1565a..4947957 100644 (file)
 #ifndef __IA_CSS_FORMATS_TYPES_H
 #define __IA_CSS_FORMATS_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for output format parameters.
 */
 
 #include "type_support.h"
 
-/** Formats configuration.
+/* Formats configuration.
  *
  *  ISP block: FORMATS
  *  ISP1: FORMATS is used.
  *  ISP2: FORMATS is used.
  */
 struct ia_css_formats_config {
-       uint32_t video_full_range_flag; /**< selects the range of YUV output.
+       uint32_t video_full_range_flag; /** selects the range of YUV output.
                                u8.0, [0,1],
                                default 1, ineffective n/a\n
                                1 - full range, luma 0-255, chroma 0-255\n
index 5a2f0c0..ef287fa 100644 (file)
 #ifndef __IA_CSS_FPN_TYPES_H
 #define __IA_CSS_FPN_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Fixed Pattern Noise parameters.
 */
 
-/** Fixed Pattern Noise table.
+/* Fixed Pattern Noise table.
  *
  *  This contains the fixed pattern noise values
  *  obtained from a black frame capture.
  */
 
 struct ia_css_fpn_table {
-       int16_t *data;          /**< Table content (fixed patterns noise).
+       int16_t *data;          /** Table content (fixed pattern noise).
                                        u0.[13-shift], [0,63] */
-       uint32_t width;         /**< Table width (in pixels).
+       uint32_t width;         /** Table width (in pixels).
                                        This is the input frame width. */
-       uint32_t height;        /**< Table height (in pixels).
+       uint32_t height;        /** Table height (in pixels).
                                        This is the input frame height. */
-       uint32_t shift;         /**< Common exponent of table content.
+       uint32_t shift;         /** Common exponent of table content.
                                        u8.0, [0,13] */
-       uint32_t enabled;       /**< Fpn is enabled.
+       uint32_t enabled;       /** Fpn is enabled.
                                        bool */
 };
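
Editor's note: one plausible way to fill this table from a black-frame capture, given the documented constraints (entries limited to [0,63], one common exponent). build_fpn_table() is a hypothetical host-side helper, not a CSS API call, and assumes t->data was already allocated for width * height entries:

#include <stdint.h>

static void build_fpn_table(struct ia_css_fpn_table *t, const uint16_t *black_frame)
{
        uint32_t i, n = t->width * t->height;
        uint16_t max = 0;

        for (i = 0; i < n; i++)
                if (black_frame[i] > max)
                        max = black_frame[i];

        /* Pick the smallest common exponent so every entry fits in [0,63]. */
        t->shift = 0;
        while ((max >> t->shift) > 63)
                t->shift++;

        for (i = 0; i < n; i++)
                t->data[i] = (int16_t)(black_frame[i] >> t->shift);

        t->enabled = 1;
}
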
 
index dd9f0ed..594807f 100644 (file)
 #ifndef __IA_CSS_GC_TYPES_H
 #define __IA_CSS_GC_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Gamma Correction parameters.
 */
 
 #include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h"  /* FIXME: Needed for ia_css_vamem_type */
 
-/** Fractional bits for GAMMA gain */
+/* Fractional bits for GAMMA gain */
 #define IA_CSS_GAMMA_GAIN_K_SHIFT      13
 
-/** Number of elements in the gamma table. */
+/* Number of elements in the gamma table. */
 #define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2    10
 #define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE         (1U<<IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2)
 
-/** Number of elements in the gamma table. */
+/* Number of elements in the gamma table. */
 #define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2    8
 #define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE         ((1U<<IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2) + 1)
 
-/** Gamma table, used for Y(Luma) Gamma Correction.
+/* Gamma table, used for Y(Luma) Gamma Correction.
  *
  *  ISP block: GC1 (YUV Gamma Correction)
  *  ISP1: GC1 is used.
  * (ISP2: GC2(sRGB Gamma Correction) is used.)
  */
-/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
      IA_CSS_VAMEM_TYPE_2(ISP2400) */
 union ia_css_gc_data {
        uint16_t vamem_1[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE];
-       /**< Y(Luma) Gamma table on vamem type 1. u0.8, [0,255] */
+       /** Y(Luma) Gamma table on vamem type 1. u0.8, [0,255] */
        uint16_t vamem_2[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE];
-       /**< Y(Luma) Gamma table on vamem type 2. u0.8, [0,255] */
+       /** Y(Luma) Gamma table on vamem type 2. u0.8, [0,255] */
 };
 
 struct ia_css_gamma_table {
@@ -52,22 +52,22 @@ struct ia_css_gamma_table {
        union ia_css_gc_data data;
 };
 
-/** Gamma Correction configuration (used only for YUV Gamma Correction).
+/* Gamma Correction configuration (used only for YUV Gamma Correction).
  *
  *  ISP block: GC1 (YUV Gamma Correction)
  *  ISP1: GC1 is used.
  * (ISP2: GC2 (sRGB Gamma Correction) is used.)
   */
 struct ia_css_gc_config {
-       uint16_t gain_k1; /**< Gain to adjust U after YUV Gamma Correction.
+       uint16_t gain_k1; /** Gain to adjust U after YUV Gamma Correction.
                                u0.16, [0,65535],
                                default/ineffective 19000(0.29) */
-       uint16_t gain_k2; /**< Gain to adjust V after YUV Gamma Correction.
+       uint16_t gain_k2; /** Gain to adjust V after YUV Gamma Correction.
                                u0.16, [0,65535],
                                default/ineffective 19000(0.29) */
 };
 
-/** Chroma Enhancement configuration.
+/* Chroma Enhancement configuration.
  *
  *  This parameter specifies range of chroma output level.
  *  The standard range is [0,255] or [16,240].
@@ -77,20 +77,20 @@ struct ia_css_gc_config {
  * (ISP2: CE1 is not used.)
  */
 struct ia_css_ce_config {
-       uint8_t uv_level_min; /**< Minimum of chroma output level.
+       uint8_t uv_level_min; /** Minimum of chroma output level.
                                u0.8, [0,255], default/ineffective 0 */
-       uint8_t uv_level_max; /**< Maximum of chroma output level.
+       uint8_t uv_level_max; /** Maximum of chroma output level.
                                u0.8, [0,255], default/ineffective 255 */
 };
 
-/** Multi-Axes Color Correction (MACC) configuration.
+/* Multi-Axes Color Correction (MACC) configuration.
  *
  *  ISP block: MACC2 (MACC by matrix and exponent(ia_css_macc_config))
  * (ISP1: MACC1 (MACC by only matrix) is used.)
  *  ISP2: MACC2 is used.
  */
 struct ia_css_macc_config {
-       uint8_t exp;    /**< Common exponent of ia_css_macc_table.
+       uint8_t exp;    /** Common exponent of ia_css_macc_table.
                                u8.0, [0,13], default 1, ineffective 1 */
 };
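
Editor's note: the two gamma table sizes above differ because VAMEM type 1 uses 1 << 10 = 1024 entries while VAMEM type 2 uses (1 << 8) + 1 = 257. A linear (identity) fill such as the sketch below (fill_identity_gamma() is hypothetical; real tables come from tuning data) shows how the ia_css_gc_data union is indexed for each type, with entries in the documented u0.8 [0,255] range:

static void fill_identity_gamma(struct ia_css_gamma_table *t, int is_vamem_2)
{
        unsigned int i;

        if (is_vamem_2) {
                for (i = 0; i < IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE; i++)   /* 257 entries */
                        t->data.vamem_2[i] = (uint16_t)((i * 255) / (IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE - 1));
        } else {
                for (i = 0; i < IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE; i++)   /* 1024 entries */
                        t->data.vamem_1[i] = (uint16_t)((i * 255) / (IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE - 1));
        }
}
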
 
index e439583..fab7467 100644 (file)
 
 #include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h"  /* FIXME: needed for ia_css_vamem_type */
 
-/** @file
+/* @file
 * CSS-API header file for Gamma Correction parameters.
 */
 
-/** sRGB Gamma table, used for sRGB Gamma Correction.
+/* sRGB Gamma table, used for sRGB Gamma Correction.
  *
  *  ISP block: GC2 (sRGB Gamma Correction)
  * (ISP1: GC1(YUV Gamma Correction) is used.)
  *  ISP2: GC2 is used.
  */
 
-/** Number of elements in the sRGB gamma table. */
+/* Number of elements in the sRGB gamma table. */
 #define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2 8
 #define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE      (1U<<IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2)
 
-/** Number of elements in the sRGB gamma table. */
+/* Number of elements in the sRGB gamma table. */
 #define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2    8
 #define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE     ((1U<<IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2) + 1)
 
-/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
      IA_CSS_VAMEM_TYPE_2(ISP2400) */
 union ia_css_rgb_gamma_data {
        uint16_t vamem_1[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE];
-       /**< RGB Gamma table on vamem type1. This table is not used,
+       /** RGB Gamma table on vamem type1. This table is not used,
                because sRGB Gamma Correction is not implemented for ISP2300. */
        uint16_t vamem_2[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE];
-               /**< RGB Gamma table on vamem type2. u0.12, [0,4095] */
+               /** RGB Gamma table on vamem type2. u0.12, [0,4095] */
 };
 
 struct ia_css_rgb_gamma_table {
index c3345b3..2646442 100644 (file)
  * \detail Currently HDR parameters are used only for testing purposes
  */
 struct ia_css_hdr_irradiance_params {
-       int test_irr;                                          /**< Test parameter */
-       int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];  /**< Histogram matching shift parameter */
-       int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /**< Histogram matching multiplication parameter */
-       int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];      /**< Weight map soft threshold low bound parameter */
-       int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];     /**< Weight map soft threshold high bound parameter */
-       int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /**< Soft threshold linear function coefficien */
-       int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /**< Soft threshold precision shift parameter */
-       int weight_bpp;                                        /**< Weight map bits per pixel */
+       int test_irr;                                          /** Test parameter */
+       int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];  /** Histogram matching shift parameter */
+       int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /** Histogram matching multiplication parameter */
+       int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];      /** Weight map soft threshold low bound parameter */
+       int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];     /** Weight map soft threshold high bound parameter */
+       int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /** Soft threshold linear function coefficient */
+       int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /** Soft threshold precision shift parameter */
+       int weight_bpp;                                        /** Weight map bits per pixel */
 };
 
 /**
@@ -39,7 +39,7 @@ struct ia_css_hdr_irradiance_params {
  * \detail Currently HDR parameters are used only for testing purposes
  */
 struct ia_css_hdr_deghost_params {
-       int test_deg; /**< Test parameter */
+       int test_deg; /** Test parameter */
 };
 
 /**
@@ -47,7 +47,7 @@ struct ia_css_hdr_deghost_params {
  * \detail Currently HDR parameters are used only for testing purposes
  */
 struct ia_css_hdr_exclusion_params {
-       int test_excl; /**< Test parameter */
+       int test_excl; /** Test parameter */
 };
 
 /**
@@ -56,9 +56,9 @@ struct ia_css_hdr_exclusion_params {
  * the CSS API. Currently, only test parameters are defined.
  */
 struct ia_css_hdr_config {
-       struct ia_css_hdr_irradiance_params irradiance; /**< HDR irradiance paramaters */
-       struct ia_css_hdr_deghost_params    deghost;    /**< HDR deghosting parameters */
-       struct ia_css_hdr_exclusion_params  exclusion; /**< HDR exclusion parameters */
+       struct ia_css_hdr_irradiance_params irradiance; /** HDR irradiance parameters */
+       struct ia_css_hdr_deghost_params    deghost;    /** HDR deghosting parameters */
+       struct ia_css_hdr_exclusion_params  exclusion; /** HDR exclusion parameters */
 };
 
 #endif /* __IA_CSS_HDR_TYPES_H */
index 3d510bf..9cd31c2 100644 (file)
 #ifndef __IA_CSS_MACC1_5_TYPES_H
 #define __IA_CSS_MACC1_5_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Multi-Axis Color Conversion algorithm parameters.
 */
 
-/** Multi-Axis Color Conversion configuration
+/* Multi-Axis Color Conversion configuration
  *
  * ISP2.6.1: MACC1_5 is used.
  */
 
 
-/** Number of axes in the MACC table. */
+/* Number of axes in the MACC table. */
 #define IA_CSS_MACC_NUM_AXES           16
-/** Number of coefficients per MACC axes. */
+/* Number of coefficients per MACC axes. */
 #define IA_CSS_MACC_NUM_COEFS          4
 
-/** Multi-Axes Color Correction (MACC) table.
+/* Multi-Axes Color Correction (MACC) table.
  *
  *  ISP block: MACC (MACC by only matrix)
  *             MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
  */
 struct ia_css_macc1_5_table {
        int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
-       /**< 16 of 2x2 matix
+       /** 16 of 2x2 matrix
          MACC1_5: s[macc_config.exp].[13-macc_config.exp], [-8192,8191]
            default/ineffective: (s1.12)
                16 of "identity 2x2 matix" {4096,0,0,4096} */
 };
 
-/** Multi-Axes Color Correction (MACC) configuration.
+/* Multi-Axes Color Correction (MACC) configuration.
  *
  *  ISP block: MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
  *  ISP2: MACC1_5 is used.
  */
 struct ia_css_macc1_5_config {
-       uint8_t exp;    /**< Common exponent of ia_css_macc_table.
+       uint8_t exp;    /** Common exponent of ia_css_macc_table.
                                u8.0, [0,13], default 1, ineffective 1 */
 };
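
Editor's note: with ia_css_macc1_5_config.exp == 1 the matrix entries are s1.12, so 1.0 is encoded as 4096 and the documented per-axis identity matrix is {4096, 0, 0, 4096}. A minimal sketch (set_macc1_5_identity() is a hypothetical helper, not a CSS API call):

static void set_macc1_5_identity(struct ia_css_macc1_5_table *t)
{
        int axis;

        for (axis = 0; axis < IA_CSS_MACC_NUM_AXES; axis++) {
                int16_t *m = &t->data[axis * IA_CSS_MACC_NUM_COEFS];

                m[0] = 4096;    /* 1.0 in s1.12 */
                m[1] = 0;
                m[2] = 0;
                m[3] = 4096;    /* 1.0 in s1.12 */
        }
}
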
 
index a25581c..2c9e5a8 100644 (file)
 #ifndef __IA_CSS_MACC_TYPES_H
 #define __IA_CSS_MACC_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Multi-Axis Color Correction (MACC) parameters.
 */
 
-/** Number of axes in the MACC table. */
+/* Number of axes in the MACC table. */
 #define IA_CSS_MACC_NUM_AXES           16
-/** Number of coefficients per MACC axes. */
+/* Number of coefficients per MACC axes. */
 #define IA_CSS_MACC_NUM_COEFS          4
-/** The number of planes in the morphing table. */
+/* The number of planes in the morphing table. */
 
-/** Multi-Axis Color Correction (MACC) table.
+/* Multi-Axis Color Correction (MACC) table.
  *
  *  ISP block: MACC1 (MACC by only matrix)
  *             MACC2 (MACC by matrix and exponent(ia_css_macc_config))
@@ -51,7 +51,7 @@
 
 struct ia_css_macc_table {
        int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
-       /**< 16 of 2x2 matix
+       /** 16 of 2x2 matrix
          MACC1: s2.13, [-65536,65535]
            default/ineffective:
                16 of "identity 2x2 matix" {8192,0,0,8192}
index eeaadfe..d981394 100644 (file)
 #ifndef __IA_CSS_OB2_TYPES_H
 #define __IA_CSS_OB2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Optical Black algorithm parameters.
 */
 
-/** Optical Black configuration
+/* Optical Black configuration
  *
  * ISP2.6.1: OB2 is used.
  */
 #include "ia_css_frac.h"
 
 struct ia_css_ob2_config {
-       ia_css_u0_16 level_gr;    /**< Black level for GR pixels.
+       ia_css_u0_16 level_gr;    /** Black level for GR pixels.
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16  level_r;     /**< Black level for R pixels.
+       ia_css_u0_16  level_r;     /** Black level for R pixels.
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16  level_b;     /**< Black level for B pixels.
+       ia_css_u0_16  level_b;     /** Black level for B pixels.
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16  level_gb;    /**< Black level for GB pixels.
+       ia_css_u0_16  level_gb;    /** Black level for GB pixels.
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
 };
index 88459b6..a9717b8 100644 (file)
 #ifndef __IA_CSS_OB_TYPES_H
 #define __IA_CSS_OB_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Optical Black level parameters.
 */
 
 #include "ia_css_frac.h"
 
-/** Optical black mode.
+/* Optical black mode.
  */
 enum ia_css_ob_mode {
-       IA_CSS_OB_MODE_NONE,    /**< OB has no effect. */
-       IA_CSS_OB_MODE_FIXED,   /**< Fixed OB */
-       IA_CSS_OB_MODE_RASTER   /**< Raster OB */
+       IA_CSS_OB_MODE_NONE,    /** OB has no effect. */
+       IA_CSS_OB_MODE_FIXED,   /** Fixed OB */
+       IA_CSS_OB_MODE_RASTER   /** Raster OB */
 };
 
-/** Optical Black level configuration.
+/* Optical Black level configuration.
  *
  *  ISP block: OB1
  *  ISP1: OB1 is used.
  *  ISP2: OB1 is used.
  */
 struct ia_css_ob_config {
-       enum ia_css_ob_mode mode; /**< Mode (None / Fixed / Raster).
+       enum ia_css_ob_mode mode; /** Mode (None / Fixed / Raster).
                                        enum, [0,2],
                                        default 1, ineffective 0 */
-       ia_css_u0_16 level_gr;    /**< Black level for GR pixels
+       ia_css_u0_16 level_gr;    /** Black level for GR pixels
                                        (used for Fixed Mode only).
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16 level_r;     /**< Black level for R pixels
+       ia_css_u0_16 level_r;     /** Black level for R pixels
                                        (used for Fixed Mode only).
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16 level_b;     /**< Black level for B pixels
+       ia_css_u0_16 level_b;     /** Black level for B pixels
                                        (used for Fixed Mode only).
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16 level_gb;    /**< Black level for GB pixels
+       ia_css_u0_16 level_gb;    /** Black level for GB pixels
                                        (used for Fixed Mode only).
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       uint16_t start_position; /**< Start position of OB area
+       uint16_t start_position; /** Start position of OB area
                                        (used for Raster Mode only).
                                        u16.0, [0,63],
                                        default/ineffective 0 */
-       uint16_t end_position;  /**< End position of OB area
+       uint16_t end_position;  /** End position of OB area
                                        (used for Raster Mode only).
                                        u16.0, [0,63],
                                        default/ineffective 0 */
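
Editor's note: a host-side sketch of fixed-mode configuration. set_fixed_ob() is hypothetical; it assumes the level_* fields are u0.16 fractions of full scale, so a raw black level of 64 on a 10-bit sensor becomes 64 * 65536 / 1024 = 4096:

#include <stdint.h>

static void set_fixed_ob(struct ia_css_ob_config *ob,
                         uint16_t raw_black, unsigned int bit_depth)
{
        ia_css_u0_16 level = (ia_css_u0_16)(((uint32_t)raw_black << 16) >> bit_depth);

        ob->mode = IA_CSS_OB_MODE_FIXED;
        ob->level_gr = level;
        ob->level_r  = level;
        ob->level_b  = level;
        ob->level_gb = level;
        ob->start_position = 0; /* raster-mode fields, unused in fixed mode */
        ob->end_position = 0;
}
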
index 26ec27e..eb7defa 100644 (file)
@@ -19,7 +19,7 @@
 #include "dma.h"
 #include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
 
-/** output frame */
+/* output frame */
 struct sh_css_isp_output_isp_config {
        uint32_t width_a_over_b;
        uint32_t height;
index 4335ac2..9c7342f 100644 (file)
 #ifndef __IA_CSS_OUTPUT_TYPES_H
 #define __IA_CSS_OUTPUT_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for parameters of output frames.
 */
 
-/** Output frame
+/* Output frame
  *
  *  ISP block: output frame
  */
@@ -40,8 +40,8 @@ struct ia_css_output1_configuration {
 };
 
 struct ia_css_output_config {
-       uint8_t enable_hflip;  /**< enable horizontal output mirroring */
-       uint8_t enable_vflip;  /**< enable vertical output mirroring */
+       uint8_t enable_hflip;  /** enable horizontal output mirroring */
+       uint8_t enable_vflip;  /** enable vertical output mirroring */
 };
 
 #endif /* __IA_CSS_OUTPUT_TYPES_H */
index 1f1b72a..026443b 100644 (file)
@@ -19,7 +19,7 @@
 #include "sh_css_defs.h"
 #include "dma.h"
 
-/** Reference frame */
+/* Reference frame */
 struct ia_css_ref_configuration {
        const struct ia_css_frame *ref_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
        uint32_t dvs_frame_delay;
index f57ed1e..8d674d2 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_S3A_TYPES_H
 #define __IA_CSS_S3A_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for 3A statistics parameters.
 */
 
 #include "../../../../components/stats_3a/src/stats_3a_public.h"
 #endif
 
-/** 3A configuration. This configures the 3A statistics collection
+/* 3A configuration. This configures the 3A statistics collection
  *  module.
  */
  
-/** 3A statistics grid
+/* 3A statistics grid
  *
  *  ISP block: S3A1 (3A Support for 3A ver.1 (Histogram is not used for AE))
  *             S3A2 (3A Support for 3A ver.2 (Histogram is used for AE))
 struct ia_css_3a_grid_info {
 
 #if defined(SYSTEM_css_skycam_c0_system)
-       uint32_t ae_enable;                                     /**< ae enabled in binary,
+       uint32_t ae_enable;                                     /** ae enabled in binary,
                                                                   0:disabled, 1:enabled */
-       struct ae_public_config_grid_config     ae_grd_info;    /**< see description in ae_public.h*/
+       struct ae_public_config_grid_config     ae_grd_info;    /** see description in ae_public.h*/
 
-       uint32_t awb_enable;                                    /**< awb enabled in binary,
+       uint32_t awb_enable;                                    /** awb enabled in binary,
                                                                   0:disabled, 1:enabled */
-       struct awb_public_config_grid_config    awb_grd_info;   /**< see description in awb_public.h*/
+       struct awb_public_config_grid_config    awb_grd_info;   /** see description in awb_public.h*/
 
-       uint32_t af_enable;                                     /**< af enabled in binary,
+       uint32_t af_enable;                                     /** af enabled in binary,
                                                                   0:disabled, 1:enabled */
-       struct af_public_grid_config            af_grd_info;    /**< see description in af_public.h*/
+       struct af_public_grid_config            af_grd_info;    /** see description in af_public.h*/
 
-       uint32_t awb_fr_enable;                                 /**< awb_fr enabled in binary,
+       uint32_t awb_fr_enable;                                 /** awb_fr enabled in binary,
                                                                   0:disabled, 1:enabled */
-       struct awb_fr_public_grid_config        awb_fr_grd_info;/**< see description in awb_fr_public.h*/
+       struct awb_fr_public_grid_config        awb_fr_grd_info;/** see description in awb_fr_public.h*/
   
-        uint32_t elem_bit_depth;    /**< TODO:Taken from BYT  - need input from AIQ
+        uint32_t elem_bit_depth;    /** TODO:Taken from BYT  - need input from AIQ
                                        if needed for SKC
                                        Bit depth of element used
                                        to calculate 3A statistics.
@@ -63,34 +63,34 @@ struct ia_css_3a_grid_info {
                                        bayer bit depth in DSP. */
 
 #else
-       uint32_t enable;            /**< 3A statistics enabled.
+       uint32_t enable;            /** 3A statistics enabled.
                                        0:disabled, 1:enabled */
-       uint32_t use_dmem;          /**< DMEM or VMEM determines layout.
+       uint32_t use_dmem;          /** DMEM or VMEM determines layout.
                                        0:3A statistics are stored to VMEM,
                                        1:3A statistics are stored to DMEM */
-       uint32_t has_histogram;     /**< Statistics include histogram.
+       uint32_t has_histogram;     /** Statistics include histogram.
                                        0:no histogram, 1:has histogram */
-       uint32_t width;             /**< Width of 3A grid table.
+       uint32_t width;             /** Width of 3A grid table.
                                        (= Horizontal number of grid cells
                                        in table, which cells have effective
                                        statistics.) */
-       uint32_t height;            /**< Height of 3A grid table.
+       uint32_t height;            /** Height of 3A grid table.
                                        (= Vertical number of grid cells
                                        in table, which cells have effective
                                        statistics.) */
-       uint32_t aligned_width;     /**< Horizontal stride (for alloc).
+       uint32_t aligned_width;     /** Horizontal stride (for alloc).
                                        (= Horizontal number of grid cells
                                        in table, which means
                                        the allocated width.) */
-       uint32_t aligned_height;    /**< Vertical stride (for alloc).
+       uint32_t aligned_height;    /** Vertical stride (for alloc).
                                        (= Vertical number of grid cells
                                        in table, which means
                                        the allocated height.) */
-       uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ(Bayer Quad) unit.
+       uint32_t bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
                                        (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
                                        Valid values are 8,16,32,64. */
-       uint32_t deci_factor_log2;  /**< log2 of bqs_per_grid_cell. */
-       uint32_t elem_bit_depth;    /**< Bit depth of element used
+       uint32_t deci_factor_log2;  /** log2 of bqs_per_grid_cell. */
+       uint32_t elem_bit_depth;    /** Bit depth of element used
                                        to calculate 3A statistics.
                                        This is 13, which is the normalized
                                        bayer bit depth in DSP. */
@@ -148,7 +148,7 @@ struct ia_css_3a_grid_info {
  * However, that will require driver/ 3A lib modifications.
  */
 
-/** 3A configuration. This configures the 3A statistics collection
+/* 3A configuration. This configures the 3A statistics collection
  *  module.
  *
  *  ae_y_*: Coefficients to calculate luminance from bayer.
@@ -167,38 +167,38 @@ struct ia_css_3a_grid_info {
  *  ISP2: S3A2 and SDVS2 are used.
  */
 struct ia_css_3a_config {
-       ia_css_u0_16 ae_y_coef_r;       /**< Weight of R for Y.
+       ia_css_u0_16 ae_y_coef_r;       /** Weight of R for Y.
                                                u0.16, [0,65535],
                                                default/ineffective 25559 */
-       ia_css_u0_16 ae_y_coef_g;       /**< Weight of G for Y.
+       ia_css_u0_16 ae_y_coef_g;       /** Weight of G for Y.
                                                u0.16, [0,65535],
                                                default/ineffective 32768 */
-       ia_css_u0_16 ae_y_coef_b;       /**< Weight of B for Y.
+       ia_css_u0_16 ae_y_coef_b;       /** Weight of B for Y.
                                                u0.16, [0,65535],
                                                default/ineffective 7209 */
-       ia_css_u0_16 awb_lg_high_raw;   /**< AWB level gate high for raw.
+       ia_css_u0_16 awb_lg_high_raw;   /** AWB level gate high for raw.
                                                u0.16, [0,65535],
                                                default 65472(=1023*64),
                                                ineffective 65535 */
-       ia_css_u0_16 awb_lg_low;        /**< AWB level gate low.
+       ia_css_u0_16 awb_lg_low;        /** AWB level gate low.
                                                u0.16, [0,65535],
                                                default 64(=1*64),
                                                ineffective 0 */
-       ia_css_u0_16 awb_lg_high;       /**< AWB level gate high.
+       ia_css_u0_16 awb_lg_high;       /** AWB level gate high.
                                                u0.16, [0,65535],
                                                default 65535,
                                                ineffective 65535 */
-       ia_css_s0_15 af_fir1_coef[7];   /**< AF FIR coefficients of fir1.
+       ia_css_s0_15 af_fir1_coef[7];   /** AF FIR coefficients of fir1.
                                                s0.15, [-32768,32767],
                                default/ineffective
                                -6689,-12207,-32768,32767,12207,6689,0 */
-       ia_css_s0_15 af_fir2_coef[7];   /**< AF FIR coefficients of fir2.
+       ia_css_s0_15 af_fir2_coef[7];   /** AF FIR coefficients of fir2.
                                                s0.15, [-32768,32767],
                                default/ineffective
                                2053,0,-18437,32767,-18437,2053,0 */
 };
 
-/** 3A statistics. This structure describes the data stored
+/* 3A statistics. This structure describes the data stored
  *  in each 3A grid point.
  *
  *  ISP block: S3A1 (3A Support for 3A ver.1) (Histogram is not used for AE)
@@ -209,43 +209,43 @@ struct ia_css_3a_config {
  *  ISP2: S3A2 is used.
  */
 struct ia_css_3a_output {
-       int32_t ae_y;    /**< Sum of Y in a statistics window, for AE.
+       int32_t ae_y;    /** Sum of Y in a statistics window, for AE.
                                (u19.13) */
-       int32_t awb_cnt; /**< Number of effective pixels
+       int32_t awb_cnt; /** Number of effective pixels
                                in a statistics window.
                                Pixels passed by the AWB level gate check are
                                judged as "effective". (u32) */
-       int32_t awb_gr;  /**< Sum of Gr in a statistics window, for AWB.
+       int32_t awb_gr;  /** Sum of Gr in a statistics window, for AWB.
                                All Gr pixels (not only for effective pixels)
                                are summed. (u19.13) */
-       int32_t awb_r;   /**< Sum of R in a statistics window, for AWB.
+       int32_t awb_r;   /** Sum of R in a statistics window, for AWB.
                                All R pixels (not only for effective pixels)
                                are summed. (u19.13) */
-       int32_t awb_b;   /**< Sum of B in a statistics window, for AWB.
+       int32_t awb_b;   /** Sum of B in a statistics window, for AWB.
                                All B pixels (not only for effective pixels)
                                are summed. (u19.13) */
-       int32_t awb_gb;  /**< Sum of Gb in a statistics window, for AWB.
+       int32_t awb_gb;  /** Sum of Gb in a statistics window, for AWB.
                                All Gb pixels (not only for effective pixels)
                                are summed. (u19.13) */
-       int32_t af_hpf1; /**< Sum of |Y| following high pass filter af_fir1
+       int32_t af_hpf1; /** Sum of |Y| following high pass filter af_fir1
                                within a statistics window, for AF. (u19.13) */
-       int32_t af_hpf2; /**< Sum of |Y| following high pass filter af_fir2
+       int32_t af_hpf2; /** Sum of |Y| following high pass filter af_fir2
                                within a statistics window, for AF. (u19.13) */
 };
 
 
-/** 3A Statistics. This structure describes the statistics that are generated
+/* 3A Statistics. This structure describes the statistics that are generated
  *  using the provided configuration (ia_css_3a_config).
  */
 struct ia_css_3a_statistics {
-       struct ia_css_3a_grid_info    grid;     /**< grid info contains the dimensions of the 3A grid */
-       struct ia_css_3a_output      *data;     /**< the pointer to 3a_output[grid.width * grid.height]
+       struct ia_css_3a_grid_info    grid;     /** grid info contains the dimensions of the 3A grid */
+       struct ia_css_3a_output      *data;     /** the pointer to 3a_output[grid.width * grid.height]
                                                     containing the 3A statistics */
-       struct ia_css_3a_rgby_output *rgby_data;/**< the pointer to 3a_rgby_output[256]
+       struct ia_css_3a_rgby_output *rgby_data;/** the pointer to 3a_rgby_output[256]
                                                     containing the histogram */
 };
 
-/** Histogram (Statistics for AE).
+/* Histogram (Statistics for AE).
  *
  *  4 histograms(r,g,b,y),
  *  256 bins for each histogram, unsigned 24bit value for each bin.
@@ -256,10 +256,10 @@ struct ia_css_3a_statistics {
  *  ISP2: HIST2 is used.
  */
 struct ia_css_3a_rgby_output {
-       uint32_t r;     /**< Number of R of one bin of the histogram R. (u24) */
-       uint32_t g;     /**< Number of G of one bin of the histogram G. (u24) */
-       uint32_t b;     /**< Number of B of one bin of the histogram B. (u24) */
-       uint32_t y;     /**< Number of Y of one bin of the histogram Y. (u24) */
+       uint32_t r;     /** Number of R of one bin of the histogram R. (u24) */
+       uint32_t g;     /** Number of G of one bin of the histogram G. (u24) */
+       uint32_t b;     /** Number of B of one bin of the histogram B. (u24) */
+       uint32_t y;     /** Number of Y of one bin of the histogram Y. (u24) */
 };
 
 #endif /* __IA_CSS_S3A_TYPES_H */
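
Editor's note: the default ae_y_coef_* values above (25559, 32768, 7209) are u0.16 weights that sum to exactly 65536, so the weighted sum shifted right by 16 stays in the input pixel range. A host-side illustration only (bayer_luma() is hypothetical, not part of the CSS API):

#include <stdint.h>

static uint32_t bayer_luma(uint16_t r, uint16_t g, uint16_t b,
                           const struct ia_css_3a_config *cfg)
{
        uint64_t acc = (uint64_t)cfg->ae_y_coef_r * r +
                       (uint64_t)cfg->ae_y_coef_g * g +
                       (uint64_t)cfg->ae_y_coef_b * b;

        return (uint32_t)(acc >> 16);   /* back to the pixel's integer range */
}
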
index 44e3c43..b35ac3e 100644 (file)
@@ -32,7 +32,7 @@ ia_css_sc_dump(
        unsigned level);
 
 #ifdef ISP2401
-/** @brief Configure the shading correction.
+/* @brief Configure the shading correction.
  * @param[out] to      Parameters used in the shading correction kernel in the isp.
  * @param[in]  from    Parameters passed from the host.
  * @param[in]  size    Size of the sh_css_isp_sc_isp_config structure.
@@ -45,7 +45,7 @@ ia_css_sc_config(
        const struct ia_css_sc_configuration *from,
        unsigned size);
 
-/** @brief Configure the shading correction.
+/* @brief Configure the shading correction.
  * @param[in]  binary  The binary, which has the shading correction.
  * @param[in]  internal_frame_origin_x_bqs_on_sctbl
  *                     X coordinate (in bqs) of the origin of the internal frame on the shading table.
index 5a833bc..30ce499 100644 (file)
 #ifndef __IA_CSS_SC_TYPES_H
 #define __IA_CSS_SC_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Lens Shading Correction (SC) parameters.
 */
 
 
-/** Number of color planes in the shading table. */
+/* Number of color planes in the shading table. */
 #define IA_CSS_SC_NUM_COLORS           4
 
-/** The 4 colors that a shading table consists of.
+/* The 4 colors that a shading table consists of.
  *  For each color we store a grid of values.
  */
 enum ia_css_sc_color {
-       IA_CSS_SC_COLOR_GR, /**< Green on a green-red line */
-       IA_CSS_SC_COLOR_R,  /**< Red */
-       IA_CSS_SC_COLOR_B,  /**< Blue */
-       IA_CSS_SC_COLOR_GB  /**< Green on a green-blue line */
+       IA_CSS_SC_COLOR_GR, /** Green on a green-red line */
+       IA_CSS_SC_COLOR_R,  /** Red */
+       IA_CSS_SC_COLOR_B,  /** Blue */
+       IA_CSS_SC_COLOR_GB  /** Green on a green-blue line */
 };
 
-/** Lens Shading Correction table.
+/* Lens Shading Correction table.
  *
  *  This describes the color shading artefacts
  *  introduced by lens imperfections. To correct artefacts,
@@ -64,39 +64,39 @@ enum ia_css_sc_color {
  *  ISP2: SC1 is used.
  */
 struct ia_css_shading_table {
-       uint32_t enable; /**< Set to false for no shading correction.
+       uint32_t enable; /** Set to false for no shading correction.
                          The data field can be NULL when enable == true */
 /* ------ deprecated(bz675) : from ------ */
-       uint32_t sensor_width;  /**< Native sensor width in pixels. */
-       uint32_t sensor_height; /**< Native sensor height in lines.
+       uint32_t sensor_width;  /** Native sensor width in pixels. */
+       uint32_t sensor_height; /** Native sensor height in lines.
                When shading_settings.enable_shading_table_conversion is set
                as 0, sensor_width and sensor_height are NOT used.
                These are used only in the legacy shading table conversion
                in the css, when shading_settings.
                enable_shading_table_conversion is set as 1. */
 /* ------ deprecated(bz675) : to ------ */
-       uint32_t width;  /**< Number of data points per line per color.
+       uint32_t width;  /** Number of data points per line per color.
                                u8.0, [0,81] */
-       uint32_t height; /**< Number of lines of data points per color.
+       uint32_t height; /** Number of lines of data points per color.
                                u8.0, [0,61] */
-       uint32_t fraction_bits; /**< Bits of fractional part in the data
+       uint32_t fraction_bits; /** Bits of fractional part in the data
                                points.
                                u8.0, [0,13] */
        uint16_t *data[IA_CSS_SC_NUM_COLORS];
-       /**< Table data, one array for each color.
+       /** Table data, one array for each color.
             Use ia_css_sc_color to index this array.
             u[13-fraction_bits].[fraction_bits], [0,8191] */
 };
 
 /* ------ deprecated(bz675) : from ------ */
-/** Shading Correction settings.
+/* Shading Correction settings.
  *
  *  NOTE:
  *  This structure should be removed when the shading table conversion is
  *  removed from the css.
  */
 struct ia_css_shading_settings {
-       uint32_t enable_shading_table_conversion; /**< Set to 0,
+       uint32_t enable_shading_table_conversion; /** Set to 0,
                if the conversion of the shading table should be disabled
                in the css. (default 1)
                  0: The shading table is directly sent to the isp.
@@ -119,14 +119,14 @@ struct ia_css_shading_settings {
 
 #ifdef ISP2401
 
-/** Shading Correction configuration.
+/* Shading Correction configuration.
  *
  *  NOTE: The shading table size is larger than or equal to the internal frame size.
  */
 struct ia_css_sc_configuration {
-       uint32_t internal_frame_origin_x_bqs_on_sctbl; /**< Origin X (in bqs) of internal frame on shading table. */
-       uint32_t internal_frame_origin_y_bqs_on_sctbl; /**< Origin Y (in bqs) of internal frame on shading table. */
-                                               /**< NOTE: bqs = size in BQ(Bayer Quad) unit.
+       uint32_t internal_frame_origin_x_bqs_on_sctbl; /** Origin X (in bqs) of internal frame on shading table. */
+       uint32_t internal_frame_origin_y_bqs_on_sctbl; /** Origin Y (in bqs) of internal frame on shading table. */
+                                               /** NOTE: bqs = size in BQ(Bayer Quad) unit.
                                                        1BQ means {Gr,R,B,Gb}(2x2 pixels).
                                                        Horizontal 1 bqs corresponds to horizontal 2 pixels.
                                                        Vertical 1 bqs corresponds to vertical 2 pixels. */
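
Editor's note: shading table entries are u[13-fraction_bits].[fraction_bits], so unity gain is 1 << fraction_bits and a pixel is corrected roughly as pixel * entry >> fraction_bits. A sketch of how one entry might be looked up and applied (sc_gain() is hypothetical and ignores the bqs-to-pixel mapping described above):

#include <stdint.h>

static uint32_t sc_gain(const struct ia_css_shading_table *tbl,
                        enum ia_css_sc_color color,
                        uint32_t x, uint32_t y, uint32_t pixel)
{
        uint16_t entry = tbl->data[color][y * tbl->width + x];

        return (pixel * (uint32_t)entry) >> tbl->fraction_bits;
}
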
index 295dc60..031983c 100644 (file)
 #ifndef __IA_CSS_SDIS_COMMON_TYPES_H
 #define __IA_CSS_SDIS_COMMON_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for DVS statistics parameters.
 */
 
 #include <type_support.h>
 
-/** DVS statistics grid dimensions in number of cells.
+/* DVS statistics grid dimensions in number of cells.
  */
 
 struct ia_css_dvs_grid_dim {
-       uint32_t width;         /**< Width of DVS grid table in cells */
-       uint32_t height;        /**< Height of DVS grid table in cells */
+       uint32_t width;         /** Width of DVS grid table in cells */
+       uint32_t height;        /** Height of DVS grid table in cells */
 };
 
-/** DVS statistics dimensions in number of cells for
+/* DVS statistics dimensions in number of cells for
  * grid, coefficient and projection.
  */
 
@@ -55,7 +55,7 @@ struct ia_css_sdis_info {
                0,      /* dis_deci_factor_log2 */ \
        }
 
-/** DVS statistics grid
+/* DVS statistics grid
  *
  *  ISP block: SDVS1 (DIS/DVS Support for DIS/DVS ver.1 (2-axes))
  *             SDVS2 (DVS Support for DVS ver.2 (6-axes))
@@ -63,23 +63,23 @@ struct ia_css_sdis_info {
  *  ISP2: SDVS2 is used.
  */
 struct ia_css_dvs_grid_res {
-       uint32_t width;         /**< Width of DVS grid table.
+       uint32_t width;         /** Width of DVS grid table.
                                        (= Horizontal number of grid cells
                                        in table, which cells have effective
                                        statistics.)
                                        For DVS1, this is equal to
                                         the number of vertical statistics. */
-       uint32_t aligned_width; /**< Stride of each grid line.
+       uint32_t aligned_width; /** Stride of each grid line.
                                        (= Horizontal number of grid cells
                                        in table, which means
                                        the allocated width.) */
-       uint32_t height;        /**< Height of DVS grid table.
+       uint32_t height;        /** Height of DVS grid table.
                                        (= Vertical number of grid cells
                                        in table, which cells have effective
                                        statistics.)
                                        For DVS1, This is equal to
                                        the number of horizontal statistics. */
-       uint32_t aligned_height;/**< Stride of each grid column.
+       uint32_t aligned_height;/** Stride of each grid column.
                                        (= Vertical number of grid cells
                                        in table, which means
                                        the allocated height.) */
@@ -89,125 +89,125 @@ struct ia_css_dvs_grid_res {
  * However, that implies driver I/F changes
  */
 struct ia_css_dvs_grid_info {
-       uint32_t enable;        /**< DVS statistics enabled.
+       uint32_t enable;        /** DVS statistics enabled.
                                        0:disabled, 1:enabled */
-       uint32_t width;         /**< Width of DVS grid table.
+       uint32_t width;         /** Width of DVS grid table.
                                        (= Horizontal number of grid cells
                                        in table, which cells have effective
                                        statistics.)
                                        For DVS1, this is equal to
                                         the number of vertical statistics. */
-       uint32_t aligned_width; /**< Stride of each grid line.
+       uint32_t aligned_width; /** Stride of each grid line.
                                        (= Horizontal number of grid cells
                                        in table, which means
                                        the allocated width.) */
-       uint32_t height;        /**< Height of DVS grid table.
+       uint32_t height;        /** Height of DVS grid table.
                                        (= Vertical number of grid cells
                                        in table, which cells have effective
                                        statistics.)
                                        For DVS1, This is equal to
                                        the number of horizontal statistics. */
-       uint32_t aligned_height;/**< Stride of each grid column.
+       uint32_t aligned_height;/** Stride of each grid column.
                                        (= Vertical number of grid cells
                                        in table, which means
                                        the allocated height.) */
-       uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ(Bayer Quad) unit.
+       uint32_t bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
                                        (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
                                        For DVS1, valid value is 64.
                                        For DVS2, valid value is only 64,
                                        currently. */
-       uint32_t num_hor_coefs; /**< Number of horizontal coefficients. */
-       uint32_t num_ver_coefs; /**< Number of vertical coefficients. */
+       uint32_t num_hor_coefs; /** Number of horizontal coefficients. */
+       uint32_t num_ver_coefs; /** Number of vertical coefficients. */
 };
 
-/** Number of DVS statistics levels
+/* Number of DVS statistics levels
  */
 #define IA_CSS_DVS_STAT_NUM_OF_LEVELS  3
 
-/** DVS statistics generated by accelerator global configuration
+/* DVS statistics generated by accelerator global configuration
  */
 struct dvs_stat_public_dvs_global_cfg {
        unsigned char kappa;
-       /**< DVS statistics global configuration - kappa */
+       /** DVS statistics global configuration - kappa */
        unsigned char match_shift;
-       /**< DVS statistics global configuration - match_shift */
+       /** DVS statistics global configuration - match_shift */
        unsigned char ybin_mode;
-       /**< DVS statistics global configuration - y binning mode */
+       /** DVS statistics global configuration - y binning mode */
 };
 
-/** DVS statistics generated by accelerator level grid
+/* DVS statistics generated by accelerator level grid
  *  configuration
  */
 struct dvs_stat_public_dvs_level_grid_cfg {
        unsigned char grid_width;
-       /**< DVS statistics grid width */
+       /** DVS statistics grid width */
        unsigned char grid_height;
-       /**< DVS statistics grid height */
+       /** DVS statistics grid height */
        unsigned char block_width;
-       /**< DVS statistics block width */
+       /** DVS statistics block width */
        unsigned char block_height;
-       /**< DVS statistics block  height */
+       /** DVS statistics block  height */
 };
 
-/** DVS statistics generated by accelerator level grid start
+/* DVS statistics generated by accelerator level grid start
  *  configuration
  */
 struct dvs_stat_public_dvs_level_grid_start {
        unsigned short x_start;
-       /**< DVS statistics level x start */
+       /** DVS statistics level x start */
        unsigned short y_start;
-       /**< DVS statistics level y start */
+       /** DVS statistics level y start */
        unsigned char enable;
-       /**< DVS statistics level enable */
+       /** DVS statistics level enable */
 };
 
-/** DVS statistics generated by accelerator level grid end
+/* DVS statistics generated by accelerator level grid end
  *  configuration
  */
 struct dvs_stat_public_dvs_level_grid_end {
        unsigned short x_end;
-       /**< DVS statistics level x end */
+       /** DVS statistics level x end */
        unsigned short y_end;
-       /**< DVS statistics level y end */
+       /** DVS statistics level y end */
 };
 
-/** DVS statistics generated by accelerator Feature Extraction
+/* DVS statistics generated by accelerator Feature Extraction
  *  Region Of Interest (FE-ROI) configuration
  */
 struct dvs_stat_public_dvs_level_fe_roi_cfg {
        unsigned char x_start;
-       /**< DVS statistics fe-roi level x start */
+       /** DVS statistics fe-roi level x start */
        unsigned char y_start;
-       /**< DVS statistics fe-roi level y start */
+       /** DVS statistics fe-roi level y start */
        unsigned char x_end;
-       /**< DVS statistics fe-roi level x end */
+       /** DVS statistics fe-roi level x end */
        unsigned char y_end;
-       /**< DVS statistics fe-roi level y end */
+       /** DVS statistics fe-roi level y end */
 };
 
-/** DVS statistics generated by accelerator public configuration
+/* DVS statistics generated by accelerator public configuration
  */
 struct dvs_stat_public_dvs_grd_cfg {
        struct dvs_stat_public_dvs_level_grid_cfg    grd_cfg;
-       /**< DVS statistics level grid configuration */
+       /** DVS statistics level grid configuration */
        struct dvs_stat_public_dvs_level_grid_start  grd_start;
-       /**< DVS statistics level grid start configuration */
+       /** DVS statistics level grid start configuration */
        struct dvs_stat_public_dvs_level_grid_end    grd_end;
-       /**< DVS statistics level grid end configuration */
+       /** DVS statistics level grid end configuration */
 };
 
-/** DVS statistics grid generated by accelerator
+/* DVS statistics grid generated by accelerator
  */
 struct ia_css_dvs_stat_grid_info {
        struct dvs_stat_public_dvs_global_cfg       dvs_gbl_cfg;
-       /**< DVS statistics global configuration (kappa, match, binning) */
+       /** DVS statistics global configuration (kappa, match, binning) */
        struct dvs_stat_public_dvs_grd_cfg       grd_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
-       /**< DVS statistics grid configuration (blocks and grids) */
+       /** DVS statistics grid configuration (blocks and grids) */
        struct dvs_stat_public_dvs_level_fe_roi_cfg fe_roi_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
-       /**< DVS statistics FE ROI (region of interest) configuration */
+       /** DVS statistics FE ROI (region of interest) configuration */
 };
 
-/** DVS statistics generated by accelerator default grid info
+/* DVS statistics generated by accelerator default grid info
  */
 #define DEFAULT_DVS_GRID_INFO { \
 { \
@@ -219,14 +219,14 @@ struct ia_css_dvs_stat_grid_info {
 }
 
 
-/** Union that holds all types of DVS statistics grid info in
+/* Union that holds all types of DVS statistics grid info in
  *  CSS format
  * */
 union ia_css_dvs_grid_u {
        struct ia_css_dvs_stat_grid_info dvs_stat_grid_info;
-       /**< DVS statistics produced by accelerator grid info */
+       /** DVS statistics produced by accelerator grid info */
        struct ia_css_dvs_grid_info dvs_grid_info;
-       /**< DVS (DVS1/DVS2) grid info */
+       /** DVS (DVS1/DVS2) grid info */
 };
 
 #endif /* __IA_CSS_SDIS_COMMON_TYPES_H */
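
Editor's note: as the comments above state, width/height count the cells that carry valid statistics while aligned_width/aligned_height describe the allocated stride. Two trivial helpers (both hypothetical) make the distinction explicit; buffers are sized from the first, consumers iterate over the second:

static uint32_t dvs_grid_alloc_cells(const struct ia_css_dvs_grid_info *g)
{
        return g->aligned_width * g->aligned_height;    /* allocation size */
}

static uint32_t dvs_grid_valid_cells(const struct ia_css_dvs_grid_info *g)
{
        return g->width * g->height;                    /* cells with valid data */
}
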
index d408b58..d2ee570 100644 (file)
 #ifndef __IA_CSS_SDIS_TYPES_H
 #define __IA_CSS_SDIS_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for DVS statistics parameters.
 */
 
-/** Number of DVS coefficient types */
+/* Number of DVS coefficient types */
 #define IA_CSS_DVS_NUM_COEF_TYPES      6
 
 #ifndef PIPE_GENERATION
 #include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
 #endif
 
-/** DVS 1.0 Coefficients.
+/* DVS 1.0 Coefficients.
  *  This structure describes the coefficients that are needed for the dvs statistics.
  */
 
 struct ia_css_dvs_coefficients {
-       struct ia_css_dvs_grid_info grid;/**< grid info contains the dimensions of the dvs grid */
-       int16_t *hor_coefs;     /**< the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+       struct ia_css_dvs_grid_info grid;/** grid info contains the dimensions of the dvs grid */
+       int16_t *hor_coefs;     /** the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
                                     containing the horizontal coefficients */
-       int16_t *ver_coefs;     /**< the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+       int16_t *ver_coefs;     /** the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
                                     containing the vertical coefficients */
 };
 
-/** DVS 1.0 Statistics.
+/* DVS 1.0 Statistics.
  *  This structure describes the statistics that are generated using the provided coefficients.
  */
 
 struct ia_css_dvs_statistics {
-       struct ia_css_dvs_grid_info grid;/**< grid info contains the dimensions of the dvs grid */
-       int32_t *hor_proj;      /**< the pointer to int16_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES]
+       struct ia_css_dvs_grid_info grid;/** grid info contains the dimensions of the dvs grid */
+       int32_t *hor_proj;      /** the pointer to int16_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES]
                                     containing the horizontal projections */
-       int32_t *ver_proj;      /**< the pointer to int16_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES]
+       int32_t *ver_proj;      /** the pointer to int16_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES]
                                     containing the vertical projections */
 };
 
index 7db7dd1..2a0bc40 100644 (file)
 #ifndef __IA_CSS_SDIS2_TYPES_H
 #define __IA_CSS_SDIS2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for DVS statistics parameters.
 */
 
-/** Number of DVS coefficient types */
+/* Number of DVS coefficient types */
 #define IA_CSS_DVS2_NUM_COEF_TYPES     4
 
 #ifndef PIPE_GENERATION
 #include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
 #endif
 
-/** DVS 2.0 Coefficient types. This structure contains 4 pointers to
+/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
 *  arrays that contain the coefficients for each type.
  */
 struct ia_css_dvs2_coef_types {
-       int16_t *odd_real; /**< real part of the odd coefficients*/
-       int16_t *odd_imag; /**< imaginary part of the odd coefficients*/
-       int16_t *even_real;/**< real part of the even coefficients*/
-       int16_t *even_imag;/**< imaginary part of the even coefficients*/
+       int16_t *odd_real; /** real part of the odd coefficients*/
+       int16_t *odd_imag; /** imaginary part of the odd coefficients*/
+       int16_t *even_real;/** real part of the even coefficients*/
+       int16_t *even_imag;/** imaginary part of the even coefficients*/
 };
 
-/** DVS 2.0 Coefficients. This structure describes the coefficients that are needed for the dvs statistics.
+/* DVS 2.0 Coefficients. This structure describes the coefficients that are needed for the dvs statistics.
  *  e.g. hor_coefs.odd_real is the pointer to int16_t[grid.num_hor_coefs] containing the horizontal odd real 
  *  coefficients.
  */
 struct ia_css_dvs2_coefficients {
-       struct ia_css_dvs_grid_info grid;        /**< grid info contains the dimensions of the dvs grid */
-       struct ia_css_dvs2_coef_types hor_coefs; /**< struct with pointers that contain the horizontal coefficients */
-       struct ia_css_dvs2_coef_types ver_coefs; /**< struct with pointers that contain the vertical coefficients */
+       struct ia_css_dvs_grid_info grid;        /** grid info contains the dimensions of the dvs grid */
+       struct ia_css_dvs2_coef_types hor_coefs; /** struct with pointers that contain the horizontal coefficients */
+       struct ia_css_dvs2_coef_types ver_coefs; /** struct with pointers that contain the vertical coefficients */
 };
 
-/** DVS 2.0 Statistic types. This structure contains 4 pointers to
+/* DVS 2.0 Statistic types. This structure contains 4 pointers to
  *  arrays that contain the statistics for each type.
  */
 struct ia_css_dvs2_stat_types {
-       int32_t *odd_real; /**< real part of the odd statistics*/
-       int32_t *odd_imag; /**< imaginary part of the odd statistics*/
-       int32_t *even_real;/**< real part of the even statistics*/
-       int32_t *even_imag;/**< imaginary part of the even statistics*/
+       int32_t *odd_real; /** real part of the odd statistics*/
+       int32_t *odd_imag; /** imaginary part of the odd statistics*/
+       int32_t *even_real;/** real part of the even statistics*/
+       int32_t *even_imag;/** imaginary part of the even statistics*/
 };
 
-/** DVS 2.0 Statistics. This structure describes the statistics that are generated using the provided coefficients.
+/* DVS 2.0 Statistics. This structure describes the statistics that are generated using the provided coefficients.
  *  e.g. hor_prod.odd_real is the pointer to int16_t[grid.aligned_height][grid.aligned_width] containing 
  *  the horizontal odd real statistics. Valid statistics data area is int16_t[0..grid.height-1][0..grid.width-1]
  */
 struct ia_css_dvs2_statistics {
-       struct ia_css_dvs_grid_info grid;       /**< grid info contains the dimensions of the dvs grid */
-       struct ia_css_dvs2_stat_types hor_prod; /**< struct with pointers that contain the horizontal statistics */
-       struct ia_css_dvs2_stat_types ver_prod; /**< struct with pointers that contain the vertical statistics */
+       struct ia_css_dvs_grid_info grid;       /** grid info contains the dimensions of the dvs grid */
+       struct ia_css_dvs2_stat_types hor_prod; /** struct with pointers that contain the horizontal statistics */
+       struct ia_css_dvs2_stat_types ver_prod; /** struct with pointers that contain the vertical statistics */
 };
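A minimal sketch of walking only the valid region of one statistics plane, assuming the grid field names (width, height, aligned_width) taken from the comment above and a hypothetical, already-filled structure s:

        int x, y;
        int32_t val;

        for (y = 0; y < s.grid.height; y++) {
                for (x = 0; x < s.grid.width; x++) {
                        /* rows are stored with the aligned stride */
                        val = s.hor_prod.odd_real[y * s.grid.aligned_width + x];
                }
        }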
 
 #endif /* __IA_CSS_SDIS2_TYPES_H */
index cc47a50..91ea8dd 100644 (file)
 #ifndef __IA_CSS_TDF_TYPES_H
 #define __IA_CSS_TDF_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Transform Domain Filter parameters.
 */
 
 #include "type_support.h"
 
-/** Transform Domain Filter configuration
+/* Transform Domain Filter configuration
  *
  * \brief TDF public parameters.
  * \details Struct with all parameters for the TDF kernel that can be set
  * ISP2.6.1: TDF is used.
  */
 struct ia_css_tdf_config {
-       int32_t thres_flat_table[64];   /**< Final optimized strength table of NR for flat region. */
-       int32_t thres_detail_table[64]; /**< Final optimized strength table of NR for detail region. */
-       int32_t epsilon_0;              /**< Coefficient to control variance for dark area (for flat region). */
-       int32_t epsilon_1;              /**< Coefficient to control variance for bright area (for flat region). */
-       int32_t eps_scale_text;         /**< Epsilon scaling coefficient for texture region. */
-       int32_t eps_scale_edge;         /**< Epsilon scaling coefficient for edge region. */
-       int32_t sepa_flat;              /**< Threshold to judge flat (edge < m_Flat_thre). */
-       int32_t sepa_edge;              /**< Threshold to judge edge (edge > m_Edge_thre). */
-       int32_t blend_flat;             /**< Blending ratio at flat region. */
-       int32_t blend_text;             /**< Blending ratio at texture region. */
-       int32_t blend_edge;             /**< Blending ratio at edge region. */
-       int32_t shading_gain;           /**< Gain of Shading control. */
-       int32_t shading_base_gain;      /**< Base Gain of Shading control. */
-       int32_t local_y_gain;           /**< Gain of local luminance control. */
-       int32_t local_y_base_gain;      /**< Base gain of local luminance control. */
-       int32_t rad_x_origin;           /**< Initial x coord. for radius computation. */
-       int32_t rad_y_origin;           /**< Initial y coord. for radius computation. */
+       int32_t thres_flat_table[64];   /** Final optimized strength table of NR for flat region. */
+       int32_t thres_detail_table[64]; /** Final optimized strength table of NR for detail region. */
+       int32_t epsilon_0;              /** Coefficient to control variance for dark area (for flat region). */
+       int32_t epsilon_1;              /** Coefficient to control variance for bright area (for flat region). */
+       int32_t eps_scale_text;         /** Epsilon scaling coefficient for texture region. */
+       int32_t eps_scale_edge;         /** Epsilon scaling coefficient for edge region. */
+       int32_t sepa_flat;              /** Threshold to judge flat (edge < m_Flat_thre). */
+       int32_t sepa_edge;              /** Threshold to judge edge (edge > m_Edge_thre). */
+       int32_t blend_flat;             /** Blending ratio at flat region. */
+       int32_t blend_text;             /** Blending ratio at texture region. */
+       int32_t blend_edge;             /** Blending ratio at edge region. */
+       int32_t shading_gain;           /** Gain of Shading control. */
+       int32_t shading_base_gain;      /** Base Gain of Shading control. */
+       int32_t local_y_gain;           /** Gain of local luminance control. */
+       int32_t local_y_base_gain;      /** Base gain of local luminance control. */
+       int32_t rad_x_origin;           /** Initial x coord. for radius computation. */
+       int32_t rad_y_origin;           /** Initial y coord. for radius computation. */
 };
 
 #endif /* __IA_CSS_TDF_TYPES_H */
index 135563f..223423f 100644 (file)
@@ -16,7 +16,7 @@ more details.
 #ifndef _IA_CSS_TNR3_TYPES_H
 #define _IA_CSS_TNR3_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Temporal Noise Reduction v3 (TNR3) kernel
 */
 
@@ -27,7 +27,7 @@ more details.
  */
 #define TNR3_NUM_SEGMENTS    3
 
-/** Temporal Noise Reduction v3 (TNR3) configuration.
+/* Temporal Noise Reduction v3 (TNR3) configuration.
  * The parameter to this kernel is fourfold
  * 1. Three piecewise linear graphs (one for each plane) with three segments
  * each. Each line graph has Luma values on the x axis and sigma values for
@@ -44,17 +44,17 @@ more details.
  * 4. Selection of the reference frame buffer to be used for noise reduction.
  */
 struct ia_css_tnr3_kernel_config {
-       unsigned int maxfb_y;                        /**< Maximum Feedback Gain for Y */
-       unsigned int maxfb_u;                        /**< Maximum Feedback Gain for U */
-       unsigned int maxfb_v;                        /**< Maximum Feedback Gain for V */
-       unsigned int round_adj_y;                    /**< Rounding Adjust for Y */
-       unsigned int round_adj_u;                    /**< Rounding Adjust for U */
-       unsigned int round_adj_v;                    /**< Rounding Adjust for V */
-       unsigned int knee_y[TNR3_NUM_SEGMENTS - 1];  /**< Knee points */
-       unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for Y at points Y0, Y1, Y2, Y3 */
-       unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for U at points U0, U1, U2, U3 */
-       unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for V at points V0, V1, V2, V3 */
-       unsigned int ref_buf_select;                 /**< Selection of the reference buffer */
+       unsigned int maxfb_y;                        /** Maximum Feedback Gain for Y */
+       unsigned int maxfb_u;                        /** Maximum Feedback Gain for U */
+       unsigned int maxfb_v;                        /** Maximum Feedback Gain for V */
+       unsigned int round_adj_y;                    /** Rounding Adjust for Y */
+       unsigned int round_adj_u;                    /** Rounding Adjust for U */
+       unsigned int round_adj_v;                    /** Rounding Adjust for V */
+       unsigned int knee_y[TNR3_NUM_SEGMENTS - 1];  /** Knee points */
+       unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for Y at points Y0, Y1, Y2, Y3 */
+       unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for U at points U0, U1, U2, U3 */
+       unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for V at points V0, V1, V2, V3 */
+       unsigned int ref_buf_select;                 /** Selection of the reference buffer */
 };
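A minimal sketch of evaluating one of the piecewise linear luma-to-sigma graphs described above. The segment boundaries {0, knee_y[0], knee_y[1], luma_max}, luma_max itself, and strictly increasing boundaries are assumptions for illustration; only the array sizes come from this header:

        static int tnr3_sigma_for_luma(const struct ia_css_tnr3_kernel_config *cfg,
                                       int luma, int luma_max)
        {
                int bounds[TNR3_NUM_SEGMENTS + 1] = { 0, cfg->knee_y[0], cfg->knee_y[1], luma_max };
                int i;

                for (i = 0; i < TNR3_NUM_SEGMENTS; i++) {
                        if (luma <= bounds[i + 1])
                                return cfg->sigma_y[i] +
                                       ((int)cfg->sigma_y[i + 1] - (int)cfg->sigma_y[i]) *
                                       (luma - bounds[i]) / (bounds[i + 1] - bounds[i]);
                }
                return cfg->sigma_y[TNR3_NUM_SEGMENTS];
        }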
 
 #endif
index 4fd35e6..9bbc9ab 100644 (file)
 #ifndef __IA_CSS_TNR_TYPES_H
 #define __IA_CSS_TNR_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Temporal Noise Reduction (TNR) parameters.
 */
 
-/** Temporal Noise Reduction (TNR) configuration.
+/* Temporal Noise Reduction (TNR) configuration.
  *
  *  When difference between current frame and previous frame is less than or
  *  equal to threshold, TNR works and current frame is mixed
 
 
 struct ia_css_tnr_config {
-       ia_css_u0_16 gain; /**< Interpolation ratio of current frame
+       ia_css_u0_16 gain; /** Interpolation ratio of current frame
                                and previous frame.
                                gain=0.0 -> previous frame is outputted.
                                gain=1.0 -> current frame is outputted.
                                u0.16, [0,65535],
                        default 32768(0.5), ineffective 65535(almost 1.0) */
-       ia_css_u0_16 threshold_y; /**< Threshold to enable interpolation of Y.
+       ia_css_u0_16 threshold_y; /** Threshold to enable interpolation of Y.
                                If difference between current frame and
                                previous frame is greater than threshold_y,
                                TNR for Y is disabled.
                                u0.16, [0,65535], default/ineffective 0 */
-       ia_css_u0_16 threshold_uv; /**< Threshold to enable interpolation of
+       ia_css_u0_16 threshold_uv; /** Threshold to enable interpolation of
                                U/V.
                                If difference between current frame and
                                previous frame is greater than threshold_uv,
index df5d37c..9df4e12 100644 (file)
@@ -23,9 +23,9 @@
 
 #define VFDEC_BITS_PER_PIXEL   GAMMA_OUTPUT_BITS
 
-/** Viewfinder decimation */
+/* Viewfinder decimation */
 struct sh_css_isp_vf_isp_config {
-       uint32_t vf_downscale_bits; /**< Log VF downscale value */
+       uint32_t vf_downscale_bits; /** Log VF downscale value */
        uint32_t enable;
        struct ia_css_frame_sp_info info;
        struct {
index d8cfdfb..e3efafa 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_VF_TYPES_H
 #define __IA_CSS_VF_TYPES_H
 
-/** Viewfinder decimation
+/* Viewfinder decimation
  *
  *  ISP block: vfeven_horizontal_downscale
  */
@@ -24,7 +24,7 @@
 #include <type_support.h>
 
 struct ia_css_vf_configuration {
-       uint32_t vf_downscale_bits; /**< Log VF downscale value */
+       uint32_t vf_downscale_bits; /** Log VF downscale value */
        const struct ia_css_frame_info *info;
 };
 
index 6bcfa27..bf98734 100644 (file)
 #ifndef __IA_CSS_WB_TYPES_H
 #define __IA_CSS_WB_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for White Balance parameters.
 */
 
 
-/** White Balance configuration (Gain Adjust).
+/* White Balance configuration (Gain Adjust).
  *
  *  ISP block: WB1
  *  ISP1: WB1 is used.
  *  ISP2: WB1 is used.
  */
 struct ia_css_wb_config {
-       uint32_t integer_bits; /**< Common exponent of gains.
+       uint32_t integer_bits; /** Common exponent of gains.
                                u8.0, [0,3],
                                default 1, ineffective 1 */
-       uint32_t gr;    /**< Significand of Gr gain.
+       uint32_t gr;    /** Significand of Gr gain.
                                u[integer_bits].[16-integer_bits], [0,65535],
                                default/ineffective 32768(u1.15, 1.0) */
-       uint32_t r;     /**< Significand of R gain.
+       uint32_t r;     /** Significand of R gain.
                                u[integer_bits].[16-integer_bits], [0,65535],
                                default/ineffective 32768(u1.15, 1.0) */
-       uint32_t b;     /**< Significand of B gain.
+       uint32_t b;     /** Significand of B gain.
                                u[integer_bits].[16-integer_bits], [0,65535],
                                default/ineffective 32768(u1.15, 1.0) */
-       uint32_t gb;    /**< Significand of Gb gain.
+       uint32_t gb;    /** Significand of Gb gain.
                                u[integer_bits].[16-integer_bits], [0,65535],
                                default/ineffective 32768(u1.15, 1.0) */
 };
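A minimal decode sketch for the gain encoding documented above (a significand with a shared exponent), assuming a hypothetical, already-filled structure cfg; with the defaults (integer_bits = 1, gr = 32768) this yields 1.0:

        unsigned int frac_bits = 16 - cfg.integer_bits; /* u[integer_bits].[16-integer_bits] */
        float gr_gain = (float)cfg.gr / (float)(1u << frac_bits);
        float r_gain  = (float)cfg.r  / (float)(1u << frac_bits);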
index 3018100..abcb531 100644 (file)
@@ -21,7 +21,7 @@
 #include "ia_css_xnr.host.h"
 
 const struct ia_css_xnr_config default_xnr_config = {
-       /** default threshold 6400 translates to 25 on ISP. */
+       /* default threshold 6400 translates to 25 on ISP. */
        6400
 };
 
index 806c9f8..a5caebb 100644 (file)
@@ -41,7 +41,7 @@ struct sh_css_isp_xnr_vamem_params {
 };
 
 struct sh_css_isp_xnr_params {
-       /** XNR threshold.
+       /* XNR threshold.
         * type:u0.16 but actual valid range is:[0,255]
         * valid range is dependent on SH_CSS_ISP_YUV_BITS (currently 8bits)
         * default: 25 */
index 89e8b0f..d2b6342 100644 (file)
 #ifndef __IA_CSS_XNR_TYPES_H
 #define __IA_CSS_XNR_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Extra Noise Reduction (XNR) parameters.
 */
 
-/** XNR table.
+/* XNR table.
  *
  *  NOTE: The driver does not need to set this table,
  *        because the default values are set inside the css.
  *
  */
 
-/** Number of elements in the xnr table. */
+/* Log2 of the number of elements in the xnr table. */
 #define IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2      6
-/** Number of elements in the xnr table. */
+/* Number of elements in the xnr table. */
 #define IA_CSS_VAMEM_1_XNR_TABLE_SIZE           (1U<<IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2)
 
-/** Number of elements in the xnr table. */
+/* Log2 of the number of elements in the xnr table. */
 #define IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2      6
-/** Number of elements in the xnr table. */
+/* Number of elements in the xnr table. */
 #define IA_CSS_VAMEM_2_XNR_TABLE_SIZE          (1U<<IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2)
 
-/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
      IA_CSS_VAMEM_TYPE_2(ISP2400) */
 union ia_css_xnr_data {
        uint16_t vamem_1[IA_CSS_VAMEM_1_XNR_TABLE_SIZE];
-       /**< Coefficients table on vamem type1. u0.12, [0,4095] */
+       /** Coefficients table on vamem type1. u0.12, [0,4095] */
        uint16_t vamem_2[IA_CSS_VAMEM_2_XNR_TABLE_SIZE];
-       /**< Coefficients table on vamem type2. u0.12, [0,4095] */
+       /** Coefficients table on vamem type2. u0.12, [0,4095] */
 };
 
 struct ia_css_xnr_table {
@@ -61,7 +61,7 @@ struct ia_css_xnr_table {
 };
 
 struct ia_css_xnr_config {
-       /** XNR threshold.
+       /* XNR threshold.
         * type:u0.16 valid range:[0,65535]
         * default: 6400 */
        uint16_t threshold;
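A minimal sketch of how the u0.16 host threshold above relates to the 8-bit ISP value in sh_css_isp_xnr_params, assuming the scaling is a plain truncating shift; the default of 6400 maps to 25, matching the comment in default_xnr_config:

        uint16_t host_threshold = 6400;                /* u0.16, default */
        uint8_t  isp_threshold  = host_threshold >> 8; /* 6400 >> 8 == 25 */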
index 8f14d10..669200c 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_XNR3_TYPES_H
 #define __IA_CSS_XNR3_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Extra Noise Reduction (XNR) parameters.
 */
 
  * IA_CSS_XNR3_SIGMA_SCALE.
  */
 struct ia_css_xnr3_sigma_params {
-       int y0;     /**< Sigma for Y range similarity in dark area */
-       int y1;     /**< Sigma for Y range similarity in bright area */
-       int u0;     /**< Sigma for U range similarity in dark area */
-       int u1;     /**< Sigma for U range similarity in bright area */
-       int v0;     /**< Sigma for V range similarity in dark area */
-       int v1;     /**< Sigma for V range similarity in bright area */
+       int y0;     /** Sigma for Y range similarity in dark area */
+       int y1;     /** Sigma for Y range similarity in bright area */
+       int u0;     /** Sigma for U range similarity in dark area */
+       int u1;     /** Sigma for U range similarity in bright area */
+       int v0;     /** Sigma for V range similarity in dark area */
+       int v1;     /** Sigma for V range similarity in bright area */
 };
 
 /**
@@ -64,10 +64,10 @@ struct ia_css_xnr3_sigma_params {
  * with IA_CSS_XNR3_CORING_SCALE. The ineffective value is 0.
  */
 struct ia_css_xnr3_coring_params {
-       int u0;     /**< Coring threshold of U channel in dark area */
-       int u1;     /**< Coring threshold of U channel in bright area */
-       int v0;     /**< Coring threshold of V channel in dark area */
-       int v1;     /**< Coring threshold of V channel in bright area */
+       int u0;     /** Coring threshold of U channel in dark area */
+       int u1;     /** Coring threshold of U channel in bright area */
+       int v0;     /** Coring threshold of V channel in dark area */
+       int v1;     /** Coring threshold of V channel in bright area */
 };
 
 /**
@@ -81,7 +81,7 @@ struct ia_css_xnr3_coring_params {
  * value of 0.0 bypasses the entire xnr3 filter.
  */
 struct ia_css_xnr3_blending_params {
-       int strength;   /**< Blending strength */
+       int strength;   /** Blending strength */
 };
 
 /**
@@ -90,9 +90,9 @@ struct ia_css_xnr3_blending_params {
  * from the CSS API.
  */
 struct ia_css_xnr3_config {
-       struct ia_css_xnr3_sigma_params    sigma;    /**< XNR3 sigma parameters */
-       struct ia_css_xnr3_coring_params   coring;   /**< XNR3 coring parameters */
-       struct ia_css_xnr3_blending_params blending; /**< XNR3 blending parameters */
+       struct ia_css_xnr3_sigma_params    sigma;    /** XNR3 sigma parameters */
+       struct ia_css_xnr3_coring_params   coring;   /** XNR3 coring parameters */
+       struct ia_css_xnr3_blending_params blending; /** XNR3 blending parameters */
 };
 
 #endif /* __IA_CSS_XNR3_TYPES_H */
index 3f46655..3f8589a 100644 (file)
 #ifndef __IA_CSS_YNR_TYPES_H
 #define __IA_CSS_YNR_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Noise Reduction (BNR) and YCC Noise Reduction (YNR,CNR).
 */
 
-/** Configuration used by Bayer Noise Reduction (BNR) and
+/* Configuration used by Bayer Noise Reduction (BNR) and
  *  YCC Noise Reduction (YNR,CNR).
  *
  *  ISP block: BNR1, YNR1, CNR1
  *        BNR1,YNR2,CNR2 are used for Still.
  */
 struct ia_css_nr_config {
-       ia_css_u0_16 bnr_gain;     /**< Strength of noise reduction (BNR).
+       ia_css_u0_16 bnr_gain;     /** Strength of noise reduction (BNR).
                                u0.16, [0,65535],
                                default 14336(0.21875), ineffective 0 */
-       ia_css_u0_16 ynr_gain;     /**< Strength of noise reduction (YNR).
+       ia_css_u0_16 ynr_gain;     /** Strength of noise reduction (YNR).
                                u0.16, [0,65535],
                                default 14336(0.21875), ineffective 0 */
-       ia_css_u0_16 direction;    /**< Sensitivity of edge (BNR).
+       ia_css_u0_16 direction;    /** Sensitivity of edge (BNR).
                                u0.16, [0,65535],
                                default 512(0.0078125), ineffective 0 */
-       ia_css_u0_16 threshold_cb; /**< Coring threshold for Cb (CNR).
+       ia_css_u0_16 threshold_cb; /** Coring threshold for Cb (CNR).
                                This is the same as
                                de_config.c1_coring_threshold.
                                u0.16, [0,65535],
                                default 0(0), ineffective 0 */
-       ia_css_u0_16 threshold_cr; /**< Coring threshold for Cr (CNR).
+       ia_css_u0_16 threshold_cr; /** Coring threshold for Cr (CNR).
                                This is the same as
                                de_config.c2_coring_threshold.
                                u0.16, [0,65535],
                                default 0(0), ineffective 0 */
 };
 
-/** Edge Enhancement (sharpen) configuration.
+/* Edge Enhancement (sharpen) configuration.
  *
  *  ISP block: YEE1
  *  ISP1: YEE1 is used.
@@ -57,24 +57,24 @@ struct ia_css_nr_config {
  *       (YEE2 is used for Still.)
  */
 struct ia_css_ee_config {
-       ia_css_u5_11 gain;        /**< The strength of sharpness.
+       ia_css_u5_11 gain;        /** The strength of sharpness.
                                        u5.11, [0,65535],
                                        default 8192(4.0), ineffective 0 */
-       ia_css_u8_8 threshold;    /**< The threshold that divides noises from
+       ia_css_u8_8 threshold;    /** The threshold that divides noises from
                                        edge.
                                        u8.8, [0,65535],
                                        default 256(1.0), ineffective 65535 */
-       ia_css_u5_11 detail_gain; /**< The strength of sharpness in pell-mell
+       ia_css_u5_11 detail_gain; /** The strength of sharpness in pell-mell
                                        area.
                                        u5.11, [0,65535],
                                        default 2048(1.0), ineffective 0 */
 };
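A minimal decode sketch for the fixed-point notation used above (uM.N means N fractional bits), confirming that the documented defaults correspond to 4.0, 1.0 and 1.0:

        float gain        = 8192.0f / (1 << 11); /* u5.11 default -> 4.0 */
        float threshold   =  256.0f / (1 << 8);  /* u8.8  default -> 1.0 */
        float detail_gain = 2048.0f / (1 << 11); /* u5.11 default -> 1.0 */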
 
-/** YNR and YEE (sharpen) configuration.
+/* YNR and YEE (sharpen) configuration.
  */
 struct ia_css_yee_config {
-       struct ia_css_nr_config nr; /**< The NR configuration. */
-       struct ia_css_ee_config ee; /**< The EE configuration. */
+       struct ia_css_nr_config nr; /** The NR configuration. */
+       struct ia_css_ee_config ee; /** The EE configuration. */
 };
 
 #endif /* __IA_CSS_YNR_TYPES_H */
index e0a0b10..83161a2 100644 (file)
 #ifndef __IA_CSS_YNR2_TYPES_H
 #define __IA_CSS_YNR2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Y(Luma) Noise Reduction.
 */
 
-/** Y(Luma) Noise Reduction configuration.
+/* Y(Luma) Noise Reduction configuration.
  *
  *  ISP block: YNR2 & YEE2
  * (ISP1: YNR1 and YEE1 are used.)
  *  ISP2: YNR2 and YEE2 are used for Still.
  */
 struct ia_css_ynr_config {
-       uint16_t edge_sense_gain_0;   /**< Sensitivity of edge in dark area.
+       uint16_t edge_sense_gain_0;   /** Sensitivity of edge in dark area.
                                        u13.0, [0,8191],
                                        default 1000, ineffective 0 */
-       uint16_t edge_sense_gain_1;   /**< Sensitivity of edge in bright area.
+       uint16_t edge_sense_gain_1;   /** Sensitivity of edge in bright area.
                                        u13.0, [0,8191],
                                        default 1000, ineffective 0 */
-       uint16_t corner_sense_gain_0; /**< Sensitivity of corner in dark area.
+       uint16_t corner_sense_gain_0; /** Sensitivity of corner in dark area.
                                        u13.0, [0,8191],
                                        default 1000, ineffective 0 */
-       uint16_t corner_sense_gain_1; /**< Sensitivity of corner in bright area.
+       uint16_t corner_sense_gain_1; /** Sensitivity of corner in bright area.
                                        u13.0, [0,8191],
                                        default 1000, ineffective 0 */
 };
 
-/** Fringe Control configuration.
+/* Fringe Control configuration.
  *
  *  ISP block: FC2 (FC2 is used with YNR2/YEE2.)
  * (ISP1: FC2 is not used.)
@@ -49,43 +49,43 @@ struct ia_css_ynr_config {
  *  ISP2: FC2 is used for Still.
  */
 struct ia_css_fc_config {
-       uint8_t  gain_exp;   /**< Common exponent of gains.
+       uint8_t  gain_exp;   /** Common exponent of gains.
                                u8.0, [0,13],
                                default 1, ineffective 0 */
-       uint16_t coring_pos_0; /**< Coring threshold for positive edge in dark area.
+       uint16_t coring_pos_0; /** Coring threshold for positive edge in dark area.
                                u0.13, [0,8191],
                                default 0(0), ineffective 0 */
-       uint16_t coring_pos_1; /**< Coring threshold for positive edge in bright area.
+       uint16_t coring_pos_1; /** Coring threshold for positive edge in bright area.
                                u0.13, [0,8191],
                                default 0(0), ineffective 0 */
-       uint16_t coring_neg_0; /**< Coring threshold for negative edge in dark area.
+       uint16_t coring_neg_0; /** Coring threshold for negative edge in dark area.
                                u0.13, [0,8191],
                                default 0(0), ineffective 0 */
-       uint16_t coring_neg_1; /**< Coring threshold for negative edge in bright area.
+       uint16_t coring_neg_1; /** Coring threshold for negative edge in bright area.
                                u0.13, [0,8191],
                                default 0(0), ineffective 0 */
-       uint16_t gain_pos_0; /**< Gain for positive edge in dark area.
+       uint16_t gain_pos_0; /** Gain for positive edge in dark area.
                                u0.13, [0,8191],
                                default 4096(0.5), ineffective 0 */
-       uint16_t gain_pos_1; /**< Gain for positive edge in bright area.
+       uint16_t gain_pos_1; /** Gain for positive edge in bright area.
                                u0.13, [0,8191],
                                default 4096(0.5), ineffective 0 */
-       uint16_t gain_neg_0; /**< Gain for negative edge in dark area.
+       uint16_t gain_neg_0; /** Gain for negative edge in dark area.
                                u0.13, [0,8191],
                                default 4096(0.5), ineffective 0 */
-       uint16_t gain_neg_1; /**< Gain for negative edge in bright area.
+       uint16_t gain_neg_1; /** Gain for negative edge in bright area.
                                u0.13, [0,8191],
                                default 4096(0.5), ineffective 0 */
-       uint16_t crop_pos_0; /**< Limit for positive edge in dark area.
+       uint16_t crop_pos_0; /** Limit for positive edge in dark area.
                                u0.13, [0,8191],
                                default/ineffective 8191(almost 1.0) */
-       uint16_t crop_pos_1; /**< Limit for positive edge in bright area.
+       uint16_t crop_pos_1; /** Limit for positive edge in bright area.
                                u0.13, [0,8191],
                                default/ineffective 8191(almost 1.0) */
-       int16_t  crop_neg_0; /**< Limit for negative edge in dark area.
+       int16_t  crop_neg_0; /** Limit for negative edge in dark area.
                                s0.13, [-8192,0],
                                default/ineffective -8192(-1.0) */
-       int16_t  crop_neg_1; /**< Limit for negative edge in bright area.
+       int16_t  crop_neg_1; /** Limit for negative edge in bright area.
                                s0.13, [-8192,0],
                                default/ineffective -8192(-1.0) */
 };
index c651946..5a58abe 100644 (file)
@@ -269,7 +269,7 @@ enum ia_css_err
 ia_css_binary_find(struct ia_css_binary_descr *descr,
                   struct ia_css_binary *binary);
 
-/** @brief Get the shading information of the specified shading correction type.
+/* @brief Get the shading information of the specified shading correction type.
  *
  * @param[in] binary: The isp binary which has the shading correction.
  * @param[in] type: The shading correction type.
index e028e46..295e070 100644 (file)
@@ -972,7 +972,7 @@ ia_css_binary_uninit(void)
        return IA_CSS_SUCCESS;
 }
 
-/** @brief Compute decimation factor for 3A statistics and shading correction.
+/* @brief Compute decimation factor for 3A statistics and shading correction.
  *
  * @param[in]  width   Frame width in pixels.
  * @param[in]  height  Frame height in pixels.
index 42d9a85..e50d9f2 100644 (file)
@@ -152,7 +152,7 @@ void ia_css_queue_map(
                unmap_buffer_type_to_queue_id(thread_id, buf_type);
 }
 
-/**
+/*
  * @brief Query the internal queue ID.
  */
 bool ia_css_query_internal_queue_id(
index 3c8dcfd..4b28b2a 100644 (file)
@@ -54,21 +54,21 @@ extern unsigned int ia_css_debug_trace_level;
  *  Values can be combined to dump a combination of sets.
  */
 enum ia_css_debug_enable_param_dump {
-       IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /**< FPN table */
-       IA_CSS_DEBUG_DUMP_OB = 1 << 1,  /**< OB table */
-       IA_CSS_DEBUG_DUMP_SC = 1 << 2,  /**< Shading table */
-       IA_CSS_DEBUG_DUMP_WB = 1 << 3,  /**< White balance */
-       IA_CSS_DEBUG_DUMP_DP = 1 << 4,  /**< Defect Pixel */
-       IA_CSS_DEBUG_DUMP_BNR = 1 << 5,  /**< Bayer Noise Reductions */
-       IA_CSS_DEBUG_DUMP_S3A = 1 << 6,  /**< 3A Statistics */
-       IA_CSS_DEBUG_DUMP_DE = 1 << 7,  /**< De Mosaicing */
-       IA_CSS_DEBUG_DUMP_YNR = 1 << 8,  /**< Luma Noise Reduction */
-       IA_CSS_DEBUG_DUMP_CSC = 1 << 9,  /**< Color Space Conversion */
-       IA_CSS_DEBUG_DUMP_GC = 1 << 10,  /**< Gamma Correction */
-       IA_CSS_DEBUG_DUMP_TNR = 1 << 11,  /**< Temporal Noise Reduction */
-       IA_CSS_DEBUG_DUMP_ANR = 1 << 12,  /**< Advanced Noise Reduction */
-       IA_CSS_DEBUG_DUMP_CE = 1 << 13,  /**< Chroma Enhancement */
-       IA_CSS_DEBUG_DUMP_ALL = 1 << 14  /**< Dump all device parameters */
+       IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /** FPN table */
+       IA_CSS_DEBUG_DUMP_OB = 1 << 1,  /** OB table */
+       IA_CSS_DEBUG_DUMP_SC = 1 << 2,  /** Shading table */
+       IA_CSS_DEBUG_DUMP_WB = 1 << 3,  /** White balance */
+       IA_CSS_DEBUG_DUMP_DP = 1 << 4,  /** Defect Pixel */
+       IA_CSS_DEBUG_DUMP_BNR = 1 << 5,  /** Bayer Noise Reductions */
+       IA_CSS_DEBUG_DUMP_S3A = 1 << 6,  /** 3A Statistics */
+       IA_CSS_DEBUG_DUMP_DE = 1 << 7,  /** De Mosaicing */
+       IA_CSS_DEBUG_DUMP_YNR = 1 << 8,  /** Luma Noise Reduction */
+       IA_CSS_DEBUG_DUMP_CSC = 1 << 9,  /** Color Space Conversion */
+       IA_CSS_DEBUG_DUMP_GC = 1 << 10,  /** Gamma Correction */
+       IA_CSS_DEBUG_DUMP_TNR = 1 << 11,  /** Temporal Noise Reduction */
+       IA_CSS_DEBUG_DUMP_ANR = 1 << 12,  /** Advanced Noise Reduction */
+       IA_CSS_DEBUG_DUMP_CE = 1 << 13,  /** Chroma Enhancement */
+       IA_CSS_DEBUG_DUMP_ALL = 1 << 14  /** Dump all device parameters */
 };
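The enumerators above are single bits, so a combined dump set is simply their bitwise OR; a minimal sketch:

        unsigned int dump_set = IA_CSS_DEBUG_DUMP_WB |
                                IA_CSS_DEBUG_DUMP_GC |
                                IA_CSS_DEBUG_DUMP_TNR;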
 
 #define IA_CSS_ERROR(fmt, ...) \
index 0fa7cb2..dd1127a 100644 (file)
@@ -1617,7 +1617,7 @@ void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
 
 #elif SP_DEBUG == SP_DEBUG_TRACE
 
-/**
+/*
 * This is just an example of how TRACE_FILE_ID (see ia_css_debug.sp.h) will
 * be mapped on the file name string.
  *
@@ -2267,7 +2267,7 @@ void ia_css_debug_dump_debug_info(const char *context)
        return;
 }
 
-/** this function is for debug use, it can make SP go to sleep
+/* This function is for debug use: it can make the SP go to a sleep
   state after each frame, so the user can dump the stable SP dmem.
   This function can be called after ia_css_start_sp()
   and before sh_css_init_buffer_queues()
@@ -2526,7 +2526,7 @@ void ia_css_debug_dump_ddr_debug_queue(void)
 }
 */
 
-/**
+/*
  * @brief Initialize the debug mode.
  * Refer to "ia_css_debug.h" for more details.
  */
@@ -2537,7 +2537,7 @@ bool ia_css_debug_mode_init(void)
        return rc;
 }
 
-/**
+/*
  * @brief Disable the DMA channel.
  * Refer to "ia_css_debug.h" for more details.
  */
@@ -2552,7 +2552,7 @@ ia_css_debug_mode_disable_dma_channel(int dma_id,
        return rc;
 }
 
-/**
+/*
  * @brief Enable the DMA channel.
  * Refer to "ia_css_debug.h" for more details.
  */
index 2698c3e..239c067 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
@@ -52,7 +52,7 @@ more details.
 
 #include "ia_css_queue.h"      /* host_sp_enqueue_XXX */
 #include "ia_css_event.h"      /* ia_css_event_encode */
-/**
+/*
  * @brief Encode the information into the software-event.
  * Refer to "sw_event_public.h" for details.
  */
index 56d6858..913a4bf 100644 (file)
@@ -37,7 +37,7 @@ int ia_css_eventq_recv(
        return error;
 }
 
-/**
+/*
  * @brief The Host sends the event to the SP.
  * Refer to "sh_css_sp.h" for details.
  */
index c7e07b7..89ad808 100644 (file)
@@ -41,7 +41,7 @@ more details.
 /*********************************************************************
 ****   Frame INFO APIs
 **********************************************************************/
-/** @brief Sets the given width and alignment to the frame info
+/* @brief Sets the given width and alignment to the frame info
  *
  * @param
 * @param[in]  info        The info to which the parameters are set
@@ -53,7 +53,7 @@ void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
        unsigned int width,
        unsigned int min_padded_width);
 
-/** @brief Sets the given format to the frame info
+/* @brief Sets the given format to the frame info
  *
  * @param
 * @param[in]  info        The info to which the parameters are set
@@ -63,7 +63,7 @@ void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
 void ia_css_frame_info_set_format(struct ia_css_frame_info *info,
        enum ia_css_frame_format format);
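A minimal usage sketch for the two setters above; the IA_CSS_FRAME_FORMAT_NV12 enumerator and the min_padded_width value of 0 are assumed example values for illustration:

        struct ia_css_frame_info info = { 0 };

        ia_css_frame_info_set_width(&info, 1920, 0);
        ia_css_frame_info_set_format(&info, IA_CSS_FRAME_FORMAT_NV12);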
 
-/** @brief Sets the frame info with the given parameters
+/* @brief Sets the frame info with the given parameters
  *
  * @param
 * @param[in]  info        The info to which the parameters are set
@@ -79,7 +79,7 @@ void ia_css_frame_info_init(struct ia_css_frame_info *info,
        enum ia_css_frame_format format,
        unsigned int aligned);
 
-/** @brief Checks whether 2 frame infos has the same resolution
+/* @brief Checks whether 2 frame infos have the same resolution
  *
  * @param
  * @param[in]  frame_a         The first frame to be compared
@@ -90,7 +90,7 @@ bool ia_css_frame_info_is_same_resolution(
        const struct ia_css_frame_info *info_a,
        const struct ia_css_frame_info *info_b);
 
-/** @brief Check the frame info is valid
+/* @brief Check the frame info is valid
  *
  * @param
  * @param[in]  info       The frame attributes to be initialized
@@ -102,7 +102,7 @@ enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info);
 ****   Frame APIs
 **********************************************************************/
 
-/** @brief Initialize the plane depending on the frame type
+/* @brief Initialize the plane depending on the frame type
  *
  * @param
  * @param[in]  frame           The frame attributes to be initialized
@@ -110,7 +110,7 @@ enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info);
  */
 enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame);
 
-/** @brief Free an array of frames
+/* @brief Free an array of frames
  *
  * @param
  * @param[in]  num_frames      The number of frames to be freed in the array
@@ -120,7 +120,7 @@ enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame);
 void ia_css_frame_free_multiple(unsigned int num_frames,
        struct ia_css_frame **frames_array);
 
-/** @brief Allocate a CSS frame structure of given size in bytes..
+/* @brief Allocate a CSS frame structure of a given size in bytes.
  *
  * @param      frame   The allocated frame.
  * @param[in]  size_bytes      The frame size in bytes.
@@ -135,7 +135,7 @@ enum ia_css_err ia_css_frame_allocate_with_buffer_size(
        const unsigned int size_bytes,
        const bool contiguous);
 
-/** @brief Check whether 2 frames are same type
+/* @brief Check whether 2 frames are same type
  *
  * @param
  * @param[in]  frame_a         The first frame to be compared
@@ -146,7 +146,7 @@ bool ia_css_frame_is_same_type(
        const struct ia_css_frame *frame_a,
        const struct ia_css_frame *frame_b);
 
-/** @brief Configure a dma port from frame info
+/* @brief Configure a dma port from frame info
  *
  * @param
 * @param[in]  config         The DMA port configuration
@@ -158,7 +158,7 @@ void ia_css_dma_configure_from_info(
        const struct ia_css_frame_info *info);
 
 #ifdef ISP2401
-/** @brief Finds the cropping resolution
+/* @brief Finds the cropping resolution
  * This function finds the maximum cropping resolution in an input image keeping
 * the aspect ratio for the given output resolution. It calculates the coordinates
  * for cropping from the center and returns the starting pixel location of the
index 8e651b8..2283dd1 100644 (file)
@@ -53,7 +53,7 @@ enum ia_css_param_class {
 };
 #define IA_CSS_NUM_PARAM_CLASSES (IA_CSS_PARAM_CLASS_STATE + 1)
 
-/** ISP parameter descriptor */
+/* ISP parameter descriptor */
 struct ia_css_isp_parameter {
        uint32_t offset; /* Offset in isp_<mem>)parameters, etc. */
        uint32_t size;   /* Disabled if 0 */
@@ -77,10 +77,10 @@ struct ia_css_isp_param_isp_segments {
 
 /* Memory offsets in binary info */
 struct ia_css_isp_param_memory_offsets {
-       uint32_t offsets[IA_CSS_NUM_PARAM_CLASSES];  /**< offset wrt hdr in bytes */
+       uint32_t offsets[IA_CSS_NUM_PARAM_CLASSES];  /** offset wrt hdr in bytes */
 };
 
-/** Offsets for ISP kernel parameters per isp memory.
+/* Offsets for ISP kernel parameters per isp memory.
  * Only relevant for standard ISP binaries, not ACC or SP.
  */
 union ia_css_all_memory_offsets {
index 02bf908..4cf2def 100644 (file)
@@ -44,7 +44,7 @@ more details.
  * Virtual Input System. (Input System 2401)
  */
 typedef input_system_cfg_t     ia_css_isys_descr_t;
-/** end of Virtual Input System */
+/* end of Virtual Input System */
 #endif
 
 #if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
@@ -112,7 +112,7 @@ unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits);
 
 #endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
 
-/** @brief Translate format and compression to format type.
+/* @brief Translate format and compression to format type.
  *
  * @param[in]  input_format    The input format.
  * @param[in]  compression     The compression scheme.
@@ -195,7 +195,7 @@ extern void ia_css_isys_stream2mmio_sid_rmgr_release(
        stream2mmio_ID_t        stream2mmio,
        stream2mmio_sid_ID_t    *sid);
 
-/** end of Virtual Input System */
+/* end of Virtual Input System */
 #endif
 
 #endif                         /* __IA_CSS_ISYS_H__ */
index 46a157f..70f6cb5 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index 0f1e8a2..90922a7 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
@@ -166,7 +166,7 @@ static int32_t calculate_stride(
        bool    raw_packed,
        int32_t align_in_bytes);
 
-/** end of Forwarded Declaration */
+/* end of forward declarations */
 
 /**************************************************
  *
@@ -292,7 +292,7 @@ ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
        return rc;
 }
 
-/** end of Public Methods */
+/* end of Public Methods */
 
 /**************************************************
  *
@@ -894,5 +894,5 @@ static csi_mipi_packet_type_t get_csi_mipi_packet_type(
 
        return packet_type;
 }
-/** end of Private Methods */
+/* end of Private Methods */
 #endif
index 90646f5..e64936e 100644 (file)
@@ -103,7 +103,7 @@ struct ia_css_pipeline_stage_desc {
        struct ia_css_frame *vf_frame;
 };
 
-/** @brief initialize the pipeline module
+/* @brief initialize the pipeline module
  *
  * @return    None
  *
@@ -112,7 +112,7 @@ struct ia_css_pipeline_stage_desc {
  */
 void ia_css_pipeline_init(void);
 
-/** @brief initialize the pipeline structure with default values
+/* @brief initialize the pipeline structure with default values
  *
  * @param[out] pipeline  structure to be initialized with defaults
  * @param[in] pipe_id
@@ -129,7 +129,7 @@ enum ia_css_err ia_css_pipeline_create(
        unsigned int pipe_num,
        unsigned int dvs_frame_delay);
 
-/** @brief destroy a pipeline
+/* @brief destroy a pipeline
  *
  * @param[in] pipeline
  * @return    None
@@ -138,7 +138,7 @@ enum ia_css_err ia_css_pipeline_create(
 void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline);
 
 
-/** @brief Starts a pipeline
+/* @brief Starts a pipeline
  *
  * @param[in] pipe_id
  * @param[in] pipeline
@@ -148,7 +148,7 @@ void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline);
 void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
                           struct ia_css_pipeline *pipeline);
 
-/** @brief Request to stop a pipeline
+/* @brief Request to stop a pipeline
  *
  * @param[in] pipeline
  * @return                     IA_CSS_SUCCESS or error code upon error.
@@ -156,7 +156,7 @@ void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
  */
 enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline);
 
-/** @brief Check whether pipeline has stopped
+/* @brief Check whether pipeline has stopped
  *
  * @param[in] pipeline
  * @return    true if the pipeline has stopped
@@ -164,7 +164,7 @@ enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline);
  */
 bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe);
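A minimal sketch of stopping a pipeline with the two calls above, assuming a valid pipeline pointer; a real caller would sleep or time out instead of busy-polling:

        if (ia_css_pipeline_request_stop(pipeline) == IA_CSS_SUCCESS) {
                while (!ia_css_pipeline_has_stopped(pipeline))
                        ; /* poll until the SP reports the pipeline idle */
        }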
 
-/** @brief clean all the stages pipeline and make it as new
+/* @brief Clean all the stages of the pipeline and make it as new
  *
  * @param[in] pipeline
  * @return    None
@@ -172,7 +172,7 @@ bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe);
  */
 void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline);
 
-/** @brief Add a stage to pipeline.
+/* @brief Add a stage to pipeline.
  *
  * @param     pipeline               Pointer to the pipeline to be added to.
  * @param[in] stage_desc       The description of the stage
@@ -188,7 +188,7 @@ enum ia_css_err ia_css_pipeline_create_and_add_stage(
                        struct ia_css_pipeline_stage_desc *stage_desc,
                        struct ia_css_pipeline_stage **stage);
 
-/** @brief Finalize the stages in a pipeline
+/* @brief Finalize the stages in a pipeline
  *
  * @param     pipeline               Pointer to the pipeline to be added to.
  * @return                     None
@@ -198,7 +198,7 @@ enum ia_css_err ia_css_pipeline_create_and_add_stage(
 void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline,
                        bool continuous);
 
-/** @brief gets a stage from the pipeline
+/* @brief gets a stage from the pipeline
  *
  * @param[in] pipeline
  * @return                     IA_CSS_SUCCESS or error code upon error.
@@ -208,7 +208,7 @@ enum ia_css_err ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline,
                          int mode,
                          struct ia_css_pipeline_stage **stage);
 
-/** @brief Gets a pipeline stage corresponding Firmware handle from the pipeline
+/* @brief Gets the pipeline stage corresponding to a Firmware handle from the pipeline
  *
  * @param[in] pipeline
  * @param[in] fw_handle
@@ -221,7 +221,7 @@ enum ia_css_err ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline *pipeli
                          uint32_t fw_handle,
                          struct ia_css_pipeline_stage **stage);
 
-/** @brief Gets the Firmware handle correponding the stage num from the pipeline
+/* @brief Gets the Firmware handle corresponding to the stage num from the pipeline
  *
  * @param[in] pipeline
  * @param[in] stage_num
@@ -234,7 +234,7 @@ enum ia_css_err ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline *pipeli
                          uint32_t stage_num,
                          uint32_t *fw_handle);
 
-/** @brief gets the output stage from the pipeline
+/* @brief gets the output stage from the pipeline
  *
  * @param[in] pipeline
  * @return                     IA_CSS_SUCCESS or error code upon error.
@@ -245,7 +245,7 @@ enum ia_css_err ia_css_pipeline_get_output_stage(
                        int mode,
                        struct ia_css_pipeline_stage **stage);
 
-/** @brief Checks whether the pipeline uses params
+/* @brief Checks whether the pipeline uses params
  *
  * @param[in] pipeline
  * @return    true if the pipeline uses params
index 62d1397..8f93d29 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
@@ -114,7 +114,7 @@ void ia_css_pipeline_map(unsigned int pipe_num, bool map)
        IA_CSS_LEAVE_PRIVATE("void");
 }
 
-/** @brief destroy a pipeline
+/* @brief destroy a pipeline
  *
  * @param[in] pipeline
  * @return    None
@@ -187,7 +187,7 @@ void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
              "ia_css_pipeline_start() leave: return_void\n");
 }
 
-/**
+/*
  * @brief Query the SP thread ID.
  * Refer to "sh_css_internal.h" for details.
  */
@@ -285,7 +285,7 @@ void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline)
        IA_CSS_LEAVE_PRIVATE("void");
 }
 
-/** @brief Add a stage to pipeline.
+/* @brief Add a stage to pipeline.
  *
  * @param       pipeline      Pointer to the pipeline to be added to.
  * @param[in]   stage_desc    The description of the stage
index e50a0f8..aaf2e24 100644 (file)
@@ -51,7 +51,7 @@ typedef struct ia_css_queue ia_css_queue_t;
 /*****************************************************************************
  * Queue Public APIs
  *****************************************************************************/
-/** @brief Initialize a local queue instance.
+/* @brief Initialize a local queue instance.
  *
  * @param[out] qhandle. Handle to queue instance for use with API
  * @param[in]  desc.   Descriptor with queue properties filled-in
@@ -63,7 +63,7 @@ extern int ia_css_queue_local_init(
                        ia_css_queue_t *qhandle,
                        ia_css_queue_local_t *desc);
 
-/** @brief Initialize a remote queue instance
+/* @brief Initialize a remote queue instance
  *
  * @param[out] qhandle. Handle to queue instance for use with API
  * @param[in]  desc.   Descriptor with queue properties filled-in
@@ -74,7 +74,7 @@ extern int ia_css_queue_remote_init(
                        ia_css_queue_t *qhandle,
                        ia_css_queue_remote_t *desc);
 
-/** @brief Uninitialize a queue instance
+/* @brief Uninitialize a queue instance
  *
  * @param[in]  qhandle. Handle to queue instance
  * @return     0 - Successful uninit.
@@ -83,7 +83,7 @@ extern int ia_css_queue_remote_init(
 extern int ia_css_queue_uninit(
                        ia_css_queue_t *qhandle);
 
-/** @brief Enqueue an item in the queue instance
+/* @brief Enqueue an item in the queue instance
  *
  * @param[in]  qhandle. Handle to queue instance
  * @param[in]  item.    Object to be enqueued.
@@ -96,7 +96,7 @@ extern int ia_css_queue_enqueue(
                        ia_css_queue_t *qhandle,
                        uint32_t item);
 
-/** @brief Dequeue an item from the queue instance
+/* @brief Dequeue an item from the queue instance
  *
  * @param[in]  qhandle. Handle to queue instance
  * @param[out] item.    Object to be dequeued into this item.
@@ -110,7 +110,7 @@ extern int ia_css_queue_dequeue(
                        ia_css_queue_t *qhandle,
                        uint32_t *item);
 
-/** @brief Check if the queue is empty
+/* @brief Check if the queue is empty
  *
  * @param[in]  qhandle.  Handle to queue instance
  * @param[in]  is_empty  True if empty, False if not.
@@ -123,7 +123,7 @@ extern int ia_css_queue_is_empty(
                        ia_css_queue_t *qhandle,
                        bool *is_empty);
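A minimal drain sketch using the dequeue/is_empty pair declared here, assuming a hypothetical queue q already set up with ia_css_queue_local_init() and the documented convention that 0 means success:

        uint32_t item;
        bool empty;

        while (ia_css_queue_is_empty(&q, &empty) == 0 && !empty) {
                if (ia_css_queue_dequeue(&q, &item) != 0)
                        break;
                /* consume "item" here */
        }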
 
-/** @brief Check if the queue is full
+/* @brief Check if the queue is full
  *
  * @param[in]  qhandle.  Handle to queue instance
  * @param[in]  is_full   True if Full, False if not.
@@ -136,7 +136,7 @@ extern int ia_css_queue_is_full(
                        ia_css_queue_t *qhandle,
                        bool *is_full);
 
-/** @brief Get used space in the queue
+/* @brief Get used space in the queue
  *
  * @param[in]  qhandle.  Handle to queue instance
  * @param[in]  size      Number of available elements in the queue
@@ -148,7 +148,7 @@ extern int ia_css_queue_get_used_space(
                        ia_css_queue_t *qhandle,
                        uint32_t *size);
 
-/** @brief Get free space in the queue
+/* @brief Get free space in the queue
  *
  * @param[in]  qhandle.  Handle to queue instance
  * @param[in]  size      Number of free elements in the queue
@@ -160,7 +160,7 @@ extern int ia_css_queue_get_free_space(
                        ia_css_queue_t *qhandle,
                        uint32_t *size);
 
-/** @brief Peek at an element in the queue
+/* @brief Peek at an element in the queue
  *
  * @param[in]  qhandle.  Handle to queue instance
  * @param[in]  offset   Offset of element to peek,
@@ -175,7 +175,7 @@ extern int ia_css_queue_peek(
                uint32_t offset,
                uint32_t *element);
 
-/** @brief Get the usable size for the queue
+/* @brief Get the usable size for the queue
  *
  * @param[in]  qhandle. Handle to queue instance
  * @param[out] size     Size value to be returned here.
index efa9c14..370ff38 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
@@ -44,7 +44,7 @@ enum ia_css_err ia_css_rmgr_init(void)
        return err;
 }
 
-/**
+/*
  * @brief Uninitialize resource pool (host)
  */
 void ia_css_rmgr_uninit(void)
index e56006c..54239ac 100644 (file)
 #include <memory_access.h>    /* mmmgr_malloc, mhmm_free */
 #include <ia_css_debug.h>
 
-/**
+/*
  * @brief VBUF resource handles
  */
 #define NUM_HANDLES 1000
 struct ia_css_rmgr_vbuf_handle handle_table[NUM_HANDLES];
 
-/**
+/*
  * @brief VBUF resource pool - refpool
  */
 struct ia_css_rmgr_vbuf_pool refpool = {
@@ -37,7 +37,7 @@ struct ia_css_rmgr_vbuf_pool refpool = {
        NULL,                   /* handles */
 };
 
-/**
+/*
  * @brief VBUF resource pool - writepool
  */
 struct ia_css_rmgr_vbuf_pool writepool = {
@@ -48,7 +48,7 @@ struct ia_css_rmgr_vbuf_pool writepool = {
        NULL,                   /* handles */
 };
 
-/**
+/*
  * @brief VBUF resource pool - hmmbufferpool
  */
 struct ia_css_rmgr_vbuf_pool hmmbufferpool = {
@@ -63,7 +63,7 @@ struct ia_css_rmgr_vbuf_pool *vbuf_ref = &refpool;
 struct ia_css_rmgr_vbuf_pool *vbuf_write = &writepool;
 struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool = &hmmbufferpool;
 
-/**
+/*
  * @brief Initialize the reference count (host, vbuf)
  */
 static void rmgr_refcount_init_vbuf(void)
@@ -72,7 +72,7 @@ static void rmgr_refcount_init_vbuf(void)
        memset(&handle_table, 0, sizeof(handle_table));
 }
 
-/**
+/*
  * @brief Retain the reference count for a handle (host, vbuf)
  *
  * @param handle       The pointer to the handle
@@ -109,7 +109,7 @@ void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
        (*handle)->count++;
 }
 
-/**
+/*
  * @brief Release the reference count for a handle (host, vbuf)
  *
  * @param handle       The pointer to the handle
@@ -131,7 +131,7 @@ void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
        }
 }
 
-/**
+/*
  * @brief Initialize the resource pool (host, vbuf)
  *
  * @param pool The pointer to the pool
@@ -163,7 +163,7 @@ enum ia_css_err ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
        return err;
 }
 
-/**
+/*
  * @brief Uninitialize the resource pool (host, vbuf)
  *
  * @param pool The pointer to the pool
@@ -197,7 +197,7 @@ void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
        }
 }
 
-/**
+/*
  * @brief Push a handle to the pool
  *
  * @param pool         The pointer to the pool
@@ -224,7 +224,7 @@ void rmgr_push_handle(struct ia_css_rmgr_vbuf_pool *pool,
        assert(succes);
 }
 
-/**
+/*
  * @brief Pop a handle from the pool
  *
  * @param pool         The pointer to the pool
@@ -254,7 +254,7 @@ void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool,
        }
 }
 
-/**
+/*
  * @brief Acquire a handle from the pool (host, vbuf)
  *
  * @param pool         The pointer to the pool
@@ -302,7 +302,7 @@ void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
        ia_css_rmgr_refcount_retain_vbuf(handle);
 }
 
-/**
+/*
  * @brief Release a handle to the pool (host, vbuf)
  *
  * @param pool         The pointer to the pool
index 27e9eb1..bc4b172 100644 (file)
@@ -37,17 +37,17 @@ more details.
 
 
 typedef struct {
-       uint32_t        ddr_data_offset;       /**<  posistion of data in DDR */
-       uint32_t        dmem_data_addr;        /**< data segment address in dmem */
-       uint32_t        dmem_bss_addr;         /**< bss segment address in dmem  */
-       uint32_t        data_size;             /**< data segment size            */
-       uint32_t        bss_size;              /**< bss segment size             */
-       uint32_t        spctrl_config_dmem_addr; /** <location of dmem_cfg  in SP dmem */
-       uint32_t        spctrl_state_dmem_addr;  /** < location of state  in SP dmem */
-       unsigned int    sp_entry;                /** < entry function ptr on SP */
-       const void      *code;                   /**< location of firmware */
+       uint32_t        ddr_data_offset;       /** position of data in DDR */
+       uint32_t        dmem_data_addr;        /** data segment address in dmem */
+       uint32_t        dmem_bss_addr;         /** bss segment address in dmem  */
+       uint32_t        data_size;             /** data segment size            */
+       uint32_t        bss_size;              /** bss segment size             */
+       uint32_t        spctrl_config_dmem_addr; /* location of dmem_cfg in SP dmem */
+       uint32_t        spctrl_state_dmem_addr;  /* location of state in SP dmem */
+       unsigned int    sp_entry;                /* entry function ptr on SP */
+       const void      *code;                   /** location of firmware */
        uint32_t         code_size;
-       char      *program_name;    /**< not used on hardware, only for simulation */
+       char      *program_name;    /** not used on hardware, only for simulation */
 } ia_css_spctrl_cfg;
 
 /* Get the code addr in DDR of SP */
index 3af2891..2620d75 100644 (file)
@@ -41,16 +41,16 @@ typedef enum {
        IA_CSS_SP_SW_RUNNING
 } ia_css_spctrl_sp_sw_state;
 
-/** Structure to encapsulate required arguments for
+/* Structure to encapsulate required arguments for
  * initialization of SP DMEM using the SP itself
  */
 struct ia_css_sp_init_dmem_cfg {
-       ia_css_ptr      ddr_data_addr;  /**< data segment address in ddr  */
-       uint32_t        dmem_data_addr; /**< data segment address in dmem */
-       uint32_t        dmem_bss_addr;  /**< bss segment address in dmem  */
-       uint32_t        data_size;      /**< data segment size            */
-       uint32_t        bss_size;       /**< bss segment size             */
-       sp_ID_t         sp_id;          /** <sp Id */
+       ia_css_ptr      ddr_data_addr;  /** data segment address in ddr  */
+       uint32_t        dmem_data_addr; /** data segment address in dmem */
+       uint32_t        dmem_bss_addr;  /** bss segment address in dmem  */
+       uint32_t        data_size;      /** data segment size            */
+       uint32_t        bss_size;       /** bss segment size             */
+       sp_ID_t         sp_id;          /* <sp Id */
 };
 
 #define SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT \
index 6d9bceb..844e4d5 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
@@ -39,7 +39,7 @@ more details.
 
 struct spctrl_context_info {
        struct ia_css_sp_init_dmem_cfg dmem_config;
-       uint32_t        spctrl_config_dmem_addr; /** location of dmem_cfg  in SP dmem */
+       uint32_t        spctrl_config_dmem_addr; /* location of dmem_cfg  in SP dmem */
        uint32_t        spctrl_state_dmem_addr;
        unsigned int    sp_entry;           /* entry function ptr on SP */
        hrt_vaddress    code_addr;          /* sp firmware location in host mem-DDR*/
index f92b6a9..322bb3d 100644 (file)
@@ -176,7 +176,7 @@ static struct sh_css_hmm_buffer_record hmm_buffer_record[MAX_HMM_BUFFER_NUM];
 
 static bool fw_explicitly_loaded = false;
 
-/**
+/*
  * Local prototypes
  */
 
@@ -187,7 +187,7 @@ static enum ia_css_err
 sh_css_pipe_start(struct ia_css_stream *stream);
 
 #ifdef ISP2401
-/**
+/*
  * @brief Stop all "ia_css_pipe" instances in the target
  * "ia_css_stream" instance.
  *
@@ -207,7 +207,7 @@ sh_css_pipe_start(struct ia_css_stream *stream);
 static enum ia_css_err
 sh_css_pipes_stop(struct ia_css_stream *stream);
 
-/**
+/*
  * @brief Check if all "ia_css_pipe" instances in the target
  * "ia_css_stream" instance have stopped.
  *
@@ -1649,7 +1649,7 @@ ia_css_init(const struct ia_css_env *env,
        void (*flush_func)(struct ia_css_acc_fw *fw);
        hrt_data select, enable;
 
-       /**
+       /*
         * The C99 standard does not specify the exact object representation of structs;
         * the representation is compiler dependent.
         *
@@ -4617,23 +4617,23 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
  * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
  */
 static enum ia_css_event_type convert_event_sp_to_host_domain[] = {
-       IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,    /**< Output frame ready. */
-       IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE,     /**< Second output frame ready. */
-       IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /**< Viewfinder Output frame ready. */
-       IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE,  /**< Second viewfinder Output frame ready. */
-       IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE,   /**< Indication that 3A statistics are available. */
-       IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE,  /**< Indication that DIS statistics are available. */
-       IA_CSS_EVENT_TYPE_PIPELINE_DONE,        /**< Pipeline Done event, sent after last pipeline stage. */
-       IA_CSS_EVENT_TYPE_FRAME_TAGGED,         /**< Frame tagged. */
-       IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE,     /**< Input frame ready. */
-       IA_CSS_EVENT_TYPE_METADATA_DONE,        /**< Metadata ready. */
-       IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /**< Indication that LACE statistics are available. */
-       IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE,   /**< Extension stage executed. */
-       IA_CSS_EVENT_TYPE_TIMER,                /**< Timing measurement data. */
-       IA_CSS_EVENT_TYPE_PORT_EOF,             /**< End Of Frame event, sent when in buffered sensor mode. */
-       IA_CSS_EVENT_TYPE_FW_WARNING,           /**< Performance warning encountered by FW */
-       IA_CSS_EVENT_TYPE_FW_ASSERT,            /**< Assertion hit by FW */
-       0,                                      /** error if sp passes  SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
+       IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,    /** Output frame ready. */
+       IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE,     /** Second output frame ready. */
+       IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /** Viewfinder Output frame ready. */
+       IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE,  /** Second viewfinder Output frame ready. */
+       IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE,   /** Indication that 3A statistics are available. */
+       IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE,  /** Indication that DIS statistics are available. */
+       IA_CSS_EVENT_TYPE_PIPELINE_DONE,        /** Pipeline Done event, sent after last pipeline stage. */
+       IA_CSS_EVENT_TYPE_FRAME_TAGGED,         /** Frame tagged. */
+       IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE,     /** Input frame ready. */
+       IA_CSS_EVENT_TYPE_METADATA_DONE,        /** Metadata ready. */
+       IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /** Indication that LACE statistics are available. */
+       IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE,   /** Extension stage executed. */
+       IA_CSS_EVENT_TYPE_TIMER,                /** Timing measurement data. */
+       IA_CSS_EVENT_TYPE_PORT_EOF,             /** End Of Frame event, sent when in buffered sensor mode. */
+       IA_CSS_EVENT_TYPE_FW_WARNING,           /** Performance warning encountered by FW */
+       IA_CSS_EVENT_TYPE_FW_ASSERT,            /** Assertion hit by FW */
+       0,                                      /* error if sp passes  SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
 };
 
 enum ia_css_err
@@ -5028,7 +5028,7 @@ sh_css_enable_cont_capt(bool enable, bool stop_copy_preview)
 bool
 sh_css_continuous_is_enabled(uint8_t pipe_num)
 #else
-/**
+/*
  * @brief Stop all "ia_css_pipe" instances in the target
  * "ia_css_stream" instance.
  *
@@ -5107,7 +5107,7 @@ ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth)
        return IA_CSS_SUCCESS;
 }
 #else
-       /**
+       /*
         * Stop all "ia_css_pipe" instances in this target
         * "ia_css_stream" instance.
         */
@@ -5146,7 +5146,7 @@ ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth)
                }
        }
 
-       /**
+       /*
         * In the CSS firmware use scenario "Continuous Preview"
         * as well as "Continuous Video", the "ia_css_pipe" instance
         * "Copy Pipe" is activated. This "Copy Pipe" is private to
@@ -5183,7 +5183,7 @@ ERR:
        return err;
 }
 
-/**
+/*
  * @brief Check if all "ia_css_pipe" instances in the target
  * "ia_css_stream" instance have stopped.
  *
@@ -5218,7 +5218,7 @@ sh_css_pipes_have_stopped(struct ia_css_stream *stream)
        main_pipe_id = main_pipe->mode;
        IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id);
 
-       /**
+       /*
         * Check if every "ia_css_pipe" instance in this target
         * "ia_css_stream" instance has stopped.
         */
@@ -5229,7 +5229,7 @@ sh_css_pipes_have_stopped(struct ia_css_stream *stream)
                                rval);
        }
 
-       /**
+       /*
         * In the CSS firmware use scenario "Continuous Preview"
         * as well as "Continuous Video", the "ia_css_pipe" instance
         * "Copy Pipe" is activated. This "Copy Pipe" is private to
@@ -5474,7 +5474,7 @@ ERR:
 }
 
 #ifdef ISP2401
-/**
+/*
  * @brief Check if a format is supported by the pipe.
  *
  */
@@ -8626,7 +8626,7 @@ sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
        return err;
 }
 
-/**
+/*
  * @brief Tag a specific frame in continuous capture.
  * Refer to "sh_css_internal.h" for details.
  */
@@ -8666,7 +8666,7 @@ enum ia_css_err ia_css_stream_capture_frame(struct ia_css_stream *stream,
        return err;
 }
 
-/**
+/*
  * @brief Configure the continuous capture.
  * Refer to "sh_css_internal.h" for details.
  */
@@ -8822,7 +8822,7 @@ sh_css_init_host_sp_control_vars(void)
                "sh_css_init_host_sp_control_vars() leave: return_void\n");
 }
 
-/**
+/*
  * create the internal structures and fill in the configuration data
  */
 void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config)
@@ -10435,7 +10435,7 @@ ia_css_start_sp(void)
        return err;
 }
 
-/**
+/*
  *     Time to wait for SP to terminate. The only condition when this can happen
  *     is a fatal hw failure, but we must be able to detect this and emit
  *     a proper error trace.
@@ -10713,7 +10713,7 @@ ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
        return ret;
 }
 
-/** @brief     Set the state (Enable or Disable) of the Extension stage in the
+/* @brief      Set the state (Enable or Disable) of the Extension stage in the
  *             given pipe.
  */
 enum ia_css_err
@@ -10758,7 +10758,7 @@ ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle, bool
        return err;
 }
 
-/**    @brief  Get the state (Enable or Disable) of the Extension stage in the
+/*     @brief  Get the state (Enable or Disable) of the Extension stage in the
  *     given pipe.
  */
 enum ia_css_err
index 0910021..161122e 100644 (file)
@@ -188,7 +188,7 @@ enum host2sp_commands {
        N_host2sp_cmd
 };
 
-/** Enumeration used to indicate the events that are produced by
+/* Enumeration used to indicate the events that are produced by
  *  the SP and consumed by the Host.
  *
  * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
@@ -274,10 +274,10 @@ struct sh_css_ddr_address_map_compound {
 };
 
 struct ia_css_isp_parameter_set_info {
-       struct sh_css_ddr_address_map  mem_map;/**< pointers to Parameters in ISP format IMPT:
+       struct sh_css_ddr_address_map  mem_map;/** pointers to Parameters in ISP format IMPT:
                                                    This should be first member of this struct */
-       uint32_t                       isp_parameters_id;/**< Unique ID to track which config was actually applied to a particular frame */
-       ia_css_ptr                     output_frame_ptr;/**< Output frame to which this config has to be applied (optional) */
+       uint32_t                       isp_parameters_id;/** Unique ID to track which config was actually applied to a particular frame */
+       ia_css_ptr                     output_frame_ptr;/** Output frame to which this config has to be applied (optional) */
 };
 
 /* this struct contains all arguments that can be passed to
@@ -398,9 +398,9 @@ struct sh_css_sp_input_formatter_set {
 /* SP configuration information */
 struct sh_css_sp_config {
        uint8_t                 no_isp_sync; /* Signal host immediately after start */
-       uint8_t                 enable_raw_pool_locking; /**< Enable Raw Buffer Locking for HALv3 Support */
+       uint8_t                 enable_raw_pool_locking; /** Enable Raw Buffer Locking for HALv3 Support */
        uint8_t                 lock_all;
-       /**< If raw buffer locking is enabled, this flag indicates whether raw
+       /** If raw buffer locking is enabled, this flag indicates whether raw
             frames are locked when their EOF event is successfully sent to the
             host (true) or when they are passed to the preview/video pipe
             (false). */
@@ -458,13 +458,13 @@ struct sh_css_sp_pipeline_io {
        /*struct sh_css_sp_pipeline_terminal    output;*/
 };
 
-/** This struct tracks how many streams are registered per CSI port.
+/* This struct tracks how many streams are registered per CSI port.
  * This is used to track which streams have already been configured.
  * Only when all streams are configured, the CSI RX is started for that port.
  */
 struct sh_css_sp_pipeline_io_status {
-       uint32_t        active[N_INPUT_SYSTEM_CSI_PORT];        /**< registered streams */
-       uint32_t        running[N_INPUT_SYSTEM_CSI_PORT];       /**< configured streams */
+       uint32_t        active[N_INPUT_SYSTEM_CSI_PORT];        /** registered streams */
+       uint32_t        running[N_INPUT_SYSTEM_CSI_PORT];       /** configured streams */
 };
 
 #endif
@@ -500,7 +500,7 @@ enum sh_css_port_type {
 #define SH_CSS_METADATA_OFFLINE_MODE   0x04
 #define SH_CSS_METADATA_WAIT_INPUT     0x08
 
-/** @brief Free an array of metadata buffers.
+/* @brief Free an array of metadata buffers.
  *
  * @param[in]  num_bufs        Number of metadata buffers to be freed.
  * @param[in]  bufs            Pointer of array of metadata buffers.
@@ -764,7 +764,7 @@ struct sh_css_hmm_buffer {
                        hrt_vaddress    frame_data;
                        uint32_t        flashed;
                        uint32_t        exp_id;
-                       uint32_t        isp_parameters_id; /**< Unique ID to track which config was
+                       uint32_t        isp_parameters_id; /** Unique ID to track which config was
                                                                actually applied to a particular frame */
 #if CONFIG_ON_FRAME_ENQUEUE()
                        struct sh_css_config_on_frame_enqueue config_on_frame_enqueue;
index e127892..4bcc35d 100644 (file)
@@ -22,7 +22,7 @@
 #include <ia_css_pipe_public.h>
 #include <ia_css_stream_public.h>
 
-/** The pipe id type, distinguishes the kind of pipes that
+/* The pipe id type, distinguishes the kind of pipes that
  *  can be run in parallel.
  */
 enum ia_css_pipe_id {
index 36aaa30..883474e 100644 (file)
@@ -321,7 +321,7 @@ calculate_mipi_buff_size(
        height = stream_cfg->input_config.input_res.height;
        format = stream_cfg->input_config.format;
        pack_raw_pixels = stream_cfg->pack_raw_pixels;
-       /** end of NOTE */
+       /* end of NOTE */
 
        /**
 #ifndef ISP2401
@@ -341,7 +341,7 @@ calculate_mipi_buff_size(
         * in the non-continuous use scenario.
         */
        width_padded = width + (2 * ISP_VEC_NELEMS);
-       /** end of NOTE */
+       /* end of NOTE */
 
        IA_CSS_ENTER("padded_width=%d, height=%d, format=%d\n",
                     width_padded, height, format);
index a7ffe6d..270ec2b 100644 (file)
@@ -144,8 +144,8 @@ struct ia_css_isp_parameters {
        struct sh_css_ddr_address_map_size pipe_ddr_ptrs_size[IA_CSS_PIPE_ID_NUM];
        struct sh_css_ddr_address_map ddr_ptrs;
        struct sh_css_ddr_address_map_size ddr_ptrs_size;
-       struct ia_css_frame *output_frame; /**< Output frame the config is to be applied to (optional) */
-       uint32_t isp_parameters_id; /**< Unique ID to track which config was actually applied to a particular frame */
+       struct ia_css_frame *output_frame; /** Output frame the config is to be applied to (optional) */
+       uint32_t isp_parameters_id; /** Unique ID to track which config was actually applied to a particular frame */
 };
 
 void
index e6a3459..6fc00fc 100644 (file)
@@ -261,7 +261,7 @@ sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame,
        assert(out_frame != NULL);
 
        {
-               /**
+               /*
                 * Clear sh_css_sp_stage for easy debugging.
                 * program_input_circuit must be saved as it is set outside
                 * this function.
@@ -335,7 +335,7 @@ sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame,
        assert(out_frame != NULL);
 
        {
-               /**
+               /*
                 * Clear sh_css_sp_stage for easy debugging.
                 * program_input_circuit must be saved as it is set outside
                 * this function.
@@ -909,7 +909,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
        xinfo = binary->info;
        info  = &xinfo->sp;
        {
-               /**
+               /*
                 * Clear sh_css_sp_stage for easy debugging.
                 * program_input_circuit must be saved as it is set outside
                 * this function.
@@ -980,7 +980,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
        sh_css_isp_stage.binary_name[SH_CSS_MAX_BINARY_NAME - 1] = 0;
        sh_css_isp_stage.mem_initializers = *isp_mem_if;
 
-       /**
+       /*
         * Even when a stage does not need uds and does not need params,
         * ia_css_uds_sp_scale_params() seems to be called (needs
         * further investigation). This function can not deal with
@@ -1429,7 +1429,7 @@ sh_css_init_host2sp_frame_data(void)
 }
 
 
-/**
+/*
  * @brief Update the offline frame information in host_sp_communication.
  * Refer to "sh_css_sp.h" for more details.
  */
@@ -1461,7 +1461,7 @@ sh_css_update_host2sp_offline_frame(
 }
 
 #if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
-/**
+/*
  * @brief Update the mipi frame information in host_sp_communication.
  * Refer to "sh_css_sp.h" for more details.
  */
@@ -1488,7 +1488,7 @@ sh_css_update_host2sp_mipi_frame(
                                frame ? frame->data : 0);
 }
 
-/**
+/*
  * @brief Update the mipi metadata information in host_sp_communication.
  * Refer to "sh_css_sp.h" for more details.
  */
@@ -1735,7 +1735,7 @@ ia_css_isp_has_started(void)
 }
 
 
-/**
+/*
  * @brief Initialize the DMA software-mask in the debug mode.
  * Refer to "sh_css_sp.h" for more details.
  */
@@ -1761,7 +1761,7 @@ sh_css_sp_init_dma_sw_reg(int dma_id)
        return true;
 }
 
-/**
+/*
  * @brief Set the DMA software-mask in the debug mode.
  * Refer to "sh_css_sp.h" for more details.
  */
index e49e478..0b8e3d8 100644 (file)
@@ -61,7 +61,7 @@ struct sh_css {
 #endif
        hrt_vaddress                   sp_bin_addr;
        hrt_data                       page_table_base_index;
-       unsigned int                   size_mem_words; /** \deprecated{Use ia_css_mipi_buffer_config instead.}*/
+       unsigned int                   size_mem_words; /* \deprecated{Use ia_css_mipi_buffer_config instead.}*/
        enum ia_css_irq_type           irq_type;
        unsigned int                   pipe_counter;
        
index 068aece..cded30f 100644 (file)
@@ -394,7 +394,7 @@ struct octeon_hcd {
                                result = -1;                                \
                                break;                                      \
                        } else                                              \
-                               cvmx_wait(100);                             \
+                               __delay(100);                               \
                }                                                           \
        } while (0);                                                        \
        result; })
@@ -774,7 +774,7 @@ retry:
        usbn_clk_ctl.s.hclk_rst = 1;
        cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
        /* 2e.  Wait 64 core-clock cycles for HCLK to stabilize */
-       cvmx_wait(64);
+       __delay(64);
        /*
         * 3. Program the power-on reset field in the USBN clock-control
         *    register:
@@ -795,7 +795,7 @@ retry:
        cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
                            usbn_usbp_ctl_status.u64);
        /* 6. Wait 10 cycles */
-       cvmx_wait(10);
+       __delay(10);
        /*
         * 7. Clear ATE_RESET field in the USBN clock-control register:
         *    USBN_USBP_CTL_STATUS[ATE_RESET] = 0
index e69a215..12c9df9 100644 (file)
@@ -102,7 +102,7 @@ enum modulation rf69_get_modulation(struct spi_device *spi)
 
        currentValue = READ_REG(REG_DATAMODUL);
 
-       switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE >> 3) { // TODO improvement: change 3 to define
+       switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE) {
        case DATAMODUL_MODULATION_TYPE_OOK: return OOK;
        case DATAMODUL_MODULATION_TYPE_FSK: return FSK;
        default:                            return undefined;
index c0664dc..4463107 100644 (file)
@@ -1395,19 +1395,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
        if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
            (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
                len = pcur_bss->Ssid.SsidLength;
-
-               wrqu->essid.length = len;
-
                memcpy(extra, pcur_bss->Ssid.Ssid, len);
-
-               wrqu->essid.flags = 1;
        } else {
-               ret = -1;
-               goto exit;
+               len = 0;
+               *extra = 0;
        }
-
-exit:
-
+       wrqu->essid.length = len;
+       wrqu->essid.flags = 1;
 
        return ret;
 }
index 7c69b4a..0d99b24 100644 (file)
@@ -920,7 +920,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                                        " %d i: %d bio: %p, allocating another"
                                        " bio\n", bio->bi_vcnt, i, bio);
 
-                               rc = blk_rq_append_bio(req, bio);
+                               rc = blk_rq_append_bio(req, &bio);
                                if (rc) {
                                        pr_err("pSCSI: failed to append bio\n");
                                        goto fail;
@@ -938,7 +938,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        }
 
        if (bio) {
-               rc = blk_rq_append_bio(req, bio);
+               rc = blk_rq_append_bio(req, &bio);
                if (rc) {
                        pr_err("pSCSI: failed to append bio\n");
                        goto fail;
index 7952357..edb6e4e 100644 (file)
@@ -590,7 +590,6 @@ static int __init optee_driver_init(void)
                return -ENODEV;
 
        np = of_find_matching_node(fw_np, optee_match);
-       of_node_put(fw_np);
        if (!np)
                return -ENODEV;
 
index dc63aba..dfd2324 100644 (file)
@@ -88,7 +88,6 @@ struct time_in_idle {
  * @policy: cpufreq policy.
  * @node: list_head to link all cpufreq_cooling_device together.
  * @idle_time: idle time stats
- * @plat_get_static_power: callback to calculate the static power
  *
  * This structure is required for keeping information of each registered
  * cpufreq_cooling_device.
@@ -104,7 +103,6 @@ struct cpufreq_cooling_device {
        struct cpufreq_policy *policy;
        struct list_head node;
        struct time_in_idle *idle_time;
-       get_static_t plat_get_static_power;
 };
 
 static DEFINE_IDA(cpufreq_ida);
@@ -318,60 +316,6 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
        return load;
 }
 
-/**
- * get_static_power() - calculate the static power consumed by the cpus
- * @cpufreq_cdev:      struct &cpufreq_cooling_device for this cpu cdev
- * @tz:                thermal zone device in which we're operating
- * @freq:      frequency in KHz
- * @power:     pointer in which to store the calculated static power
- *
- * Calculate the static power consumed by the cpus described by
- * @cpu_actor running at frequency @freq.  This function relies on a
- * platform specific function that should have been provided when the
- * actor was registered.  If it wasn't, the static power is assumed to
- * be negligible.  The calculated static power is stored in @power.
- *
- * Return: 0 on success, -E* on failure.
- */
-static int get_static_power(struct cpufreq_cooling_device *cpufreq_cdev,
-                           struct thermal_zone_device *tz, unsigned long freq,
-                           u32 *power)
-{
-       struct dev_pm_opp *opp;
-       unsigned long voltage;
-       struct cpufreq_policy *policy = cpufreq_cdev->policy;
-       struct cpumask *cpumask = policy->related_cpus;
-       unsigned long freq_hz = freq * 1000;
-       struct device *dev;
-
-       if (!cpufreq_cdev->plat_get_static_power) {
-               *power = 0;
-               return 0;
-       }
-
-       dev = get_cpu_device(policy->cpu);
-       WARN_ON(!dev);
-
-       opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
-       if (IS_ERR(opp)) {
-               dev_warn_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
-                                    freq_hz, PTR_ERR(opp));
-               return -EINVAL;
-       }
-
-       voltage = dev_pm_opp_get_voltage(opp);
-       dev_pm_opp_put(opp);
-
-       if (voltage == 0) {
-               dev_err_ratelimited(dev, "Failed to get voltage for frequency %lu\n",
-                                   freq_hz);
-               return -EINVAL;
-       }
-
-       return cpufreq_cdev->plat_get_static_power(cpumask, tz->passive_delay,
-                                                 voltage, power);
-}
-
 /**
  * get_dynamic_power() - calculate the dynamic power
  * @cpufreq_cdev:      &cpufreq_cooling_device for this cdev
@@ -491,8 +435,8 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
                                       u32 *power)
 {
        unsigned long freq;
-       int i = 0, cpu, ret;
-       u32 static_power, dynamic_power, total_load = 0;
+       int i = 0, cpu;
+       u32 total_load = 0;
        struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
        struct cpufreq_policy *policy = cpufreq_cdev->policy;
        u32 *load_cpu = NULL;
@@ -522,22 +466,15 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
 
        cpufreq_cdev->last_load = total_load;
 
-       dynamic_power = get_dynamic_power(cpufreq_cdev, freq);
-       ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
-       if (ret) {
-               kfree(load_cpu);
-               return ret;
-       }
+       *power = get_dynamic_power(cpufreq_cdev, freq);
 
        if (load_cpu) {
                trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
-                                                 load_cpu, i, dynamic_power,
-                                                 static_power);
+                                                 load_cpu, i, *power);
 
                kfree(load_cpu);
        }
 
-       *power = static_power + dynamic_power;
        return 0;
 }
 
@@ -561,8 +498,6 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
                               unsigned long state, u32 *power)
 {
        unsigned int freq, num_cpus;
-       u32 static_power, dynamic_power;
-       int ret;
        struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
 
        /* Request state should be less than max_level */
@@ -572,13 +507,9 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
        num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
 
        freq = cpufreq_cdev->freq_table[state].frequency;
-       dynamic_power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
-       ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
-       if (ret)
-               return ret;
+       *power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
 
-       *power = static_power + dynamic_power;
-       return ret;
+       return 0;
 }
 
 /**
@@ -606,21 +537,14 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
                               unsigned long *state)
 {
        unsigned int cur_freq, target_freq;
-       int ret;
-       s32 dyn_power;
-       u32 last_load, normalised_power, static_power;
+       u32 last_load, normalised_power;
        struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
        struct cpufreq_policy *policy = cpufreq_cdev->policy;
 
        cur_freq = cpufreq_quick_get(policy->cpu);
-       ret = get_static_power(cpufreq_cdev, tz, cur_freq, &static_power);
-       if (ret)
-               return ret;
-
-       dyn_power = power - static_power;
-       dyn_power = dyn_power > 0 ? dyn_power : 0;
+       power = power > 0 ? power : 0;
        last_load = cpufreq_cdev->last_load ?: 1;
-       normalised_power = (dyn_power * 100) / last_load;
+       normalised_power = (power * 100) / last_load;
        target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
 
        *state = get_level(cpufreq_cdev, target_freq);
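
With the static-power callback gone, cpufreq_power2state() above treats the whole requested budget as dynamic power and scales it by the last observed load before mapping it to a frequency. A standalone illustration of that normalisation, using made-up numbers (editor's sketch, not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned int power = 1500;    /* requested budget in mW (example value) */
        unsigned int last_load = 50;  /* last measured load in %, assumed >= 1 */
        unsigned int normalised = (power * 100) / last_load;

        /* 1500 mW at 50% load corresponds to 3000 mW of full-load capacity */
        printf("normalised power: %u mW\n", normalised);
        return 0;
}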
@@ -671,8 +595,6 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
  * @policy: cpufreq policy
  * Normally this should be same as cpufreq policy->related_cpus.
  * @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- *                    cpus (optional)
  *
  * This interface function registers the cpufreq cooling device with the name
  * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -684,8 +606,7 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
  */
 static struct thermal_cooling_device *
 __cpufreq_cooling_register(struct device_node *np,
-                       struct cpufreq_policy *policy, u32 capacitance,
-                       get_static_t plat_static_func)
+                       struct cpufreq_policy *policy, u32 capacitance)
 {
        struct thermal_cooling_device *cdev;
        struct cpufreq_cooling_device *cpufreq_cdev;
@@ -755,8 +676,6 @@ __cpufreq_cooling_register(struct device_node *np,
        }
 
        if (capacitance) {
-               cpufreq_cdev->plat_get_static_power = plat_static_func;
-
                ret = update_freq_table(cpufreq_cdev, capacitance);
                if (ret) {
                        cdev = ERR_PTR(ret);
@@ -813,13 +732,12 @@ free_cdev:
 struct thermal_cooling_device *
 cpufreq_cooling_register(struct cpufreq_policy *policy)
 {
-       return __cpufreq_cooling_register(NULL, policy, 0, NULL);
+       return __cpufreq_cooling_register(NULL, policy, 0);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
 
 /**
  * of_cpufreq_cooling_register - function to create cpufreq cooling device.
- * @np: a valid struct device_node to the cooling device device tree node
  * @policy: cpufreq policy
  *
  * This interface function registers the cpufreq cooling device with the name
@@ -827,86 +745,45 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
  * cooling devices. Using this API, the cpufreq cooling device will be
  * linked to the device tree node provided.
  *
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
-                           struct cpufreq_policy *policy)
-{
-       if (!np)
-               return ERR_PTR(-EINVAL);
-
-       return __cpufreq_cooling_register(np, policy, 0, NULL);
-}
-EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
-
-/**
- * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @policy:            cpufreq policy
- * @capacitance:       dynamic power coefficient for these cpus
- * @plat_static_func:  function to calculate the static power consumed by these
- *                     cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x".  This api can support multiple
- * instances of cpufreq cooling devices.  Using this function, the
- * cooling device will implement the power extensions by using a
- * simple cpu power model.  The cpus must have registered their OPPs
- * using the OPP library.
- *
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus.  If the platform's static
- * power consumption is unknown or negligible, make it NULL.
- *
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance,
-                              get_static_t plat_static_func)
-{
-       return __cpufreq_cooling_register(NULL, policy, capacitance,
-                               plat_static_func);
-}
-EXPORT_SYMBOL(cpufreq_power_cooling_register);
-
-/**
- * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @np:        a valid struct device_node to the cooling device device tree node
- * @policy: cpufreq policy
- * @capacitance:       dynamic power coefficient for these cpus
- * @plat_static_func:  function to calculate the static power consumed by these
- *                     cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x".  This api can support multiple
- * instances of cpufreq cooling devices.  Using this API, the cpufreq
- * cooling device will be linked to the device tree node provided.
  * Using this function, the cooling device will implement the power
  * extensions by using a simple cpu power model.  The cpus must have
  * registered their OPPs using the OPP library.
  *
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus.  If the platform's static
- * power consumption is unknown or negligible, make it NULL.
+ * It also takes into account, if property present in policy CPU node, the
+ * static power consumed by the cpu.
  *
  * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
+ * and NULL on failure.
  */
 struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
-                                 struct cpufreq_policy *policy,
-                                 u32 capacitance,
-                                 get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
 {
-       if (!np)
-               return ERR_PTR(-EINVAL);
+       struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
+       struct thermal_cooling_device *cdev = NULL;
+       u32 capacitance = 0;
+
+       if (!np) {
+               pr_err("cpu_cooling: OF node not available for cpu%d\n",
+                      policy->cpu);
+               return NULL;
+       }
+
+       if (of_find_property(np, "#cooling-cells", NULL)) {
+               of_property_read_u32(np, "dynamic-power-coefficient",
+                                    &capacitance);
 
-       return __cpufreq_cooling_register(np, policy, capacitance,
-                               plat_static_func);
+               cdev = __cpufreq_cooling_register(np, policy, capacitance);
+               if (IS_ERR(cdev)) {
+                       pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n",
+                              policy->cpu, PTR_ERR(cdev));
+                       cdev = NULL;
+               }
+       }
+
+       of_node_put(np);
+       return cdev;
 }
-EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
+EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
 
 /**
  * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
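
A minimal sketch of how a cpufreq driver might adopt the reworked of_cpufreq_cooling_register(), which now takes only the policy and looks up "#cooling-cells" and "dynamic-power-coefficient" from the CPU's DT node itself. The example_* names are hypothetical, for illustration only:

#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>

static struct thermal_cooling_device *example_cdev; /* hypothetical storage */

static void example_cpufreq_ready(struct cpufreq_policy *policy)
{
        /* Returns NULL rather than an ERR_PTR when nothing was registered. */
        example_cdev = of_cpufreq_cooling_register(policy);
}

static int example_cpufreq_exit(struct cpufreq_policy *policy)
{
        cpufreq_cooling_unregister(example_cdev);
        return 0;
}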
index 419a7a9..f45bcbc 100644 (file)
@@ -339,7 +339,7 @@ static void __ring_interrupt(struct tb_ring *ring)
                return;
 
        if (ring->start_poll) {
-               __ring_interrupt_mask(ring, false);
+               __ring_interrupt_mask(ring, true);
                ring->start_poll(ring->poll_data);
        } else {
                schedule_work(&ring->work);
index 427e0d5..539b49a 100644 (file)
@@ -1762,7 +1762,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
 {
        struct n_tty_data *ldata = tty->disc_data;
 
-       if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
+       if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
                bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
                ldata->line_start = ldata->read_tail;
                if (!L_ICANON(tty) || !read_cnt(ldata)) {
@@ -2425,7 +2425,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
                return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
        case TIOCINQ:
                down_write(&tty->termios_rwsem);
-               if (L_ICANON(tty))
+               if (L_ICANON(tty) && !L_EXTPROC(tty))
                        retval = inq_canon(ldata);
                else
                        retval = read_cnt(ldata);
index 1bef398..28133db 100644 (file)
@@ -132,6 +132,33 @@ void serdev_device_close(struct serdev_device *serdev)
 }
 EXPORT_SYMBOL_GPL(serdev_device_close);
 
+static void devm_serdev_device_release(struct device *dev, void *dr)
+{
+       serdev_device_close(*(struct serdev_device **)dr);
+}
+
+int devm_serdev_device_open(struct device *dev, struct serdev_device *serdev)
+{
+       struct serdev_device **dr;
+       int ret;
+
+       dr = devres_alloc(devm_serdev_device_release, sizeof(*dr), GFP_KERNEL);
+       if (!dr)
+               return -ENOMEM;
+
+       ret = serdev_device_open(serdev);
+       if (ret) {
+               devres_free(dr);
+               return ret;
+       }
+
+       *dr = serdev;
+       devres_add(dev, dr);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devm_serdev_device_open);
+
 void serdev_device_write_wakeup(struct serdev_device *serdev)
 {
        complete(&serdev->write_comp);
@@ -268,8 +295,8 @@ static int serdev_drv_probe(struct device *dev)
 static int serdev_drv_remove(struct device *dev)
 {
        const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
-
-       sdrv->remove(to_serdev_device(dev));
+       if (sdrv->remove)
+               sdrv->remove(to_serdev_device(dev));
        return 0;
 }
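
A minimal usage sketch for the devm_serdev_device_open() helper added above, assuming a hypothetical serdev client driver (illustration only, not part of this series):

#include <linux/serdev.h>

static int example_serdev_probe(struct serdev_device *serdev)
{
        int ret;

        /* Port is closed automatically when the driver unbinds. */
        ret = devm_serdev_device_open(&serdev->dev, serdev);
        if (ret)
                return ret;

        serdev_device_set_baudrate(serdev, 115200);
        return 0;
}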
 
index ce7ad0a..247788a 100644 (file)
@@ -27,23 +27,41 @@ static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp,
 {
        struct serdev_controller *ctrl = port->client_data;
        struct serport *serport = serdev_controller_get_drvdata(ctrl);
+       int ret;
 
        if (!test_bit(SERPORT_ACTIVE, &serport->flags))
                return 0;
 
-       return serdev_controller_receive_buf(ctrl, cp, count);
+       ret = serdev_controller_receive_buf(ctrl, cp, count);
+
+       dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count,
+                               "receive_buf returns %d (count = %zu)\n",
+                               ret, count);
+       if (ret < 0)
+               return 0;
+       else if (ret > count)
+               return count;
+
+       return ret;
 }
 
 static void ttyport_write_wakeup(struct tty_port *port)
 {
        struct serdev_controller *ctrl = port->client_data;
        struct serport *serport = serdev_controller_get_drvdata(ctrl);
+       struct tty_struct *tty;
+
+       tty = tty_port_tty_get(port);
+       if (!tty)
+               return;
 
-       if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &port->tty->flags) &&
+       if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) &&
            test_bit(SERPORT_ACTIVE, &serport->flags))
                serdev_controller_write_wakeup(ctrl);
 
-       wake_up_interruptible_poll(&port->tty->write_wait, POLLOUT);
+       wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+
+       tty_kref_put(tty);
 }
 
 static const struct tty_port_client_operations client_ops = {
@@ -136,8 +154,10 @@ static void ttyport_close(struct serdev_controller *ctrl)
 
        clear_bit(SERPORT_ACTIVE, &serport->flags);
 
+       tty_lock(tty);
        if (tty->ops->close)
                tty->ops->close(tty, NULL);
+       tty_unlock(tty);
 
        tty_release_struct(tty, serport->tty_idx);
 }
index 362c25f..ae6a256 100644 (file)
@@ -122,12 +122,14 @@ static void __init init_port(struct earlycon_device *device)
        serial8250_early_out(port, UART_FCR, 0);        /* no fifo */
        serial8250_early_out(port, UART_MCR, 0x3);      /* DTR + RTS */
 
-       divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud);
-       c = serial8250_early_in(port, UART_LCR);
-       serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB);
-       serial8250_early_out(port, UART_DLL, divisor & 0xff);
-       serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff);
-       serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
+       if (port->uartclk && device->baud) {
+               divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud);
+               c = serial8250_early_in(port, UART_LCR);
+               serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB);
+               serial8250_early_out(port, UART_DLL, divisor & 0xff);
+               serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff);
+               serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
+       }
 }
 
 int __init early_serial8250_setup(struct earlycon_device *device,
index b7e0e34..54adf8d 100644 (file)
@@ -5135,6 +5135,9 @@ static const struct pci_device_id serial_pci_tbl[] = {
        { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
        { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
 
+       /* Amazon PCI serial device */
+       { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
+
        /*
         * These entries match devices with class COMMUNICATION_SERIAL,
         * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
index 3593ce0..8800099 100644 (file)
@@ -247,7 +247,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev)
        if (ret)
                goto err_mux;
 
-       ulpi_node = of_find_node_by_name(of_node_get(pdev->dev.of_node), "ulpi");
+       ulpi_node = of_get_child_by_name(pdev->dev.of_node, "ulpi");
        if (ulpi_node) {
                phy_node = of_get_next_available_child(ulpi_node, NULL);
                ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy");
index 8b35144..9a2ab67 100644 (file)
@@ -180,9 +180,9 @@ static int ulpi_of_register(struct ulpi *ulpi)
        /* Find a ulpi bus underneath the parent or the grandparent */
        parent = ulpi->dev.parent;
        if (parent->of_node)
-               np = of_find_node_by_name(parent->of_node, "ulpi");
+               np = of_get_child_by_name(parent->of_node, "ulpi");
        else if (parent->parent && parent->parent->of_node)
-               np = of_find_node_by_name(parent->parent->of_node, "ulpi");
+               np = of_get_child_by_name(parent->parent->of_node, "ulpi");
        if (!np)
                return 0;
 
index da8acd9..c821b4b 100644 (file)
@@ -555,6 +555,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
        unsigned iad_num = 0;
 
        memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
+       nintf = nintf_orig = config->desc.bNumInterfaces;
+       config->desc.bNumInterfaces = 0;        // Adjusted later
+
        if (config->desc.bDescriptorType != USB_DT_CONFIG ||
            config->desc.bLength < USB_DT_CONFIG_SIZE ||
            config->desc.bLength > size) {
@@ -568,7 +571,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
        buffer += config->desc.bLength;
        size -= config->desc.bLength;
 
-       nintf = nintf_orig = config->desc.bNumInterfaces;
        if (nintf > USB_MAXINTERFACES) {
                dev_warn(ddev, "config %d has too many interfaces: %d, "
                    "using maximum allowed: %d\n",
@@ -905,14 +907,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
        }
 }
 
+static const __u8 bos_desc_len[256] = {
+       [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
+       [USB_CAP_TYPE_EXT]          = USB_DT_USB_EXT_CAP_SIZE,
+       [USB_SS_CAP_TYPE]           = USB_DT_USB_SS_CAP_SIZE,
+       [USB_SSP_CAP_TYPE]          = USB_DT_USB_SSP_CAP_SIZE(1),
+       [CONTAINER_ID_TYPE]         = USB_DT_USB_SS_CONTN_ID_SIZE,
+       [USB_PTM_CAP_TYPE]          = USB_DT_USB_PTM_ID_SIZE,
+};
+
 /* Get BOS descriptor set */
 int usb_get_bos_descriptor(struct usb_device *dev)
 {
        struct device *ddev = &dev->dev;
        struct usb_bos_descriptor *bos;
        struct usb_dev_cap_header *cap;
+       struct usb_ssp_cap_descriptor *ssp_cap;
        unsigned char *buffer;
-       int length, total_len, num, i;
+       int length, total_len, num, i, ssac;
+       __u8 cap_type;
        int ret;
 
        bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
@@ -965,7 +978,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                        dev->bos->desc->bNumDeviceCaps = i;
                        break;
                }
+               cap_type = cap->bDevCapabilityType;
                length = cap->bLength;
+               if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
+                       dev->bos->desc->bNumDeviceCaps = i;
+                       break;
+               }
+
                total_len -= length;
 
                if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
@@ -973,7 +992,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                        continue;
                }
 
-               switch (cap->bDevCapabilityType) {
+               switch (cap_type) {
                case USB_CAP_TYPE_WIRELESS_USB:
                        /* Wireless USB cap descriptor is handled by wusb */
                        break;
@@ -986,8 +1005,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                                (struct usb_ss_cap_descriptor *)buffer;
                        break;
                case USB_SSP_CAP_TYPE:
-                       dev->bos->ssp_cap =
-                               (struct usb_ssp_cap_descriptor *)buffer;
+                       ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
+                       ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
+                               USB_SSP_SUBLINK_SPEED_ATTRIBS);
+                       if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
+                               dev->bos->ssp_cap = ssp_cap;
                        break;
                case CONTAINER_ID_TYPE:
                        dev->bos->ss_id =
index 705c573..a3fad4e 100644 (file)
@@ -1442,14 +1442,18 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        int number_of_packets = 0;
        unsigned int stream_id = 0;
        void *buf;
-
-       if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
-                               USBDEVFS_URB_SHORT_NOT_OK |
+       unsigned long mask =    USBDEVFS_URB_SHORT_NOT_OK |
                                USBDEVFS_URB_BULK_CONTINUATION |
                                USBDEVFS_URB_NO_FSBR |
                                USBDEVFS_URB_ZERO_PACKET |
-                               USBDEVFS_URB_NO_INTERRUPT))
-               return -EINVAL;
+                               USBDEVFS_URB_NO_INTERRUPT;
+       /* USBDEVFS_URB_ISO_ASAP is a special case */
+       if (uurb->type == USBDEVFS_URB_TYPE_ISO)
+               mask |= USBDEVFS_URB_ISO_ASAP;
+
+       if (uurb->flags & ~mask)
+                       return -EINVAL;
+
        if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
                return -EINVAL;
        if (uurb->buffer_length > 0 && !uurb->buffer)
index 7ccdd3d..cf7bbcb 100644 (file)
@@ -4948,6 +4948,15 @@ loop:
                usb_put_dev(udev);
                if ((status == -ENOTCONN) || (status == -ENOTSUPP))
                        break;
+
+               /* When halfway through our retry count, power-cycle the port */
+               if (i == (SET_CONFIG_TRIES / 2) - 1) {
+                       dev_info(&port_dev->dev, "attempt power cycle\n");
+                       usb_hub_set_port_power(hdev, hub, port1, false);
+                       msleep(2 * hub_power_on_good_delay(hub));
+                       usb_hub_set_port_power(hdev, hub, port1, true);
+                       msleep(hub_power_on_good_delay(hub));
+               }
        }
        if (hub->hdev->parent ||
                        !hcd->driver->port_handed_over ||
index f1dbab6..4024926 100644 (file)
@@ -52,10 +52,11 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Microsoft LifeCam-VX700 v2.0 */
        { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
 
-       /* Logitech HD Pro Webcams C920, C920-C and C930e */
+       /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
        { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+       { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
 
        /* Logitech ConferenceCam CC3000e */
        { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -146,6 +147,12 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* appletouch */
        { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
+       { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+
+       /* ELSA MicroLink 56K */
+       { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
        { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
 
index f66c941..31749c7 100644 (file)
@@ -537,6 +537,7 @@ struct dwc2_core_params {
  *                       2 - Internal DMA
  * @power_optimized     Are power optimizations enabled?
  * @num_dev_ep          Number of device endpoints available
+ * @num_dev_in_eps      Number of device IN endpoints available
  * @num_dev_perio_in_ep Number of device periodic IN endpoints
  *                      available
  * @dev_token_q_depth   Device Mode IN Token Sequence Learning Queue
@@ -565,6 +566,7 @@ struct dwc2_core_params {
  *                       2 - 8 or 16 bits
  * @snpsid:             Value from SNPSID register
  * @dev_ep_dirs:        Direction of device endpoints (GHWCFG1)
+ * @g_tx_fifo_size[]   Power-on values of TxFIFO sizes
  */
 struct dwc2_hw_params {
        unsigned op_mode:3;
@@ -586,12 +588,14 @@ struct dwc2_hw_params {
        unsigned fs_phy_type:2;
        unsigned i2c_enable:1;
        unsigned num_dev_ep:4;
+       unsigned num_dev_in_eps : 4;
        unsigned num_dev_perio_in_ep:4;
        unsigned total_fifo_size:16;
        unsigned power_optimized:1;
        unsigned utmi_phy_data_width:2;
        u32 snpsid;
        u32 dev_ep_dirs;
+       u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
 };
 
 /* Size of control and EP0 buffers */
index 88529d0..e4c3ce0 100644 (file)
@@ -195,55 +195,18 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
 {
        if (hsotg->hw_params.en_multiple_tx_fifo)
                /* In dedicated FIFO mode we need count of IN EPs */
-               return (dwc2_readl(hsotg->regs + GHWCFG4)  &
-                       GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT;
+               return hsotg->hw_params.num_dev_in_eps;
        else
                /* In shared FIFO mode we need count of Periodic IN EPs */
                return hsotg->hw_params.num_dev_perio_in_ep;
 }
 
-/**
- * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs
- */
-static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg)
-{
-       int val = 0;
-       int i;
-       u32 ep_dirs;
-
-       /*
-        * Don't need additional space for ep info control registers in
-        * slave mode.
-        */
-       if (!using_dma(hsotg)) {
-               dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n");
-               return 0;
-       }
-
-       /*
-        * Buffer DMA mode - 1 location per endpoit
-        * Descriptor DMA mode - 4 locations per endpoint
-        */
-       ep_dirs = hsotg->hw_params.dev_ep_dirs;
-
-       for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) {
-               val += ep_dirs & 3 ? 1 : 2;
-               ep_dirs >>= 2;
-       }
-
-       if (using_desc_dma(hsotg))
-               val = val * 4;
-
-       return val;
-}
-
 /**
  * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
  * device mode TX FIFOs
  */
 int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
 {
-       int ep_info_size;
        int addr;
        int tx_addr_max;
        u32 np_tx_fifo_size;
@@ -252,8 +215,7 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
                                hsotg->params.g_np_tx_fifo_size);
 
        /* Get Endpoint Info Control block size in DWORDs. */
-       ep_info_size = dwc2_hsotg_ep_info_size(hsotg);
-       tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size;
+       tx_addr_max = hsotg->hw_params.total_fifo_size;
 
        addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
        if (tx_addr_max <= addr)
index ef73af6..03fd20f 100644 (file)
@@ -484,8 +484,7 @@ static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
        }
 
        for (fifo = 1; fifo <= fifo_count; fifo++) {
-               dptxfszn = (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
-                       FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
+               dptxfszn = hsotg->hw_params.g_tx_fifo_size[fifo];
 
                if (hsotg->params.g_tx_fifo_size[fifo] < min ||
                    hsotg->params.g_tx_fifo_size[fifo] >  dptxfszn) {
@@ -609,6 +608,7 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
        struct dwc2_hw_params *hw = &hsotg->hw_params;
        bool forced;
        u32 gnptxfsiz;
+       int fifo, fifo_count;
 
        if (hsotg->dr_mode == USB_DR_MODE_HOST)
                return;
@@ -617,6 +617,14 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
 
        gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
 
+       fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+
+       for (fifo = 1; fifo <= fifo_count; fifo++) {
+               hw->g_tx_fifo_size[fifo] =
+                       (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
+                        FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
+       }
+
        if (forced)
                dwc2_clear_force_mode(hsotg);
 
@@ -661,14 +669,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
        hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
        grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
 
-       /*
-        * Host specific hardware parameters. Reading these parameters
-        * requires the controller to be in host mode. The mode will
-        * be forced, if necessary, to read these values.
-        */
-       dwc2_get_host_hwparams(hsotg);
-       dwc2_get_dev_hwparams(hsotg);
-
        /* hwcfg1 */
        hw->dev_ep_dirs = hwcfg1;
 
@@ -711,6 +711,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
        hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
        hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
                                  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+       hw->num_dev_in_eps = (hwcfg4 & GHWCFG4_NUM_IN_EPS_MASK) >>
+                            GHWCFG4_NUM_IN_EPS_SHIFT;
        hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
        hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
        hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
@@ -719,6 +721,13 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
        /* fifo sizes */
        hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
                                GRXFSIZ_DEPTH_SHIFT;
+       /*
+        * Host specific hardware parameters. Reading these parameters
+        * requires the controller to be in host mode. The mode will
+        * be forced, if necessary, to read these values.
+        */
+       dwc2_get_host_hwparams(hsotg);
+       dwc2_get_dev_hwparams(hsotg);
 
        return 0;
 }
index c4a4d7b..7ae0eef 100644 (file)
@@ -51,8 +51,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
 
                clk = of_clk_get(np, i);
                if (IS_ERR(clk)) {
-                       while (--i >= 0)
+                       while (--i >= 0) {
+                               clk_disable_unprepare(simple->clks[i]);
                                clk_put(simple->clks[i]);
+                       }
                        return PTR_ERR(clk);
                }
 
@@ -203,6 +205,7 @@ static struct platform_driver dwc3_of_simple_driver = {
        .driver         = {
                .name   = "dwc3-of-simple",
                .of_match_table = of_dwc3_simple_match,
+               .pm     = &dwc3_of_simple_dev_pm_ops,
        },
 };
 
index 981fd98..639dd1b 100644 (file)
@@ -259,7 +259,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
 {
        const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
        struct dwc3             *dwc = dep->dwc;
-       u32                     timeout = 500;
+       u32                     timeout = 1000;
        u32                     reg;
 
        int                     cmd_status = 0;
@@ -912,7 +912,7 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
                         */
                        if (speed == USB_SPEED_HIGH) {
                                struct usb_ep *ep = &dep->endpoint;
-                               unsigned int mult = ep->mult - 1;
+                               unsigned int mult = 2;
                                unsigned int maxp = usb_endpoint_maxp(ep->desc);
 
                                if (length <= (2 * maxp))
index eec14e6..77c7ecc 100644 (file)
@@ -146,7 +146,6 @@ int config_ep_by_speed(struct usb_gadget *g,
                        struct usb_function *f,
                        struct usb_ep *_ep)
 {
-       struct usb_composite_dev        *cdev = get_gadget_data(g);
        struct usb_endpoint_descriptor *chosen_desc = NULL;
        struct usb_descriptor_header **speed_desc = NULL;
 
@@ -226,8 +225,12 @@ ep_found:
                        _ep->maxburst = comp_desc->bMaxBurst + 1;
                        break;
                default:
-                       if (comp_desc->bMaxBurst != 0)
+                       if (comp_desc->bMaxBurst != 0) {
+                               struct usb_composite_dev *cdev;
+
+                               cdev = get_gadget_data(g);
                                ERROR(cdev, "ep0 bMaxBurst must be 0\n");
+                       }
                        _ep->maxburst = 1;
                        break;
                }
index 97ea059..b6cf5ab 100644 (file)
@@ -1012,7 +1012,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                else
                        ret = ep->status;
                goto error_mutex;
-       } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
+       } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
                ret = -ENOMEM;
        } else {
                req->buf      = data;
@@ -2282,9 +2282,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
                int i;
 
                if (len < sizeof(*d) ||
-                   d->bFirstInterfaceNumber >= ffs->interfaces_count ||
-                   !d->Reserved1)
+                   d->bFirstInterfaceNumber >= ffs->interfaces_count)
                        return -EINVAL;
+               if (d->Reserved1 != 1) {
+                       /*
+                        * According to the spec, Reserved1 must be set to 1
+                        * but older kernels incorrectly rejected non-zero
+                        * values.  We fix it here to avoid returning EINVAL
+                        * in response to values we used to accept.
+                        */
+                       pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
+                       d->Reserved1 = 1;
+               }
                for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
                        if (d->Reserved2[i])
                                return -EINVAL;
index a12fb45..784bf86 100644 (file)
@@ -479,7 +479,7 @@ endif
 # or video class gadget drivers), or specific hardware, here.
 config USB_G_WEBCAM
        tristate "USB Webcam Gadget"
-       depends on VIDEO_DEV
+       depends on VIDEO_V4L2
        select USB_LIBCOMPOSITE
        select VIDEOBUF2_VMALLOC
        select USB_F_UVC
index d39f070..01b44e1 100644 (file)
@@ -642,7 +642,6 @@ static const struct of_device_id bdc_of_match[] = {
 static struct platform_driver bdc_driver = {
        .driver         = {
                .name   = BRCM_BDC_NAME,
-               .owner  = THIS_MODULE,
                .pm = &bdc_pm_ops,
                .of_match_table = bdc_of_match,
        },
index 61422d6..1b3efb1 100644 (file)
@@ -1069,8 +1069,12 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc)
 static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
                                            enum usb_device_speed speed)
 {
-       if (udc->gadget->ops->udc_set_speed)
-               udc->gadget->ops->udc_set_speed(udc->gadget, speed);
+       if (udc->gadget->ops->udc_set_speed) {
+               enum usb_device_speed s;
+
+               s = min(speed, udc->gadget->max_speed);
+               udc->gadget->ops->udc_set_speed(udc->gadget, s);
+       }
 }
 
 /**
@@ -1143,11 +1147,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
 
        udc = kzalloc(sizeof(*udc), GFP_KERNEL);
        if (!udc)
-               goto err1;
-
-       ret = device_add(&gadget->dev);
-       if (ret)
-               goto err2;
+               goto err_put_gadget;
 
        device_initialize(&udc->dev);
        udc->dev.release = usb_udc_release;
@@ -1156,7 +1156,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
        udc->dev.parent = parent;
        ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
        if (ret)
-               goto err3;
+               goto err_put_udc;
+
+       ret = device_add(&gadget->dev);
+       if (ret)
+               goto err_put_udc;
 
        udc->gadget = gadget;
        gadget->udc = udc;
@@ -1166,7 +1170,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
 
        ret = device_add(&udc->dev);
        if (ret)
-               goto err4;
+               goto err_unlist_udc;
 
        usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
        udc->vbus = true;
@@ -1174,27 +1178,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
        /* pick up one of pending gadget drivers */
        ret = check_pending_gadget_drivers(udc);
        if (ret)
-               goto err5;
+               goto err_del_udc;
 
        mutex_unlock(&udc_lock);
 
        return 0;
 
-err5:
+ err_del_udc:
        device_del(&udc->dev);
 
-err4:
+ err_unlist_udc:
        list_del(&udc->list);
        mutex_unlock(&udc_lock);
 
-err3:
-       put_device(&udc->dev);
        device_del(&gadget->dev);
 
-err2:
-       kfree(udc);
+ err_put_udc:
+       put_device(&udc->dev);
 
-err1:
+ err_put_gadget:
        put_device(&gadget->dev);
        return ret;
 }
index bc37f40..6e87af2 100644 (file)
 #define USB3_EP0_SS_MAX_PACKET_SIZE    512
 #define USB3_EP0_HSFS_MAX_PACKET_SIZE  64
 #define USB3_EP0_BUF_SIZE              8
-#define USB3_MAX_NUM_PIPES             30
+#define USB3_MAX_NUM_PIPES             6       /* This includes PIPE 0 */
 #define USB3_WAIT_US                   3
 #define USB3_DMA_NUM_SETTING_AREA      4
 /*
index 19f0042..3ed75aa 100644 (file)
@@ -827,7 +827,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
                        default:                /* unknown */
                                break;
                        }
-                       temp = (cap >> 8) & 0xff;
+                       offset = (cap >> 8) & 0xff;
                }
        }
 #endif
index 4f7895d..e26e685 100644 (file)
@@ -162,7 +162,7 @@ static void xhci_debugfs_extcap_regset(struct xhci_hcd *xhci, int cap_id,
 static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
 {
        dma_addr_t              dma;
-       struct xhci_ring        *ring = s->private;
+       struct xhci_ring        *ring = *(struct xhci_ring **)s->private;
 
        dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
        seq_printf(s, "%pad\n", &dma);
@@ -173,7 +173,7 @@ static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
 static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
 {
        dma_addr_t              dma;
-       struct xhci_ring        *ring = s->private;
+       struct xhci_ring        *ring = *(struct xhci_ring **)s->private;
 
        dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
        seq_printf(s, "%pad\n", &dma);
@@ -183,7 +183,7 @@ static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
 
 static int xhci_ring_cycle_show(struct seq_file *s, void *unused)
 {
-       struct xhci_ring        *ring = s->private;
+       struct xhci_ring        *ring = *(struct xhci_ring **)s->private;
 
        seq_printf(s, "%d\n", ring->cycle_state);
 
@@ -346,7 +346,7 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
 }
 
 static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
-                                                  struct xhci_ring *ring,
+                                                  struct xhci_ring **ring,
                                                   const char *name,
                                                   struct dentry *parent)
 {
@@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
 
        snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
        epriv->root = xhci_debugfs_create_ring_dir(xhci,
-                                                  dev->eps[ep_index].new_ring,
+                                                  &dev->eps[ep_index].new_ring,
                                                   epriv->name,
                                                   spriv->root);
        spriv->eps[ep_index] = epriv;
@@ -423,7 +423,7 @@ void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id)
        priv->dev = dev;
        dev->debugfs_private = priv;
 
-       xhci_debugfs_create_ring_dir(xhci, dev->eps[0].ring,
+       xhci_debugfs_create_ring_dir(xhci, &dev->eps[0].ring,
                                     "ep00", priv->root);
 
        xhci_debugfs_create_context_files(xhci, priv->root, slot_id);
@@ -488,11 +488,11 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
                                   ARRAY_SIZE(xhci_extcap_dbc),
                                   "reg-ext-dbc");
 
-       xhci_debugfs_create_ring_dir(xhci, xhci->cmd_ring,
+       xhci_debugfs_create_ring_dir(xhci, &xhci->cmd_ring,
                                     "command-ring",
                                     xhci->debugfs_root);
 
-       xhci_debugfs_create_ring_dir(xhci, xhci->event_ring,
+       xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring,
                                     "event-ring",
                                     xhci->debugfs_root);
 
index e1fba46..3a29b32 100644 (file)
@@ -934,6 +934,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
        if (!vdev)
                return;
 
+       if (vdev->real_port == 0 ||
+                       vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+               xhci_dbg(xhci, "Bad vdev->real_port.\n");
+               goto out;
+       }
+
        tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
        list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
                /* is this a hub device that added a tt_info to the tts list */
@@ -947,6 +953,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
                        }
                }
        }
+out:
        /* we are now at a leaf device */
        xhci_debugfs_remove_slot(xhci, slot_id);
        xhci_free_virt_device(xhci, slot_id);
@@ -964,10 +971,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
                return 0;
        }
 
-       xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
-       if (!xhci->devs[slot_id])
+       dev = kzalloc(sizeof(*dev), flags);
+       if (!dev)
                return 0;
-       dev = xhci->devs[slot_id];
 
        /* Allocate the (output) device context that will be used in the HC. */
        dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
@@ -1008,9 +1014,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 
        trace_xhci_alloc_virt_device(dev);
 
+       xhci->devs[slot_id] = dev;
+
        return 1;
 fail:
-       xhci_free_virt_device(xhci, slot_id);
+
+       if (dev->in_ctx)
+               xhci_free_container_ctx(xhci, dev->in_ctx);
+       if (dev->out_ctx)
+               xhci_free_container_ctx(xhci, dev->out_ctx);
+       kfree(dev);
+
        return 0;
 }
 
index 7ef1274..1aad89b 100644 (file)
@@ -177,6 +177,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
                xhci->quirks |= XHCI_BROKEN_STREAMS;
        }
+       if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+                       pdev->device == 0x0014)
+               xhci->quirks |= XHCI_TRUST_TX_LENGTH;
        if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
                        pdev->device == 0x0015)
                xhci->quirks |= XHCI_RESET_ON_RESUME;
index c239c68..c5cbc68 100644 (file)
@@ -2477,12 +2477,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 */
                if (list_empty(&ep_ring->td_list)) {
                        /*
-                        * A stopped endpoint may generate an extra completion
-                        * event if the device was suspended.  Don't print
-                        * warnings.
+                        * Don't print warnings if it's due to a stopped endpoint
+                        * generating an extra completion event if the device
+                        * was suspended. Or, an event for the last TRB of a
+                        * short TD we already got a short event for.
+                        * The short TD is already removed from the TD list.
                         */
+
                        if (!(trb_comp_code == COMP_STOPPED ||
-                               trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
+                             trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+                             ep_ring->last_td_was_short)) {
                                xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
                                                TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
                                                ep_index);
@@ -3108,7 +3112,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 {
        u32 maxp, total_packet_count;
 
-       /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
+       /* MTK xHCI 0.96 contains some features from 1.0 */
        if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
                return ((td_total_len - transferred) >> 10);
 
@@ -3117,8 +3121,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
            trb_buff_len == td_total_len)
                return 0;
 
-       /* for MTK xHCI, TD size doesn't include this TRB */
-       if (xhci->quirks & XHCI_MTK_HOST)
+       /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
+       if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
                trb_buff_len = 0;
 
        maxp = usb_endpoint_maxp(&urb->ep->desc);
index 2424d30..da6dbe3 100644 (file)
@@ -3525,8 +3525,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
        struct xhci_slot_ctx *slot_ctx;
        int i, ret;
 
-       xhci_debugfs_remove_slot(xhci, udev->slot_id);
-
 #ifndef CONFIG_USB_DEFAULT_PERSIST
        /*
         * We called pm_runtime_get_noresume when the device was attached.
@@ -3555,8 +3553,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
        }
 
        ret = xhci_disable_slot(xhci, udev->slot_id);
-       if (ret)
+       if (ret) {
+               xhci_debugfs_remove_slot(xhci, udev->slot_id);
                xhci_free_virt_device(xhci, udev->slot_id);
+       }
 }
 
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
index 465dbf6..f723f7b 100644 (file)
@@ -279,6 +279,8 @@ static int usb3503_probe(struct usb3503 *hub)
        if (gpio_is_valid(hub->gpio_reset)) {
                err = devm_gpio_request_one(dev, hub->gpio_reset,
                                GPIOF_OUT_INIT_LOW, "usb3503 reset");
+               /* Datasheet defines a hardware reset to be at least 100us */
+               usleep_range(100, 10000);
                if (err) {
                        dev_err(dev,
                                "unable to request GPIO %d as reset pin (%d)\n",
index f6ae753..f932f40 100644 (file)
@@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
                break;
 
        case MON_IOCQ_RING_SIZE:
+               mutex_lock(&rp->fetch_lock);
                ret = rp->b_size;
+               mutex_unlock(&rp->fetch_lock);
                break;
 
        case MON_IOCT_RING_SIZE:
@@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf)
        unsigned long offset, chunk_idx;
        struct page *pageptr;
 
+       mutex_lock(&rp->fetch_lock);
        offset = vmf->pgoff << PAGE_SHIFT;
-       if (offset >= rp->b_size)
+       if (offset >= rp->b_size) {
+               mutex_unlock(&rp->fetch_lock);
                return VM_FAULT_SIGBUS;
+       }
        chunk_idx = offset / CHUNK_SIZE;
        pageptr = rp->b_vec[chunk_idx].pg;
        get_page(pageptr);
+       mutex_unlock(&rp->fetch_lock);
        vmf->page = pageptr;
        return 0;
 }
index 0397606..6c036de 100644 (file)
@@ -284,7 +284,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
                        musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
                        portstate(musb->port1_status |= USB_PORT_STAT_POWER);
                        del_timer(&musb->dev_timer);
-               } else {
+               } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
+                       /*
+                        * When a babble condition happens, a drvvbus interrupt
+                        * is also generated. Ignore this drvvbus interrupt
+                        * and let the babble interrupt handler recover the
+                        * controller; otherwise, the host-mode flag is lost
+                        * due to the MUSB_DEV_MODE() call below and babble
+                        * recovery logic will not be called.
+                        */
                        musb->is_active = 0;
                        MUSB_DEV_MODE(musb);
                        otg->default_a = 0;
index 7c6273b..06d502b 100644 (file)
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
        { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
        { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+       { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
        { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
        { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -174,6 +175,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+       { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */
        { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
        { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
        { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
index 1aba910..fc68952 100644 (file)
@@ -1013,6 +1013,7 @@ static const struct usb_device_id id_table_combined[] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
        { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+       { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
        { }                                     /* Terminating entry */
 };
 
index 4faa09f..8b4ecd2 100644 (file)
 #define ICPDAS_I7561U_PID              0x0104
 #define ICPDAS_I7563U_PID              0x0105
 
+/*
+ * Airbus Defence and Space
+ */
+#define AIRBUS_DS_VID                  0x1e8e  /* Vendor ID */
+#define AIRBUS_DS_P8GR                 0x6001  /* Tetra P8GR */
+
 /*
  * RT Systems programming cables for various ham radios
  */
index aaa7d90..b6320e3 100644 (file)
@@ -233,11 +233,14 @@ static void option_instat_callback(struct urb *urb);
 /* These Quectel products use Qualcomm's vendor ID */
 #define QUECTEL_PRODUCT_UC20                   0x9003
 #define QUECTEL_PRODUCT_UC15                   0x9090
+/* These Yuga products use Qualcomm's vendor ID */
+#define YUGA_PRODUCT_CLM920_NC5                        0x9625
 
 #define QUECTEL_VENDOR_ID                      0x2c7c
 /* These Quectel products use Quectel's vendor ID */
 #define QUECTEL_PRODUCT_EC21                   0x0121
 #define QUECTEL_PRODUCT_EC25                   0x0125
+#define QUECTEL_PRODUCT_BG96                   0x0296
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -279,6 +282,7 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_LE922_USBCFG3            0x1043
 #define TELIT_PRODUCT_LE922_USBCFG5            0x1045
 #define TELIT_PRODUCT_ME910                    0x1100
+#define TELIT_PRODUCT_ME910_DUAL_MODEM         0x1101
 #define TELIT_PRODUCT_LE920                    0x1200
 #define TELIT_PRODUCT_LE910                    0x1201
 #define TELIT_PRODUCT_LE910_USBCFG4            0x1206
@@ -644,6 +648,11 @@ static const struct option_blacklist_info telit_me910_blacklist = {
        .reserved = BIT(1) | BIT(3),
 };
 
+static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
+       .sendsetup = BIT(0),
+       .reserved = BIT(3),
+};
+
 static const struct option_blacklist_info telit_le910_blacklist = {
        .sendsetup = BIT(0),
        .reserved = BIT(1) | BIT(2),
@@ -673,6 +682,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
        .reserved = BIT(4) | BIT(5),
 };
 
+static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
+       .reserved = BIT(1) | BIT(4),
+};
+
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1177,11 +1190,16 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
        { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       /* Yuga products use Qualcomm vendor ID */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
+         .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
        /* Quectel products using Quectel vendor ID */
        { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1241,6 +1259,8 @@ static const struct usb_device_id option_ids[] = {
                .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
                .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+               .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
                .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
index e389254..613f91a 100644 (file)
@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x1199, 0x9079)},   /* Sierra Wireless EM74xx */
        {DEVICE_SWI(0x1199, 0x907a)},   /* Sierra Wireless EM74xx QDL */
        {DEVICE_SWI(0x1199, 0x907b)},   /* Sierra Wireless EM74xx */
+       {DEVICE_SWI(0x1199, 0x9090)},   /* Sierra Wireless EM7565 QDL */
+       {DEVICE_SWI(0x1199, 0x9091)},   /* Sierra Wireless EM7565 */
        {DEVICE_SWI(0x413c, 0x81a2)},   /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a3)},   /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a4)},   /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -342,6 +344,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
                        break;
                case 2:
                        dev_dbg(dev, "NMEA GPS interface found\n");
+                       sendsetup = true;
                        break;
                case 3:
                        dev_dbg(dev, "Modem port found\n");
index ab5a2ac..aaf4813 100644 (file)
@@ -31,12 +31,14 @@ static const struct usb_device_id id_table[] = {
 };
 
 static const struct usb_device_id dbc_id_table[] = {
+       { USB_DEVICE(0x1d6b, 0x0010) },
        { USB_DEVICE(0x1d6b, 0x0011) },
        { },
 };
 
 static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(0x0525, 0x127a) },
+       { USB_DEVICE(0x1d6b, 0x0010) },
        { USB_DEVICE(0x1d6b, 0x0011) },
        { },
 };
index 1fcd758..3734a25 100644 (file)
@@ -112,6 +112,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
                }
        }
 
+       /* All Seagate disk enclosures have broken ATA pass-through support */
+       if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
+               flags |= US_FL_NO_ATA_1X;
+
        usb_stor_adjust_quirks(udev, &flags);
 
        if (flags & US_FL_IGNORE_UAS) {
index 2968046..f72d045 100644 (file)
@@ -2100,6 +2100,13 @@ UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0116,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BROKEN_FUA ),
 
+/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+               "JMicron",
+               "JMS567",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BROKEN_FUA),
+
 /*
  * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
  * JMicron responds to USN and several other SCSI ioctls with a
index d520374..a7d08ae 100644 (file)
@@ -129,6 +129,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
 
+/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+               "JMicron",
+               "JMS567",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BROKEN_FUA),
+
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
                "VIA",
@@ -136,6 +143,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* Reported-by: Icenowy Zheng <icenowy@aosc.io> */
+UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
+               "Norelsys",
+               "NS1068X",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_UAS),
+
 /* Reported-by: Takeo Nakayama <javhera@gmx.com> */
 UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
                "JMicron",
index 465d7da..bcb2744 100644 (file)
@@ -1,13 +1,53 @@
 
-menu "USB Power Delivery and Type-C drivers"
+menuconfig TYPEC
+       tristate "USB Type-C Support"
+       help
+         USB Type-C Specification defines a cable and connector for USB where
+         only one type of plug is supported on both ends, i.e. there will not
+         be a Type-A plug on one end of the cable and a Type-B plug on the other.
+         Determination of the host-to-device relationship happens through a
+         specific Configuration Channel (CC) which goes through the USB Type-C
+         cable. The Configuration Channel may also be used to detect optional
+         Accessory Modes - Analog Audio and Debug - and if USB Power Delivery
+         is supported, the Alternate Modes, where the connector is used for
+         something else than USB communication.
+
+         USB Power Delivery Specification defines a protocol that can be used
+         to negotiate the voltage and current levels with the connected
+         partners. USB Power Delivery allows higher voltages than the normal
+         5V, up to 20V, and current up to 5A over the cable. The USB Power
+         Delivery protocol is also used to negotiate the optional Alternate
+         Modes when they are supported. USB Power Delivery does not depend on
+         the USB Type-C connector; however, it is mostly used together with USB
+         Type-C connectors.
+
+         USB Type-C and USB Power Delivery Specifications define a set of state
+         machines that need to be implemented in either software or firmware.
+         Simple USB Type-C PHYs, for example USB Type-C Port Controller
+         Interface Specification compliant "Port Controllers" need the state
+         machines to be handled in the OS, but stand-alone USB Type-C and Power
+         Delivery controllers handle the state machines inside their firmware.
+         The USB Type-C and Power Delivery controllers usually function
+         autonomously, and do not necessarily require drivers.
+
+         Enable this configuration option if you have USB Type-C connectors on
+         your system and 1) you know your USB Type-C hardware requires OS
+         control (a driver) to function, or 2) if you need to be able to read
+         the status of the USB Type-C ports in your system, or 3) if you need
+         to be able to swap the power role (decide whether you are supplying or
+         consuming power over the cable) or data role (host or device) when
+         both roles are supported.
+
+         For more information, see the kernel documentation for USB Type-C
+         Connector Class API (Documentation/driver-api/usb/typec.rst)
+         <https://www.kernel.org/doc/html/latest/driver-api/usb/typec.html>
+         and ABI (Documentation/ABI/testing/sysfs-class-typec).
 
-config TYPEC
-       tristate
+if TYPEC
 
 config TYPEC_TCPM
        tristate "USB Type-C Port Controller Manager"
        depends on USB
-       select TYPEC
        help
          The Type-C Port Controller Manager provides a USB PD and USB Type-C
          state machine for use with Type-C Port Controllers.
@@ -22,7 +62,6 @@ config TYPEC_WCOVE
        depends on INTEL_SOC_PMIC
        depends on INTEL_PMC_IPC
        depends on BXT_WC_PMIC_OPREGION
-       select TYPEC
        help
          This driver adds support for USB Type-C detection on Intel Broxton
          platforms that have Intel Whiskey Cove PMIC. The driver can detect the
@@ -31,14 +70,13 @@ config TYPEC_WCOVE
          To compile this driver as module, choose M here: the module will be
          called typec_wcove
 
-endif
+endif # TYPEC_TCPM
 
 source "drivers/usb/typec/ucsi/Kconfig"
 
 config TYPEC_TPS6598X
        tristate "TI TPS6598x USB Power Delivery controller driver"
        depends on I2C
-       select TYPEC
        help
          Say Y or M here if your system has TI TPS65982 or TPS65983 USB Power
          Delivery controller.
@@ -46,4 +84,4 @@ config TYPEC_TPS6598X
          If you choose to build this driver as a dynamically linked module, the
          module will be called tps6598x.ko.
 
-endmenu
+endif # TYPEC
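
The Kconfig rework above makes TYPEC a user-visible menuconfig symbol and drops the per-driver "select TYPEC" lines: port drivers are now declared inside the "if TYPEC ... endif # TYPEC" block (or take an ordinary dependency on it). A minimal sketch of how a driver entry fits into the new layout, using a hypothetical TYPEC_FOO symbol for illustration only:

	if TYPEC

	config TYPEC_FOO
		tristate "Hypothetical Type-C port controller driver"
		depends on I2C
		help
		  Illustrative entry only; it no longer needs "select TYPEC"
		  because it is only visible while TYPEC is enabled.

	endif # TYPEC
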
index d0c31ce..e36d6c7 100644 (file)
@@ -1,7 +1,6 @@
 config TYPEC_UCSI
        tristate "USB Type-C Connector System Software Interface driver"
        depends on !CPU_BIG_ENDIAN
-       select TYPEC
        help
          USB Type-C Connector System Software Interface (UCSI) is a
          specification for an interface that allows the operating system to
index a3df8ee..e31a6f2 100644 (file)
@@ -149,8 +149,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
         * step 1?
         */
        if (ud->tcp_socket) {
-               dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
-                       ud->tcp_socket);
+               dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
                kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
        }
 
index 4f48b30..c31c840 100644 (file)
@@ -237,11 +237,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
        struct stub_priv *priv;
        struct urb *urb;
 
-       dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
+       dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
 
        while ((priv = stub_priv_pop(sdev))) {
                urb = priv->urb;
-               dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
+               dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
+                       priv->seqnum);
                usb_kill_urb(urb);
 
                kmem_cache_free(stub_priv_cache, priv);
index 536e037..6c5a593 100644 (file)
@@ -211,9 +211,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
                if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
                        continue;
 
-               dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
-                        priv->urb);
-
                /*
                 * This matched urb is not completed yet (i.e., be in
                 * flight in usb hcd hardware/driver). Now we are
@@ -252,8 +249,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
                ret = usb_unlink_urb(priv->urb);
                if (ret != -EINPROGRESS)
                        dev_err(&priv->urb->dev->dev,
-                               "failed to unlink a urb %p, ret %d\n",
-                               priv->urb, ret);
+                               "failed to unlink a urb # %lu, ret %d\n",
+                               priv->seqnum, ret);
 
                return 0;
        }
@@ -322,23 +319,26 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
        return priv;
 }
 
-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 {
        struct usb_device *udev = sdev->udev;
        struct usb_host_endpoint *ep;
        struct usb_endpoint_descriptor *epd = NULL;
+       int epnum = pdu->base.ep;
+       int dir = pdu->base.direction;
+
+       if (epnum < 0 || epnum > 15)
+               goto err_ret;
 
        if (dir == USBIP_DIR_IN)
                ep = udev->ep_in[epnum & 0x7f];
        else
                ep = udev->ep_out[epnum & 0x7f];
-       if (!ep) {
-               dev_err(&sdev->udev->dev, "no such endpoint?, %d\n",
-                       epnum);
-               BUG();
-       }
+       if (!ep)
+               goto err_ret;
 
        epd = &ep->desc;
+
        if (usb_endpoint_xfer_control(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndctrlpipe(udev, epnum);
@@ -361,15 +361,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
        }
 
        if (usb_endpoint_xfer_isoc(epd)) {
+               /* validate packet size and number of packets */
+               unsigned int maxp, packets, bytes;
+
+               maxp = usb_endpoint_maxp(epd);
+               maxp *= usb_endpoint_maxp_mult(epd);
+               bytes = pdu->u.cmd_submit.transfer_buffer_length;
+               packets = DIV_ROUND_UP(bytes, maxp);
+
+               if (pdu->u.cmd_submit.number_of_packets < 0 ||
+                   pdu->u.cmd_submit.number_of_packets > packets) {
+                       dev_err(&sdev->udev->dev,
+                               "CMD_SUBMIT: isoc invalid num packets %d\n",
+                               pdu->u.cmd_submit.number_of_packets);
+                       return -1;
+               }
                if (dir == USBIP_DIR_OUT)
                        return usb_sndisocpipe(udev, epnum);
                else
                        return usb_rcvisocpipe(udev, epnum);
        }
 
+err_ret:
        /* NOT REACHED */
-       dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum);
-       return 0;
+       dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
+       return -1;
 }
 
 static void masking_bogus_flags(struct urb *urb)
@@ -433,7 +449,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
        struct stub_priv *priv;
        struct usbip_device *ud = &sdev->ud;
        struct usb_device *udev = sdev->udev;
-       int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
+       int pipe = get_pipe(sdev, pdu);
+
+       if (pipe == -1)
+               return;
 
        priv = stub_priv_alloc(sdev, pdu);
        if (!priv)
index b18bce9..f0ec41a 100644 (file)
@@ -88,7 +88,7 @@ void stub_complete(struct urb *urb)
        /* link a urb to the queue of tx. */
        spin_lock_irqsave(&sdev->priv_lock, flags);
        if (sdev->ud.tcp_socket == NULL) {
-               usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
+               usbip_dbg_stub_tx("ignore urb for closed connection\n");
                /* It will be freed in stub_device_cleanup_urbs(). */
        } else if (priv->unlinking) {
                stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
@@ -167,6 +167,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
                memset(&pdu_header, 0, sizeof(pdu_header));
                memset(&msg, 0, sizeof(msg));
 
+               if (urb->actual_length > 0 && !urb->transfer_buffer) {
+                       dev_err(&sdev->udev->dev,
+                               "urb: actual_length %d transfer_buffer null\n",
+                               urb->actual_length);
+                       return -1;
+               }
+
                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
                        iovnum = 2 + urb->number_of_packets;
                else
@@ -183,8 +190,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
 
                /* 1. setup usbip_header */
                setup_ret_submit_pdu(&pdu_header, urb);
-               usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
-                                 pdu_header.base.seqnum, urb);
+               usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+                                 pdu_header.base.seqnum);
                usbip_header_correct_endian(&pdu_header, 1);
 
                iov[iovnum].iov_base = &pdu_header;
index f797893..ee2bbce 100644 (file)
@@ -91,7 +91,7 @@ static void usbip_dump_usb_device(struct usb_device *udev)
        dev_dbg(dev, "       devnum(%d) devpath(%s) usb speed(%s)",
                udev->devnum, udev->devpath, usb_speed_string(udev->speed));
 
-       pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport);
+       pr_debug("tt hub ttport %d\n", udev->ttport);
 
        dev_dbg(dev, "                    ");
        for (i = 0; i < 16; i++)
@@ -124,12 +124,8 @@ static void usbip_dump_usb_device(struct usb_device *udev)
        }
        pr_debug("\n");
 
-       dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus);
-
-       dev_dbg(dev,
-               "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
-               &udev->descriptor, udev->config,
-               udev->actconfig, udev->rawdescriptors);
+       dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev),
+               udev->bus->bus_name);
 
        dev_dbg(dev, "have_langid %d, string_langid %d\n",
                udev->have_langid, udev->string_langid);
@@ -237,9 +233,6 @@ void usbip_dump_urb(struct urb *urb)
 
        dev = &urb->dev->dev;
 
-       dev_dbg(dev, "   urb                   :%p\n", urb);
-       dev_dbg(dev, "   dev                   :%p\n", urb->dev);
-
        usbip_dump_usb_device(urb->dev);
 
        dev_dbg(dev, "   pipe                  :%08x ", urb->pipe);
@@ -248,11 +241,9 @@ void usbip_dump_urb(struct urb *urb)
 
        dev_dbg(dev, "   status                :%d\n", urb->status);
        dev_dbg(dev, "   transfer_flags        :%08X\n", urb->transfer_flags);
-       dev_dbg(dev, "   transfer_buffer       :%p\n", urb->transfer_buffer);
        dev_dbg(dev, "   transfer_buffer_length:%d\n",
                                                urb->transfer_buffer_length);
        dev_dbg(dev, "   actual_length         :%d\n", urb->actual_length);
-       dev_dbg(dev, "   setup_packet          :%p\n", urb->setup_packet);
 
        if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
                usbip_dump_usb_ctrlrequest(
@@ -262,8 +253,6 @@ void usbip_dump_urb(struct urb *urb)
        dev_dbg(dev, "   number_of_packets     :%d\n", urb->number_of_packets);
        dev_dbg(dev, "   interval              :%d\n", urb->interval);
        dev_dbg(dev, "   error_count           :%d\n", urb->error_count);
-       dev_dbg(dev, "   context               :%p\n", urb->context);
-       dev_dbg(dev, "   complete              :%p\n", urb->complete);
 }
 EXPORT_SYMBOL_GPL(usbip_dump_urb);
 
@@ -317,26 +306,20 @@ int usbip_recv(struct socket *sock, void *buf, int size)
        struct msghdr msg = {.msg_flags = MSG_NOSIGNAL};
        int total = 0;
 
+       if (!sock || !buf || !size)
+               return -EINVAL;
+
        iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size);
 
        usbip_dbg_xmit("enter\n");
 
-       if (!sock || !buf || !size) {
-               pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
-                      size);
-               return -EINVAL;
-       }
-
        do {
-               int sz = msg_data_left(&msg);
+               msg_data_left(&msg);
                sock->sk->sk_allocation = GFP_NOIO;
 
                result = sock_recvmsg(sock, &msg, MSG_WAITALL);
-               if (result <= 0) {
-                       pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
-                                sock, buf + total, sz, result, total);
+               if (result <= 0)
                        goto err;
-               }
 
                total += result;
        } while (msg_data_left(&msg));
index e5de35c..473fb8a 100644 (file)
@@ -256,6 +256,7 @@ struct usbip_device {
        /* lock for status */
        spinlock_t lock;
 
+       int sockfd;
        struct socket *tcp_socket;
 
        struct task_struct *tcp_rx;
index 713e941..c3e1008 100644 (file)
@@ -656,9 +656,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
        struct vhci_device *vdev;
        unsigned long flags;
 
-       usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
-                         hcd, urb, mem_flags);
-
        if (portnum > VHCI_HC_PORTS) {
                pr_err("invalid port number %d\n", portnum);
                return -ENODEV;
@@ -822,8 +819,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
        struct vhci_device *vdev;
        unsigned long flags;
 
-       pr_info("dequeue a urb %p\n", urb);
-
        spin_lock_irqsave(&vhci->lock, flags);
 
        priv = urb->hcpriv;
@@ -851,7 +846,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                /* tcp connection is closed */
                spin_lock(&vdev->priv_lock);
 
-               pr_info("device %p seems to be disconnected\n", vdev);
                list_del(&priv->list);
                kfree(priv);
                urb->hcpriv = NULL;
@@ -863,8 +857,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                 * vhci_rx will receive RET_UNLINK and give back the URB.
                 * Otherwise, we give back it here.
                 */
-               pr_info("gives back urb %p\n", urb);
-
                usb_hcd_unlink_urb_from_ep(hcd, urb);
 
                spin_unlock_irqrestore(&vhci->lock, flags);
@@ -892,8 +884,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
                unlink->unlink_seqnum = priv->seqnum;
 
-               pr_info("device %p seems to be still connected\n", vdev);
-
                /* send cmd_unlink and try to cancel the pending URB in the
                 * peer */
                list_add_tail(&unlink->list, &vdev->unlink_tx);
@@ -975,7 +965,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
 
        /* need this? see stub_dev.c */
        if (ud->tcp_socket) {
-               pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
+               pr_debug("shutdown tcp_socket %d\n", ud->sockfd);
                kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
        }
 
@@ -1098,7 +1088,6 @@ static int hcd_name_to_id(const char *name)
 static int vhci_setup(struct usb_hcd *hcd)
 {
        struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
-       hcd->self.sg_tablesize = ~0;
        if (usb_hcd_is_primary_hcd(hcd)) {
                vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd);
                vhci->vhci_hcd_hs->vhci = vhci;
index 90577e8..112ebb9 100644 (file)
@@ -23,24 +23,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
                urb = priv->urb;
                status = urb->status;
 
-               usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
-                               urb, priv, seqnum);
+               usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
 
                switch (status) {
                case -ENOENT:
                        /* fall through */
                case -ECONNRESET:
-                       dev_info(&urb->dev->dev,
-                                "urb %p was unlinked %ssynchronuously.\n", urb,
-                                status == -ENOENT ? "" : "a");
+                       dev_dbg(&urb->dev->dev,
+                                "urb seq# %u was unlinked %ssynchronously\n",
+                                seqnum, status == -ENOENT ? "" : "a");
                        break;
                case -EINPROGRESS:
                        /* no info output */
                        break;
                default:
-                       dev_info(&urb->dev->dev,
-                                "urb %p may be in a error, status %d\n", urb,
-                                status);
+                       dev_dbg(&urb->dev->dev,
+                                "urb seq# %u may be in error, status %d\n",
+                                seqnum, status);
                }
 
                list_del(&priv->list);
@@ -67,8 +66,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
        spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
        if (!urb) {
-               pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
-               pr_info("max seqnum %d\n",
+               pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
+                       pdu->base.seqnum,
                        atomic_read(&vhci_hcd->seqnum));
                usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
                return;
@@ -91,7 +90,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
        if (usbip_dbg_flag_vhci_rx)
                usbip_dump_urb(urb);
 
-       usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+       usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
 
        spin_lock_irqsave(&vhci->lock, flags);
        usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
@@ -158,7 +157,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
                pr_info("the urb (seqnum %d) was already given back\n",
                        pdu->base.seqnum);
        } else {
-               usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+               usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
 
                /* If unlink is successful, status is -ECONNRESET */
                urb->status = pdu->u.ret_unlink.status;
index e78f747..091f76b 100644 (file)
 
 /*
  * output example:
- * hub port sta spd dev      socket           local_busid
- * hs  0000 004 000 00000000         c5a7bb80 1-2.3
+ * hub port sta spd dev       sockfd    local_busid
+ * hs  0000 004 000 00000000  3         1-2.3
  * ................................................
- * ss  0008 004 000 00000000         d8cee980 2-3.4
+ * ss  0008 004 000 00000000  4         2-3.4
  * ................................................
  *
- * IP address can be retrieved from a socket pointer address by looking
- * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
- * port number and its peer IP address.
+ * Output includes socket fd instead of socket pointer address to avoid
+ * leaking kernel memory address in:
+ *     /sys/devices/platform/vhci_hcd.0/status and in debug output.
+ * The socket pointer address is not used at the moment and it was made
+ * visible as a convenient way to find IP address from socket pointer
+ * address by looking up /proc/net/{tcp,tcp6}. As this opens a security
+ * hole, the change is made to use sockfd instead.
+ *
  */
 static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev)
 {
@@ -39,8 +44,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd
        if (vdev->ud.status == VDEV_ST_USED) {
                *out += sprintf(*out, "%03u %08x ",
                                      vdev->speed, vdev->devid);
-               *out += sprintf(*out, "%16p %s",
-                                     vdev->ud.tcp_socket,
+               *out += sprintf(*out, "%u %s",
+                                     vdev->ud.sockfd,
                                      dev_name(&vdev->udev->dev));
 
        } else {
@@ -160,7 +165,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr,
        char *s = out;
 
        /*
-        * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2.
+        * Half the ports are for SPEED_HIGH and half for SPEED_SUPER,
+        * thus the * 2.
         */
        out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers);
        return out - s;
@@ -366,6 +372,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
 
        vdev->devid         = devid;
        vdev->speed         = speed;
+       vdev->ud.sockfd     = sockfd;
        vdev->ud.tcp_socket = socket;
        vdev->ud.status     = VDEV_ST_NOTASSIGNED;
 
index d625a2f..9aed15a 100644 (file)
@@ -69,7 +69,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
                memset(&msg, 0, sizeof(msg));
                memset(&iov, 0, sizeof(iov));
 
-               usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
+               usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
+                                 priv->seqnum);
 
                /* 1. setup usbip_header */
                setup_cmd_submit_pdu(&pdu_header, urb);
index df1e309..1e8a23d 100644 (file)
@@ -120,6 +120,25 @@ static int v_recv_cmd_submit(struct vudc *udc,
        urb_p->new = 1;
        urb_p->seqnum = pdu->base.seqnum;
 
+       if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) {
+               /* validate packet size and number of packets */
+               unsigned int maxp, packets, bytes;
+
+               maxp = usb_endpoint_maxp(urb_p->ep->desc);
+               maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc);
+               bytes = pdu->u.cmd_submit.transfer_buffer_length;
+               packets = DIV_ROUND_UP(bytes, maxp);
+
+               if (pdu->u.cmd_submit.number_of_packets < 0 ||
+                   pdu->u.cmd_submit.number_of_packets > packets) {
+                       dev_err(&udc->gadget.dev,
+                               "CMD_SUBMIT: isoc invalid num packets %d\n",
+                               pdu->u.cmd_submit.number_of_packets);
+                       ret = -EMSGSIZE;
+                       goto free_urbp;
+               }
+       }
+
        ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
        if (ret) {
                usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
index 1440ae0..3ccb17c 100644 (file)
@@ -85,6 +85,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
        memset(&pdu_header, 0, sizeof(pdu_header));
        memset(&msg, 0, sizeof(msg));
 
+       if (urb->actual_length > 0 && !urb->transfer_buffer) {
+               dev_err(&udc->gadget.dev,
+                       "urb: actual_length %d transfer_buffer null\n",
+                       urb->actual_length);
+               return -1;
+       }
+
        if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
                iovnum = 2 + urb->number_of_packets;
        else
@@ -100,8 +107,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
 
        /* 1. setup usbip_header */
        setup_ret_submit_pdu(&pdu_header, urb_p);
-       usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
-                         pdu_header.base.seqnum, urb);
+       usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+                         pdu_header.base.seqnum);
        usbip_header_correct_endian(&pdu_header, 1);
 
        iov[iovnum].iov_base = &pdu_header;
index 8d626d7..c7bdeb6 100644 (file)
@@ -778,16 +778,6 @@ static void handle_rx(struct vhost_net *net)
                /* On error, stop handling until the next kick. */
                if (unlikely(headcount < 0))
                        goto out;
-               if (nvq->rx_array)
-                       msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
-               /* On overrun, truncate and discard */
-               if (unlikely(headcount > UIO_MAXIOV)) {
-                       iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
-                       err = sock->ops->recvmsg(sock, &msg,
-                                                1, MSG_DONTWAIT | MSG_TRUNC);
-                       pr_debug("Discarded rx packet: len %zd\n", sock_len);
-                       continue;
-               }
                /* OK, now we need to know about added descriptors. */
                if (!headcount) {
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -800,6 +790,16 @@ static void handle_rx(struct vhost_net *net)
                         * they refilled. */
                        goto out;
                }
+               if (nvq->rx_array)
+                       msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
+               /* On overrun, truncate and discard */
+               if (unlikely(headcount > UIO_MAXIOV)) {
+                       iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
+                       err = sock->ops->recvmsg(sock, &msg,
+                                                1, MSG_DONTWAIT | MSG_TRUNC);
+                       pr_debug("Discarded rx packet: len %zd\n", sock_len);
+                       continue;
+               }
                /* We don't need to be notified again. */
                iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
                fixup = msg.msg_iter;
index 33ac2b1..5727b18 100644 (file)
@@ -904,7 +904,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d)
 {
        int i = 0;
        for (i = 0; i < d->nvqs; ++i)
-               mutex_lock(&d->vqs[i]->mutex);
+               mutex_lock_nested(&d->vqs[i]->mutex, i);
 }
 
 static void vhost_dev_unlock_vqs(struct vhost_dev *d)
@@ -1015,6 +1015,10 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
                vhost_iotlb_notify_vq(dev, msg);
                break;
        case VHOST_IOTLB_INVALIDATE:
+               if (!dev->iotlb) {
+                       ret = -EFAULT;
+                       break;
+               }
                vhost_vq_meta_reset(dev);
                vhost_del_umem_range(dev->iotlb, msg->iova,
                                     msg->iova + msg->size - 1);
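
The hunk above gives each virtqueue mutex its own lockdep subclass, so that locking every queue in sequence for IOTLB processing is not flagged as recursive locking, and it rejects VHOST_IOTLB_INVALIDATE when no IOTLB has been attached. A minimal sketch of the subclass idiom, using illustrative names that are not part of the patch:

#include <linux/mutex.h>

#define DEMO_NQUEUES	8	/* illustrative; lockdep supports at most 8 subclasses */

struct demo_dev {
	struct mutex q_lock[DEMO_NQUEUES];
};

/* All q_lock[] mutexes share one lock class; passing a distinct subclass
 * per acquisition tells lockdep that the nesting is intentional. */
static void demo_lock_all(struct demo_dev *d)
{
	int i;

	for (i = 0; i < DEMO_NQUEUES; i++)
		mutex_lock_nested(&d->q_lock[i], i);
}

static void demo_unlock_all(struct demo_dev *d)
{
	int i;

	for (i = 0; i < DEMO_NQUEUES; i++)
		mutex_unlock(&d->q_lock[i]);
}
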
index d843296..6a34ab9 100644 (file)
@@ -143,7 +143,7 @@ static int apple_bl_add(struct acpi_device *dev)
        struct pci_dev *host;
        int intensity;
 
-       host = pci_get_bus_and_slot(0, 0);
+       host = pci_get_domain_bus_and_slot(0, 0, 0);
 
        if (!host) {
                pr_err("unable to find PCI host\n");
index d7c239e..f557406 100644 (file)
@@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data)
        struct spi_message msg;
        struct spi_transfer xfer = {
                .len            = 1,
-               .cs_change      = 1,
+               .cs_change      = 0,
                .tx_buf         = lcd->buf,
        };
 
index eab1f84..e4bd63e 100644 (file)
@@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi)
 
        spi_message_init(m);
 
-       x->cs_change = 1;
+       x->cs_change = 0;
        x->tx_buf = &lcd->buf[0];
        spi_message_add_tail(x, m);
 
index 6a41ea9..4dc5ee8 100644 (file)
@@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
        struct spi_message msg;
        struct spi_transfer xfer = {
                .len            = 1,
-               .cs_change      = 1,
+               .cs_change      = 0,
                .tx_buf         = buf,
        };
 
index 48230a5..bf7ff39 100644 (file)
@@ -333,6 +333,8 @@ int register_virtio_device(struct virtio_device *dev)
        /* device_register() causes the bus infrastructure to look for a
         * matching driver. */
        err = device_register(&dev->dev);
+       if (err)
+               ida_simple_remove(&virtio_index_ida, dev->index);
 out:
        if (err)
                virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
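
The added ida_simple_remove() undoes the index allocation when device_register() fails; nothing else would release it, since the device never became visible on the bus. A rough sketch of the allocate-then-undo pattern under hypothetical names (demo_ida, demo_register), not the virtio code itself:

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(demo_ida);

static int demo_register(struct device *dev)
{
	int index, err;

	index = ida_simple_get(&demo_ida, 0, 0, GFP_KERNEL);
	if (index < 0)
		return index;

	err = device_register(dev);
	if (err) {
		/* Undo everything done before the failed registration. */
		ida_simple_remove(&demo_ida, index);
		/* Per the driver core rules, a failed device_register()
		 * still requires a final put_device(). */
		put_device(dev);
		return err;
	}

	return index;
}
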
index 7960746..a1fb52c 100644 (file)
@@ -174,13 +174,12 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
        while ((page = balloon_page_pop(&pages))) {
                balloon_page_enqueue(&vb->vb_dev_info, page);
 
-               vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
-
                set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
                vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
                if (!virtio_has_feature(vb->vdev,
                                        VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                        adjust_managed_page_count(page, -1);
+               vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
        }
 
        num_allocated_pages = vb->num_pfns;
index 74dc717..c92131e 100644 (file)
@@ -493,7 +493,16 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
 };
 
 
-static void virtio_mmio_release_dev_empty(struct device *_d) {}
+static void virtio_mmio_release_dev(struct device *_d)
+{
+       struct virtio_device *vdev =
+                       container_of(_d, struct virtio_device, dev);
+       struct virtio_mmio_device *vm_dev =
+                       container_of(vdev, struct virtio_mmio_device, vdev);
+       struct platform_device *pdev = vm_dev->pdev;
+
+       devm_kfree(&pdev->dev, vm_dev);
+}
 
 /* Platform device */
 
@@ -514,10 +523,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 
        vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
        if (!vm_dev)
-               return  -ENOMEM;
+               return -ENOMEM;
 
        vm_dev->vdev.dev.parent = &pdev->dev;
-       vm_dev->vdev.dev.release = virtio_mmio_release_dev_empty;
+       vm_dev->vdev.dev.release = virtio_mmio_release_dev;
        vm_dev->vdev.config = &virtio_mmio_config_ops;
        vm_dev->pdev = pdev;
        INIT_LIST_HEAD(&vm_dev->virtqueues);
@@ -573,13 +582,16 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, vm_dev);
 
-       return register_virtio_device(&vm_dev->vdev);
+       rc = register_virtio_device(&vm_dev->vdev);
+       if (rc)
+               put_device(&vm_dev->vdev.dev);
+
+       return rc;
 }
 
 static int virtio_mmio_remove(struct platform_device *pdev)
 {
        struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
-
        unregister_virtio_device(&vm_dev->vdev);
 
        return 0;
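
Replacing the empty release callback matters because memory that embeds a struct device may only be freed from its release() hook, once the last reference is gone, and a failed register_virtio_device() is now answered with put_device() instead of a direct free. A sketch of that rule with illustrative names (the driver above releases devm-allocated memory rather than a plain kzalloc() buffer):

#include <linux/device.h>
#include <linux/slab.h>

struct demo_wrapper {
	struct device dev;
	int state;
};

/* Runs when the last reference to the embedded device is dropped. */
static void demo_release(struct device *dev)
{
	kfree(container_of(dev, struct demo_wrapper, dev));
}

static struct demo_wrapper *demo_create(void)
{
	struct demo_wrapper *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return NULL;

	device_initialize(&w->dev);
	w->dev.release = demo_release;

	/* On any later error, call put_device(&w->dev) -- never kfree(w) --
	 * so demo_release() performs the actual free. */
	return w;
}
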
index ca200d1..5bf613d 100644 (file)
@@ -223,6 +223,13 @@ config ZIIRAVE_WATCHDOG
          To compile this driver as a module, choose M here: the
          module will be called ziirave_wdt.
 
+config RAVE_SP_WATCHDOG
+       tristate "RAVE SP Watchdog timer"
+       depends on RAVE_SP_CORE
+       select WATCHDOG_CORE
+       help
+         Support for the watchdog on the RAVE SP device.
+
 # ALPHA Architecture
 
 # ARM Architecture
index 715a210..135c5e8 100644 (file)
@@ -224,3 +224,4 @@ obj-$(CONFIG_MAX77620_WATCHDOG) += max77620_wdt.o
 obj-$(CONFIG_ZIIRAVE_WATCHDOG) += ziirave_wdt.o
 obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
 obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o
+obj-$(CONFIG_RAVE_SP_WATCHDOG) += rave-sp-wdt.o
diff --git a/drivers/watchdog/rave-sp-wdt.c b/drivers/watchdog/rave-sp-wdt.c
new file mode 100644 (file)
index 0000000..35db173
--- /dev/null
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Driver for the watchdog aspect of the Zodiac Inflight Innovations RAVE
+ * Supervisory Processor (SP) MCU
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovation
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+enum {
+       RAVE_SP_RESET_BYTE = 1,
+       RAVE_SP_RESET_REASON_NORMAL = 0,
+       RAVE_SP_RESET_DELAY_MS = 500,
+};
+
+/**
+ * struct rave_sp_wdt_variant - RAVE SP watchdog variant
+ *
+ * @max_timeout:       Largest possible watchdog timeout setting
+ * @min_timeout:       Smallest possible watchdog timeout setting
+ *
+ * @configure:         Function to send configuration command
+ * @restart:           Function to send "restart" command
+ */
+struct rave_sp_wdt_variant {
+       unsigned int max_timeout;
+       unsigned int min_timeout;
+
+       int (*configure)(struct watchdog_device *, bool);
+       int (*restart)(struct watchdog_device *);
+};
+
+/**
+ * struct rave_sp_wdt - RAVE SP watchdog
+ *
+ * @wdd:               Underlying watchdog device
+ * @sp:                        Pointer to parent RAVE SP device
+ * @variant:           Device specific variant information
+ * @reboot_notifier:   Reboot notifier implementing machine reset
+ */
+struct rave_sp_wdt {
+       struct watchdog_device wdd;
+       struct rave_sp *sp;
+       const struct rave_sp_wdt_variant *variant;
+       struct notifier_block reboot_notifier;
+};
+
+static struct rave_sp_wdt *to_rave_sp_wdt(struct watchdog_device *wdd)
+{
+       return container_of(wdd, struct rave_sp_wdt, wdd);
+}
+
+static int rave_sp_wdt_exec(struct watchdog_device *wdd, void *data,
+                           size_t data_size)
+{
+       return rave_sp_exec(to_rave_sp_wdt(wdd)->sp,
+                           data, data_size, NULL, 0);
+}
+
+static int rave_sp_wdt_legacy_configure(struct watchdog_device *wdd, bool on)
+{
+       u8 cmd[] = {
+               [0] = RAVE_SP_CMD_SW_WDT,
+               [1] = 0,
+               [2] = 0,
+               [3] = on,
+               [4] = on ? wdd->timeout : 0,
+       };
+
+       return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_configure(struct watchdog_device *wdd, bool on)
+{
+       u8 cmd[] = {
+               [0] = RAVE_SP_CMD_SW_WDT,
+               [1] = 0,
+               [2] = on,
+               [3] = (u8)wdd->timeout,
+               [4] = (u8)(wdd->timeout >> 8),
+       };
+
+       return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+/**
+ * rave_sp_wdt_configure - Configure watchdog device
+ *
+ * @wdd:       Device to configure
+ * @on:                Desired state of the watchdog timer (ON/OFF)
+ *
+ * This function configures two aspects of the watchdog timer:
+ *
+ *  - Whether it is ON or OFF
+ *  - Its timeout duration
+ *
+ * with the first aspect specified via the function argument and the
+ * second via the value of 'wdd->timeout'.
+ */
+static int rave_sp_wdt_configure(struct watchdog_device *wdd, bool on)
+{
+       return to_rave_sp_wdt(wdd)->variant->configure(wdd, on);
+}
+
+static int rave_sp_wdt_legacy_restart(struct watchdog_device *wdd)
+{
+       u8 cmd[] = {
+               [0] = RAVE_SP_CMD_RESET,
+               [1] = 0,
+               [2] = RAVE_SP_RESET_BYTE
+       };
+
+       return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_restart(struct watchdog_device *wdd)
+{
+       u8 cmd[] = {
+               [0] = RAVE_SP_CMD_RESET,
+               [1] = 0,
+               [2] = RAVE_SP_RESET_BYTE,
+               [3] = RAVE_SP_RESET_REASON_NORMAL
+       };
+
+       return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_reboot_notifier(struct notifier_block *nb,
+                                      unsigned long action, void *data)
+{
+       /*
+        * The restart handler is called in atomic context, which means we
+        * can't communicate with the SP over UART. Luckily for us, the SP
+        * waits 500 ms before actually resetting us, so we ask it to do so
+        * here and let the rest of the system go on wrapping things up.
+        */
+       if (action == SYS_DOWN || action == SYS_HALT) {
+               struct rave_sp_wdt *sp_wd =
+                       container_of(nb, struct rave_sp_wdt, reboot_notifier);
+
+               const int ret = sp_wd->variant->restart(&sp_wd->wdd);
+
+               if (ret < 0)
+                       dev_err(sp_wd->wdd.parent,
+                               "Failed to issue restart command (%d)", ret);
+               return NOTIFY_OK;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static int rave_sp_wdt_restart(struct watchdog_device *wdd,
+                              unsigned long action, void *data)
+{
+       /*
+        * The actual work was done by the reboot notifier above. The SP
+        * firmware waits 500 ms before issuing the reset, so let's hang
+        * here for twice that delay and hopefully we'll never reach
+        * the return statement.
+        */
+       mdelay(2 * RAVE_SP_RESET_DELAY_MS);
+
+       return -EIO;
+}
+
+static int rave_sp_wdt_start(struct watchdog_device *wdd)
+{
+       int ret;
+
+       ret = rave_sp_wdt_configure(wdd, true);
+       if (!ret)
+               set_bit(WDOG_HW_RUNNING, &wdd->status);
+
+       return ret;
+}
+
+static int rave_sp_wdt_stop(struct watchdog_device *wdd)
+{
+       return rave_sp_wdt_configure(wdd, false);
+}
+
+static int rave_sp_wdt_set_timeout(struct watchdog_device *wdd,
+                                  unsigned int timeout)
+{
+       wdd->timeout = timeout;
+
+       return rave_sp_wdt_configure(wdd, watchdog_active(wdd));
+}
+
+static int rave_sp_wdt_ping(struct watchdog_device *wdd)
+{
+       u8 cmd[] = {
+               [0] = RAVE_SP_CMD_PET_WDT,
+               [1] = 0,
+       };
+
+       return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static const struct watchdog_info rave_sp_wdt_info = {
+       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+       .identity = "RAVE SP Watchdog",
+};
+
+static const struct watchdog_ops rave_sp_wdt_ops = {
+       .owner = THIS_MODULE,
+       .start = rave_sp_wdt_start,
+       .stop = rave_sp_wdt_stop,
+       .ping = rave_sp_wdt_ping,
+       .set_timeout = rave_sp_wdt_set_timeout,
+       .restart = rave_sp_wdt_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_legacy = {
+       .max_timeout = 255,
+       .min_timeout = 1,
+       .configure = rave_sp_wdt_legacy_configure,
+       .restart   = rave_sp_wdt_legacy_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_rdu = {
+       .max_timeout = 180,
+       .min_timeout = 60,
+       .configure = rave_sp_wdt_rdu_configure,
+       .restart   = rave_sp_wdt_rdu_restart,
+};
+
+static const struct of_device_id rave_sp_wdt_of_match[] = {
+       {
+               .compatible = "zii,rave-sp-watchdog-legacy",
+               .data = &rave_sp_wdt_legacy,
+       },
+       {
+               .compatible = "zii,rave-sp-watchdog",
+               .data = &rave_sp_wdt_rdu,
+       },
+       { /* sentinel */ }
+};
+
+static int rave_sp_wdt_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct watchdog_device *wdd;
+       struct rave_sp_wdt *sp_wd;
+       struct nvmem_cell *cell;
+       __le16 timeout = 0;
+       int ret;
+
+       sp_wd = devm_kzalloc(dev, sizeof(*sp_wd), GFP_KERNEL);
+       if (!sp_wd)
+               return -ENOMEM;
+
+       sp_wd->variant = of_device_get_match_data(dev);
+       sp_wd->sp      = dev_get_drvdata(dev->parent);
+
+       wdd              = &sp_wd->wdd;
+       wdd->parent      = dev;
+       wdd->info        = &rave_sp_wdt_info;
+       wdd->ops         = &rave_sp_wdt_ops;
+       wdd->min_timeout = sp_wd->variant->min_timeout;
+       wdd->max_timeout = sp_wd->variant->max_timeout;
+       wdd->status      = WATCHDOG_NOWAYOUT_INIT_STATUS;
+       wdd->timeout     = 60;
+
+       cell = nvmem_cell_get(dev, "wdt-timeout");
+       if (!IS_ERR(cell)) {
+               size_t len;
+               void *value = nvmem_cell_read(cell, &len);
+
+               if (!IS_ERR(value)) {
+                       memcpy(&timeout, value, min(len, sizeof(timeout)));
+                       kfree(value);
+               }
+               nvmem_cell_put(cell);
+       }
+       watchdog_init_timeout(wdd, le16_to_cpu(timeout), dev);
+       watchdog_set_restart_priority(wdd, 255);
+       watchdog_stop_on_unregister(wdd);
+
+       sp_wd->reboot_notifier.notifier_call = rave_sp_wdt_reboot_notifier;
+       ret = devm_register_reboot_notifier(dev, &sp_wd->reboot_notifier);
+       if (ret) {
+               dev_err(dev, "Failed to register reboot notifier\n");
+               return ret;
+       }
+
+       /*
+        * We don't know if the watchdog is running now. To be sure, let's
+        * start it and depend on the watchdog core to ping it.
+        */
+       wdd->max_hw_heartbeat_ms = wdd->max_timeout * 1000;
+       ret = rave_sp_wdt_start(wdd);
+       if (ret) {
+               dev_err(dev, "Watchdog didn't start\n");
+               return ret;
+       }
+
+       ret = devm_watchdog_register_device(dev, wdd);
+       if (ret) {
+               dev_err(dev, "Failed to register watchdog device\n");
+               rave_sp_wdt_stop(wdd);
+               return ret;
+       }
+
+       return 0;
+}
+
+static struct platform_driver rave_sp_wdt_driver = {
+       .probe = rave_sp_wdt_probe,
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .of_match_table = rave_sp_wdt_of_match,
+       },
+};
+
+module_platform_driver(rave_sp_wdt_driver);
+
+MODULE_DEVICE_TABLE(of, rave_sp_wdt_of_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP Watchdog driver");
+MODULE_ALIAS("platform:rave-sp-watchdog");
index d8dd546..e5d0c28 100644 (file)
@@ -269,7 +269,7 @@ config XEN_ACPI_HOTPLUG_CPU
 
 config XEN_ACPI_PROCESSOR
        tristate "Xen ACPI processor"
-       depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
+       depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
        default m
        help
           This ACPI processor uploads Power Management information to the Xen
index f77e499..065f0b6 100644 (file)
@@ -257,10 +257,25 @@ static void release_memory_resource(struct resource *resource)
        kfree(resource);
 }
 
+/*
+ * Host memory not allocated to dom0. We can use this range for hotplug-based
+ * ballooning.
+ *
+ * It's a type-less resource. Setting IORESOURCE_MEM will make resource
+ * management algorithms (arch_remove_reservations()) look into guest e820,
+ * which we don't want.
+ */
+static struct resource hostmem_resource = {
+       .name   = "Host RAM",
+};
+
+void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
+{}
+
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
-       struct resource *res;
-       int ret;
+       struct resource *res, *res_hostmem;
+       int ret = -ENOMEM;
 
        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
@@ -269,13 +284,42 @@ static struct resource *additional_memory_resource(phys_addr_t size)
        res->name = "System RAM";
        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-       ret = allocate_resource(&iomem_resource, res,
-                               size, 0, -1,
-                               PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
-       if (ret < 0) {
-               pr_err("Cannot allocate new System RAM resource\n");
-               kfree(res);
-               return NULL;
+       res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
+       if (res_hostmem) {
+               /* Try to grab a range from hostmem */
+               res_hostmem->name = "Host memory";
+               ret = allocate_resource(&hostmem_resource, res_hostmem,
+                                       size, 0, -1,
+                                       PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+       }
+
+       if (!ret) {
+               /*
+                * Insert this resource into iomem. Because hostmem_resource
+                * tracks the portion of the guest e820 marked as UNUSABLE,
+                * no one else should try to use it.
+                */
+               res->start = res_hostmem->start;
+               res->end = res_hostmem->end;
+               ret = insert_resource(&iomem_resource, res);
+               if (ret < 0) {
+                       pr_err("Can't insert iomem_resource [%llx - %llx]\n",
+                               res->start, res->end);
+                       release_memory_resource(res_hostmem);
+                       res_hostmem = NULL;
+                       res->start = res->end = 0;
+               }
+       }
+
+       if (ret) {
+               ret = allocate_resource(&iomem_resource, res,
+                                       size, 0, -1,
+                                       PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+               if (ret < 0) {
+                       pr_err("Cannot allocate new System RAM resource\n");
+                       kfree(res);
+                       return NULL;
+               }
        }
 
 #ifdef CONFIG_SPARSEMEM
@@ -287,6 +331,7 @@ static struct resource *additional_memory_resource(phys_addr_t size)
                        pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
                               pfn, limit);
                        release_memory_resource(res);
+                       release_memory_resource(res_hostmem);
                        return NULL;
                }
        }
@@ -765,6 +810,8 @@ static int __init balloon_init(void)
        set_online_page_callback(&xen_online_page);
        register_memory_notifier(&xen_memory_nb);
        register_sysctl_table(xen_root);
+
+       arch_xen_balloon_init(&hostmem_resource);
 #endif
 
 #ifdef CONFIG_XEN_PV
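
arch_xen_balloon_init() is declared weak above, so the common balloon code can call it unconditionally while an architecture that wants to populate hostmem_resource overrides it with a strong definition. A minimal sketch of the weak-hook idiom, with hypothetical names:

#include <linux/init.h>
#include <linux/ioport.h>

/* Weak default: does nothing, so generic code can call the hook without
 * #ifdefs.  An architecture overrides it simply by providing a normal
 * (strong) definition of the same symbol in its own code. */
void __attribute__((weak)) __init demo_arch_hook(struct resource *res)
{
}

static struct resource demo_res = {
	.name = "demo range",
};

static int __init demo_init(void)
{
	demo_arch_hook(&demo_res);
	return 0;
}
subsys_initcall(demo_init);
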
index 57efbd3..bd56653 100644 (file)
@@ -380,10 +380,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
                }
                range = 0;
                while (range < pages) {
-                       if (map->unmap_ops[offset+range].handle == -1) {
-                               range--;
+                       if (map->unmap_ops[offset+range].handle == -1)
                                break;
-                       }
                        range++;
                }
                err = __unmap_grant_pages(map, offset, range);
@@ -1073,8 +1071,10 @@ unlock_out:
 out_unlock_put:
        mutex_unlock(&priv->lock);
 out_put_map:
-       if (use_ptemod)
+       if (use_ptemod) {
                map->vma = NULL;
+               unmap_grant_pages(map, 0, map->count);
+       }
        gntdev_put_map(priv, map);
        return err;
 }
index 40caa92..4c789e6 100644 (file)
@@ -805,7 +805,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
                pvcalls_exit();
                return ret;
        }
-       map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+       map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
        if (map2 == NULL) {
                clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
                          (void *)&map->passive.flags);
@@ -1103,7 +1103,7 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
                        kfree(map);
                }
        }
-       if (bedata->ref >= 0)
+       if (bedata->ref != -1)
                gnttab_end_foreign_access(bedata->ref, 0, 0);
        kfree(bedata->ring.sring);
        kfree(bedata);
@@ -1128,6 +1128,8 @@ static int pvcalls_front_probe(struct xenbus_device *dev,
        }
 
        versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
+       if (IS_ERR(versions))
+               return PTR_ERR(versions);
        if (!len)
                return -EINVAL;
        if (strcmp(versions, "1")) {
index 8b75463..af03c2a 100644 (file)
@@ -94,13 +94,13 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
        if (v9ses->cache)
                sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
 
-       sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
+       sb->s_flags |= SB_ACTIVE | SB_DIRSYNC | SB_NOATIME;
        if (!v9ses->cache)
-               sb->s_flags |= MS_SYNCHRONOUS;
+               sb->s_flags |= SB_SYNCHRONOUS;
 
 #ifdef CONFIG_9P_FS_POSIX_ACL
        if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
 #endif
 
        return 0;
index c9fdfb1..cfda2c7 100644 (file)
@@ -213,7 +213,7 @@ static int parse_options(struct super_block *sb, char *options)
 static int adfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
        return parse_options(sb, data);
 }
 
@@ -372,7 +372,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
        struct inode *root;
        int ret = -EINVAL;
 
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
 
        asb = kzalloc(sizeof(*asb), GFP_KERNEL);
        if (!asb)
index 185d5ab..0f0e692 100644 (file)
@@ -453,7 +453,7 @@ affs_error(struct super_block *sb, const char *function, const char *fmt, ...)
        pr_crit("error (device %s): %s(): %pV\n", sb->s_id, function, &vaf);
        if (!sb_rdonly(sb))
                pr_warn("Remounting filesystem read-only\n");
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        va_end(args);
 }
 
index 2b13996..5ba9ef2 100644 (file)
@@ -250,12 +250,12 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
        int i, res = 0;
        struct affs_sb_info *sbi = AFFS_SB(sb);
 
-       if (*flags & MS_RDONLY)
+       if (*flags & SB_RDONLY)
                return 0;
 
        if (!AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag) {
                pr_notice("Bitmap invalid - mounting %s read only\n", sb->s_id);
-               *flags |= MS_RDONLY;
+               *flags |= SB_RDONLY;
                return 0;
        }
 
@@ -288,7 +288,7 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
                if (affs_checksum_block(sb, bh)) {
                        pr_warn("Bitmap %u invalid - mounting %s read only.\n",
                                bm->bm_key, sb->s_id);
-                       *flags |= MS_RDONLY;
+                       *flags |= SB_RDONLY;
                        goto out;
                }
                pr_debug("read bitmap block %d: %d\n", blk, bm->bm_key);
index 884beda..1117e36 100644 (file)
@@ -356,7 +356,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_magic             = AFFS_SUPER_MAGIC;
        sb->s_op                = &affs_sops;
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
 
        sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
        if (!sbi)
@@ -466,7 +466,7 @@ got_root:
        if ((chksum == FS_DCFFS || chksum == MUFS_DCFFS || chksum == FS_DCOFS
             || chksum == MUFS_DCOFS) && !sb_rdonly(sb)) {
                pr_notice("Dircache FS - mounting %s read only\n", sb->s_id);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        switch (chksum) {
        case MUFS_FS:
@@ -488,7 +488,7 @@ got_root:
                /* fall thru */
        case FS_OFS:
                affs_set_opt(sbi->s_flags, SF_OFS);
-               sb->s_flags |= MS_NOEXEC;
+               sb->s_flags |= SB_NOEXEC;
                break;
        case MUFS_DCOFS:
        case MUFS_INTLOFS:
@@ -497,7 +497,7 @@ got_root:
        case FS_INTLOFS:
                affs_set_opt(sbi->s_flags, SF_INTL);
                affs_set_opt(sbi->s_flags, SF_OFS);
-               sb->s_flags |= MS_NOEXEC;
+               sb->s_flags |= SB_NOEXEC;
                break;
        default:
                pr_err("Unknown filesystem on device %s: %08X\n",
@@ -513,7 +513,7 @@ got_root:
                        sig, sig[3] + '0', blocksize);
        }
 
-       sb->s_flags |= MS_NODEV | MS_NOSUID;
+       sb->s_flags |= SB_NODEV | SB_NOSUID;
 
        sbi->s_data_blksize = sb->s_blocksize;
        if (affs_test_opt(sbi->s_flags, SF_OFS))
@@ -570,7 +570,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
        pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
 
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
 
        memcpy(volume, sbi->s_volume, 32);
        if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
@@ -596,10 +596,10 @@ affs_remount(struct super_block *sb, int *flags, char *data)
        memcpy(sbi->s_volume, volume, 32);
        spin_unlock(&sbi->symlink_lock);
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
 
-       if (*flags & MS_RDONLY)
+       if (*flags & SB_RDONLY)
                affs_free_bitmap(sb);
        else
                res = affs_init_bitmap(sb, flags);
index ff8d5bf..23c7f39 100644 (file)
@@ -895,20 +895,38 @@ error:
  * However, if we didn't have a callback promise outstanding, or it was
  * outstanding on a different server, then it won't break it either...
  */
-static int afs_dir_remove_link(struct dentry *dentry, struct key *key)
+static int afs_dir_remove_link(struct dentry *dentry, struct key *key,
+                              unsigned long d_version_before,
+                              unsigned long d_version_after)
 {
+       bool dir_valid;
        int ret = 0;
 
+       /* There were no intervening changes on the server if the version
+        * number we got back was incremented by exactly 1.
+        */
+       dir_valid = (d_version_after == d_version_before + 1);
+
        if (d_really_is_positive(dentry)) {
                struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
 
-               if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
-                       kdebug("AFS_VNODE_DELETED");
-               clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
-
-               ret = afs_validate(vnode, key);
-               if (ret == -ESTALE)
+               if (dir_valid) {
+                       drop_nlink(&vnode->vfs_inode);
+                       if (vnode->vfs_inode.i_nlink == 0) {
+                               set_bit(AFS_VNODE_DELETED, &vnode->flags);
+                               clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+                       }
                        ret = 0;
+               } else {
+                       clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+
+                       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+                               kdebug("AFS_VNODE_DELETED");
+
+                       ret = afs_validate(vnode, key);
+                       if (ret == -ESTALE)
+                               ret = 0;
+               }
                _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret);
        }
 
@@ -923,6 +941,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
        struct afs_fs_cursor fc;
        struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
        struct key *key;
+       unsigned long d_version = (unsigned long)dentry->d_fsdata;
        int ret;
 
        _enter("{%x:%u},{%pd}",
@@ -955,7 +974,9 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
                afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
                ret = afs_end_vnode_operation(&fc);
                if (ret == 0)
-                       ret = afs_dir_remove_link(dentry, key);
+                       ret = afs_dir_remove_link(
+                               dentry, key, d_version,
+                               (unsigned long)dvnode->status.data_version);
        }
 
 error_key:
index 3415eb7..1e81864 100644 (file)
@@ -377,6 +377,10 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
        }
 
        read_sequnlock_excl(&vnode->cb_lock);
+
+       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+               clear_nlink(&vnode->vfs_inode);
+
        if (valid)
                goto valid;
 
index e03910c..804d1f9 100644 (file)
@@ -441,7 +441,10 @@ enum afs_lock_state {
 };
 
 /*
- * AFS inode private data
+ * AFS inode private data.
+ *
+ * Note that afs_alloc_inode() *must* reset anything that could incorrectly
+ * leak from one inode to another.
  */
 struct afs_vnode {
        struct inode            vfs_inode;      /* the VFS's inode record */
index ea1460b..e112665 100644 (file)
@@ -885,7 +885,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
 {
        struct afs_net *net = call->net;
        enum afs_call_state state;
-       u32 remote_abort;
+       u32 remote_abort = 0;
        int ret;
 
        _enter("{%s,%zu},,%zu,%d",
index 2b00097..b88b7d4 100644 (file)
@@ -120,7 +120,7 @@ static void afs_hash_permits(struct afs_permits *permits)
 void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
                      unsigned int cb_break)
 {
-       struct afs_permits *permits, *xpermits, *replacement, *new = NULL;
+       struct afs_permits *permits, *xpermits, *replacement, *zap, *new = NULL;
        afs_access_t caller_access = READ_ONCE(vnode->status.caller_access);
        size_t size = 0;
        bool changed = false;
@@ -204,7 +204,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
        new = kzalloc(sizeof(struct afs_permits) +
                      sizeof(struct afs_permit) * size, GFP_NOFS);
        if (!new)
-               return;
+               goto out_put;
 
        refcount_set(&new->usage, 1);
        new->nr_permits = size;
@@ -229,8 +229,6 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
 
        afs_hash_permits(new);
 
-       afs_put_permits(permits);
-
        /* Now see if the permit list we want is actually already available */
        spin_lock(&afs_permits_lock);
 
@@ -262,11 +260,15 @@ found:
        kfree(new);
 
        spin_lock(&vnode->lock);
-       if (cb_break != (vnode->cb_break + vnode->cb_interest->server->cb_s_break) ||
-           permits != rcu_access_pointer(vnode->permit_cache))
-               goto someone_else_changed_it_unlock;
-       rcu_assign_pointer(vnode->permit_cache, replacement);
+       zap = rcu_access_pointer(vnode->permit_cache);
+       if (cb_break == (vnode->cb_break + vnode->cb_interest->server->cb_s_break) &&
+           zap == permits)
+               rcu_assign_pointer(vnode->permit_cache, replacement);
+       else
+               zap = replacement;
        spin_unlock(&vnode->lock);
+       afs_put_permits(zap);
+out_put:
        afs_put_permits(permits);
        return;
 
index 875b5eb..1037dd4 100644 (file)
@@ -496,10 +496,10 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
                if (ret < 0)
                        goto error_sb;
                as = NULL;
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
        } else {
                _debug("reuse");
-               ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
+               ASSERTCMP(sb->s_flags, &, SB_ACTIVE);
                afs_destroy_sbi(as);
                as = NULL;
        }
@@ -536,7 +536,9 @@ static void afs_kill_super(struct super_block *sb)
 }
 
 /*
- * initialise an inode cache slab element prior to any use
+ * Initialise an inode cache slab element prior to any use.  Note that
+ * afs_alloc_inode() *must* reset anything that could incorrectly leak from one
+ * inode to another.
  */
 static void afs_i_init_once(void *_vnode)
 {
@@ -568,11 +570,21 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
 
        atomic_inc(&afs_count_active_inodes);
 
+       /* Reset anything that shouldn't leak from one inode to the next. */
        memset(&vnode->fid, 0, sizeof(vnode->fid));
        memset(&vnode->status, 0, sizeof(vnode->status));
 
        vnode->volume           = NULL;
+       vnode->lock_key         = NULL;
+       vnode->permit_cache     = NULL;
+       vnode->cb_interest      = NULL;
+#ifdef CONFIG_AFS_FSCACHE
+       vnode->cache            = NULL;
+#endif
+
        vnode->flags            = 1 << AFS_VNODE_UNSET;
+       vnode->cb_type          = 0;
+       vnode->lock_state       = AFS_VNODE_LOCK_NONE;
 
        _leave(" = %p", &vnode->vfs_inode);
        return &vnode->vfs_inode;
index cb5f8a3..9370e2f 100644 (file)
@@ -198,7 +198,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
                        ret = afs_fill_page(vnode, key, pos + copied,
                                            len - copied, page);
                        if (ret < 0)
-                               return ret;
+                               goto out;
                }
                SetPageUptodate(page);
        }
@@ -206,10 +206,12 @@ int afs_write_end(struct file *file, struct address_space *mapping,
        set_page_dirty(page);
        if (PageDirty(page))
                _debug("dirtied");
+       ret = copied;
+
+out:
        unlock_page(page);
        put_page(page);
-
-       return copied;
+       return ret;
 }
 
 /*
index d79ced9..82e8f6e 100644 (file)
@@ -281,8 +281,8 @@ static int autofs4_mount_wait(const struct path *path, bool rcu_walk)
                pr_debug("waiting for mount name=%pd\n", path->dentry);
                status = autofs4_wait(sbi, path, NFY_MOUNT);
                pr_debug("mount wait done status=%d\n", status);
-               ino->last_used = jiffies;
        }
+       ino->last_used = jiffies;
        return status;
 }
 
@@ -321,21 +321,16 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
         */
        if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
                struct dentry *parent = dentry->d_parent;
+               struct autofs_info *ino;
                struct dentry *new;
 
                new = d_lookup(parent, &dentry->d_name);
                if (!new)
                        return NULL;
-               if (new == dentry)
-                       dput(new);
-               else {
-                       struct autofs_info *ino;
-
-                       ino = autofs4_dentry_ino(new);
-                       ino->last_used = jiffies;
-                       dput(path->dentry);
-                       path->dentry = new;
-               }
+               ino = autofs4_dentry_ino(new);
+               ino->last_used = jiffies;
+               dput(path->dentry);
+               path->dentry = new;
        }
        return path->dentry;
 }
index 8fc4170..961a12d 100644 (file)
@@ -170,7 +170,6 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
        mutex_unlock(&sbi->wq_mutex);
 
-       if (autofs4_write(sbi, pipe, &pkt, pktsz))
        switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
        case 0:
                break;
index 75a461c..16f2dfe 100644 (file)
@@ -365,7 +365,7 @@ Version 0.4 (2001-10-28)
        (fs/befs/super.c)
 
 * Tell the kernel to only mount befs read-only. 
-       By setting the MS_RDONLY flag in befs_read_super().
+       By setting the SB_RDONLY flag in befs_read_super().
        Not that it was possible to write before. But now the kernel won't even try.
        (fs/befs/super.c)
 
index a92355c..ee23623 100644 (file)
@@ -841,7 +841,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
        if (!sb_rdonly(sb)) {
                befs_warning(sb,
                             "No write support. Marking filesystem read-only");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        /*
@@ -948,7 +948,7 @@ static int
 befs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if (!(*flags & MS_RDONLY))
+       if (!(*flags & SB_RDONLY))
                return -EINVAL;
        return 0;
 }
index b35ce16..5982c8a 100644 (file)
@@ -295,7 +295,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                 unsigned long len, u64 disk_start,
                                 unsigned long compressed_len,
                                 struct page **compressed_pages,
-                                unsigned long nr_pages)
+                                unsigned long nr_pages,
+                                unsigned int write_flags)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct bio *bio = NULL;
@@ -327,7 +328,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
        bdev = fs_info->fs_devices->latest_bdev;
 
        bio = btrfs_bio_alloc(bdev, first_byte);
-       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+       bio->bi_opf = REQ_OP_WRITE | write_flags;
        bio->bi_private = cb;
        bio->bi_end_io = end_compressed_bio_write;
        refcount_set(&cb->pending_bios, 1);
@@ -374,7 +375,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                        bio_put(bio);
 
                        bio = btrfs_bio_alloc(bdev, first_byte);
-                       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+                       bio->bi_opf = REQ_OP_WRITE | write_flags;
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
                        bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -1528,5 +1529,5 @@ unsigned int btrfs_compress_str2level(const char *str)
        if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
                return str[5] - '0';
 
-       return 0;
+       return BTRFS_ZLIB_DEFAULT_LEVEL;
 }
index da20755..0868cc5 100644 (file)
@@ -34,6 +34,8 @@
 /* Maximum size of data before compression */
 #define BTRFS_MAX_UNCOMPRESSED         (SZ_128K)
 
+#define        BTRFS_ZLIB_DEFAULT_LEVEL                3
+
 struct compressed_bio {
        /* number of bios pending for this compressed extent */
        refcount_t pending_bios;
@@ -91,7 +93,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                  unsigned long len, u64 disk_start,
                                  unsigned long compressed_len,
                                  struct page **compressed_pages,
-                                 unsigned long nr_pages);
+                                 unsigned long nr_pages,
+                                 unsigned int write_flags);
 blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags);
 
index 531e0a8..1e74cf8 100644 (file)
@@ -1032,14 +1032,17 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
                    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
                        ret = btrfs_inc_ref(trans, root, buf, 1);
-                       BUG_ON(ret); /* -ENOMEM */
+                       if (ret)
+                               return ret;
 
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID) {
                                ret = btrfs_dec_ref(trans, root, buf, 0);
-                               BUG_ON(ret); /* -ENOMEM */
+                               if (ret)
+                                       return ret;
                                ret = btrfs_inc_ref(trans, root, cow, 1);
-                               BUG_ON(ret); /* -ENOMEM */
+                               if (ret)
+                                       return ret;
                        }
                        new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
                } else {
@@ -1049,7 +1052,8 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
-                       BUG_ON(ret); /* -ENOMEM */
+                       if (ret)
+                               return ret;
                }
                if (new_flags != 0) {
                        int level = btrfs_header_level(buf);
@@ -1068,9 +1072,11 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
-                       BUG_ON(ret); /* -ENOMEM */
+                       if (ret)
+                               return ret;
                        ret = btrfs_dec_ref(trans, root, buf, 1);
-                       BUG_ON(ret); /* -ENOMEM */
+                       if (ret)
+                               return ret;
                }
                clean_tree_block(fs_info, buf);
                *last_ref = 1;
index f7df553..13c260b 100644 (file)
@@ -2957,7 +2957,7 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
  */
 static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
 {
-       return fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info);
+       return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info);
 }
 
 static inline void free_fs_info(struct btrfs_fs_info *fs_info)
@@ -3180,6 +3180,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
                               int nr);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+                             unsigned int extra_bits,
                              struct extent_state **cached_state, int dedupe);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
                             struct btrfs_root *new_root,
index 5d73f79..a6226cd 100644 (file)
@@ -87,6 +87,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 
        spin_lock(&root->inode_lock);
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+
        if (node) {
                if (btrfs_inode->delayed_node) {
                        refcount_inc(&node->refs);      /* can be accessed */
@@ -94,9 +95,30 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
                        spin_unlock(&root->inode_lock);
                        return node;
                }
-               btrfs_inode->delayed_node = node;
-               /* can be accessed and cached in the inode */
-               refcount_add(2, &node->refs);
+
+               /*
+                * It's possible that we're racing into the middle of removing
+                * this node from the radix tree.  In this case, the refcount
+                * was zero and it should never go back to one.  Just return
+                * NULL like it was never in the radix at all; our release
+                * function is in the process of removing it.
+                *
+                * Some implementations of refcount_inc refuse to bump the
+                * refcount once it has hit zero.  If we don't do this dance
+                * here, refcount_inc() may decide to just WARN_ONCE() instead
+                * of actually bumping the refcount.
+                *
+                * If this node is properly in the radix, we want to bump the
+                * refcount twice, once for the inode and once for this get
+                * operation.
+                */
+               if (refcount_inc_not_zero(&node->refs)) {
+                       refcount_inc(&node->refs);
+                       btrfs_inode->delayed_node = node;
+               } else {
+                       node = NULL;
+               }
+
                spin_unlock(&root->inode_lock);
                return node;
        }
@@ -254,17 +276,18 @@ static void __btrfs_release_delayed_node(
        mutex_unlock(&delayed_node->mutex);
 
        if (refcount_dec_and_test(&delayed_node->refs)) {
-               bool free = false;
                struct btrfs_root *root = delayed_node->root;
+
                spin_lock(&root->inode_lock);
-               if (refcount_read(&delayed_node->refs) == 0) {
-                       radix_tree_delete(&root->delayed_nodes_tree,
-                                         delayed_node->inode_id);
-                       free = true;
-               }
+               /*
+                * Once our refcount goes to zero, nobody is allowed to bump it
+                * back up.  We can delete it now.
+                */
+               ASSERT(refcount_read(&delayed_node->refs) == 0);
+               radix_tree_delete(&root->delayed_nodes_tree,
+                                 delayed_node->inode_id);
                spin_unlock(&root->inode_lock);
-               if (free)
-                       kmem_cache_free(delayed_node_cache, delayed_node);
+               kmem_cache_free(delayed_node_cache, delayed_node);
        }
 }
 
@@ -1610,28 +1633,18 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
 int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index)
 {
-       struct btrfs_delayed_item *curr, *next;
-       int ret;
-
-       if (list_empty(del_list))
-               return 0;
+       struct btrfs_delayed_item *curr;
+       int ret = 0;
 
-       list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+       list_for_each_entry(curr, del_list, readdir_list) {
                if (curr->key.offset > index)
                        break;
-
-               list_del(&curr->readdir_list);
-               ret = (curr->key.offset == index);
-
-               if (refcount_dec_and_test(&curr->refs))
-                       kfree(curr);
-
-               if (ret)
-                       return 1;
-               else
-                       continue;
+               if (curr->key.offset == index) {
+                       ret = 1;
+                       break;
+               }
        }
-       return 0;
+       return ret;
 }
 
 /*
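
The delayed-node hunks above close a lookup/free race: once a node's refcount has reached zero, the releasing side owns it, so a lookup may only take a reference via refcount_inc_not_zero() and must treat failure as "not found". A compact sketch of that pattern with illustrative names, not the btrfs code itself (insertion of nodes is omitted):

#include <linux/radix-tree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node {
	refcount_t refs;
	unsigned long id;
};

static DEFINE_SPINLOCK(demo_lock);
static RADIX_TREE(demo_tree, GFP_ATOMIC);

static struct demo_node *demo_get(unsigned long id)
{
	struct demo_node *node;

	spin_lock(&demo_lock);
	node = radix_tree_lookup(&demo_tree, id);
	/* A refcount that already hit zero must not be resurrected; the
	 * release path below is about to delete the node. */
	if (node && !refcount_inc_not_zero(&node->refs))
		node = NULL;
	spin_unlock(&demo_lock);

	return node;
}

static void demo_put(struct demo_node *node)
{
	if (refcount_dec_and_test(&node->refs)) {
		spin_lock(&demo_lock);
		radix_tree_delete(&demo_tree, node->id);
		spin_unlock(&demo_lock);
		kfree(node);
	}
}
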
index efce9a2..a8ecccf 100644 (file)
@@ -610,7 +610,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
-       if (found_level == 0 && btrfs_check_leaf(root, eb)) {
+       if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }
@@ -3231,6 +3231,7 @@ static int write_dev_supers(struct btrfs_device *device,
        int errors = 0;
        u32 crc;
        u64 bytenr;
+       int op_flags;
 
        if (max_mirrors == 0)
                max_mirrors = BTRFS_SUPER_MIRROR_MAX;
@@ -3273,13 +3274,10 @@ static int write_dev_supers(struct btrfs_device *device,
                 * we fua the first super.  The others we allow
                 * to go down lazy.
                 */
-               if (i == 0) {
-                       ret = btrfsic_submit_bh(REQ_OP_WRITE,
-                               REQ_SYNC | REQ_FUA | REQ_META | REQ_PRIO, bh);
-               } else {
-                       ret = btrfsic_submit_bh(REQ_OP_WRITE,
-                               REQ_SYNC | REQ_META | REQ_PRIO, bh);
-               }
+               op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
+               if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
+                       op_flags |= REQ_FUA;
+               ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
                if (ret)
                        errors++;
        }
@@ -3848,7 +3846,13 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
                                         buf->len,
                                         fs_info->dirty_metadata_batch);
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-       if (btrfs_header_level(buf) == 0 && btrfs_check_leaf(root, buf)) {
+       /*
+        * btrfs_mark_buffer_dirty() can be called with the item pointer set
+        * but the item data not yet updated, so only check item pointers
+        * here, not item data.
+        */
+       if (btrfs_header_level(buf) == 0 &&
+           btrfs_check_leaf_relaxed(root, buf)) {
                btrfs_print_leaf(buf);
                ASSERT(0);
        }
index 7208ece..2f43285 100644 (file)
@@ -3502,13 +3502,6 @@ again:
                goto again;
        }
 
-       /* We've already setup this transaction, go ahead and exit */
-       if (block_group->cache_generation == trans->transid &&
-           i_size_read(inode)) {
-               dcs = BTRFS_DC_SETUP;
-               goto out_put;
-       }
-
        /*
         * We want to set the generation to 0, that way if anything goes wrong
         * from here on out we know not to trust this cache when we load up next
@@ -3532,6 +3525,13 @@ again:
        }
        WARN_ON(ret);
 
+       /* We've already setup this transaction, go ahead and exit */
+       if (block_group->cache_generation == trans->transid &&
+           i_size_read(inode)) {
+               dcs = BTRFS_DC_SETUP;
+               goto out_put;
+       }
+
        if (i_size_read(inode) > 0) {
                ret = btrfs_check_trunc_cache_free_space(fs_info,
                                        &fs_info->global_block_rsv);
@@ -9206,6 +9206,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
        ret = btrfs_del_root(trans, fs_info, &root->root_key);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
+               err = ret;
                goto out_end_trans;
        }
 
index 16045ea..012d638 100644 (file)
@@ -1984,7 +1984,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
        struct btrfs_bio *bbio = NULL;
        int ret;
 
-       ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
+       ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
        BUG_ON(!mirror_num);
 
        bio = btrfs_io_bio_alloc(1);
@@ -3253,7 +3253,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
                                               delalloc_start,
                                               delalloc_end,
                                               &page_started,
-                                              nr_written);
+                                              nr_written, wbc);
                /* File system has been set read-only */
                if (ret) {
                        SetPageError(page);
index 4a88613..93dcae0 100644 (file)
@@ -116,7 +116,8 @@ struct extent_io_ops {
         */
        int (*fill_delalloc)(void *private_data, struct page *locked_page,
                             u64 start, u64 end, int *page_started,
-                            unsigned long *nr_written);
+                            unsigned long *nr_written,
+                            struct writeback_control *wbc);
 
        int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
        void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
@@ -365,10 +366,11 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                       struct extent_state **cached_state);
 
 static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
-               u64 end, struct extent_state **cached_state)
+                                     u64 end, unsigned int extra_bits,
+                                     struct extent_state **cached_state)
 {
        return set_extent_bit(tree, start, end,
-                             EXTENT_DELALLOC | EXTENT_UPTODATE,
+                             EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
                              NULL, cached_state, GFP_NOFS);
 }
 
index f80254d..eb1bac7 100644 (file)
@@ -477,6 +477,47 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
        }
 }
 
+static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
+                                        const u64 start,
+                                        const u64 len,
+                                        struct extent_state **cached_state)
+{
+       u64 search_start = start;
+       const u64 end = start + len - 1;
+
+       while (search_start < end) {
+               const u64 search_len = end - search_start + 1;
+               struct extent_map *em;
+               u64 em_len;
+               int ret = 0;
+
+               em = btrfs_get_extent(inode, NULL, 0, search_start,
+                                     search_len, 0);
+               if (IS_ERR(em))
+                       return PTR_ERR(em);
+
+               if (em->block_start != EXTENT_MAP_HOLE)
+                       goto next;
+
+               em_len = em->len;
+               if (em->start < search_start)
+                       em_len -= search_start - em->start;
+               if (em_len > search_len)
+                       em_len = search_len;
+
+               ret = set_extent_bit(&inode->io_tree, search_start,
+                                    search_start + em_len - 1,
+                                    EXTENT_DELALLOC_NEW,
+                                    NULL, cached_state, GFP_NOFS);
+next:
+               search_start = extent_map_end(em);
+               free_extent_map(em);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
 /*
  * after copy_from_user, pages need to be dirtied and we need to make
  * sure holes are created between the current EOF and the start of
@@ -497,14 +538,34 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);
+       unsigned int extra_bits = 0;
 
        start_pos = pos & ~((u64) fs_info->sectorsize - 1);
        num_bytes = round_up(write_bytes + pos - start_pos,
                             fs_info->sectorsize);
 
        end_of_last_block = start_pos + num_bytes - 1;
+
+       if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
+               if (start_pos >= isize &&
+                   !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
+                       /*
+                        * There can't be any extents following eof in this case
+                        * so just set the delalloc new bit for the range
+                        * directly.
+                        */
+                       extra_bits |= EXTENT_DELALLOC_NEW;
+               } else {
+                       err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
+                                                           start_pos,
+                                                           num_bytes, cached);
+                       if (err)
+                               return err;
+               }
+       }
+
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
-                                       cached, 0);
+                                       extra_bits, cached, 0);
        if (err)
                return err;
 
@@ -1404,47 +1465,6 @@ fail:
 
 }
 
-static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
-                                        const u64 start,
-                                        const u64 len,
-                                        struct extent_state **cached_state)
-{
-       u64 search_start = start;
-       const u64 end = start + len - 1;
-
-       while (search_start < end) {
-               const u64 search_len = end - search_start + 1;
-               struct extent_map *em;
-               u64 em_len;
-               int ret = 0;
-
-               em = btrfs_get_extent(inode, NULL, 0, search_start,
-                                     search_len, 0);
-               if (IS_ERR(em))
-                       return PTR_ERR(em);
-
-               if (em->block_start != EXTENT_MAP_HOLE)
-                       goto next;
-
-               em_len = em->len;
-               if (em->start < search_start)
-                       em_len -= search_start - em->start;
-               if (em_len > search_len)
-                       em_len = search_len;
-
-               ret = set_extent_bit(&inode->io_tree, search_start,
-                                    search_start + em_len - 1,
-                                    EXTENT_DELALLOC_NEW,
-                                    NULL, cached_state, GFP_NOFS);
-next:
-               search_start = extent_map_end(em);
-               free_extent_map(em);
-               if (ret)
-                       return ret;
-       }
-       return 0;
-}
-
 /*
  * This function locks the extent and properly waits for data=ordered extents
  * to finish before allowing the pages to be modified if needed.
@@ -1473,10 +1493,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
                + round_up(pos + write_bytes - start_pos,
                           fs_info->sectorsize) - 1;
 
-       if (start_pos < inode->vfs_inode.i_size ||
-           (inode->flags & BTRFS_INODE_PREALLOC)) {
+       if (start_pos < inode->vfs_inode.i_size) {
                struct btrfs_ordered_extent *ordered;
-               unsigned int clear_bits;
 
                lock_extent_bits(&inode->io_tree, start_pos, last_pos,
                                cached_state);
@@ -1498,19 +1516,10 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);
-               ret = btrfs_find_new_delalloc_bytes(inode, start_pos,
-                                                   last_pos - start_pos + 1,
-                                                   cached_state);
-               clear_bits = EXTENT_DIRTY | EXTENT_DELALLOC |
-                       EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG;
-               if (ret)
-                       clear_bits |= EXTENT_DELALLOC_NEW | EXTENT_LOCKED;
-               clear_extent_bit(&inode->io_tree, start_pos,
-                                last_pos, clear_bits,
-                                (clear_bits & EXTENT_LOCKED) ? 1 : 0,
-                                0, cached_state, GFP_NOFS);
-               if (ret)
-                       return ret;
+               clear_extent_bit(&inode->io_tree, start_pos, last_pos,
+                                EXTENT_DIRTY | EXTENT_DELALLOC |
+                                EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+                                0, 0, cached_state, GFP_NOFS);
                *lockstart = start_pos;
                *lockend = last_pos;
                ret = 1;
@@ -2048,6 +2057,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        len = (u64)end - (u64)start + 1;
        trace_btrfs_sync_file(file, datasync);
 
+       btrfs_init_log_ctx(&ctx, inode);
+
        /*
         * We write the dirty pages in the range and wait until they complete
         * out of the ->i_mutex. If so, we can flush the dirty pages by
@@ -2194,8 +2205,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        }
        trans->sync = true;
 
-       btrfs_init_log_ctx(&ctx, inode);
-
        ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
        if (ret < 0) {
                /* Fallthrough and commit/free transaction. */
@@ -2253,6 +2262,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                ret = btrfs_end_transaction(trans);
        }
 out:
+       ASSERT(list_empty(&ctx.list));
        err = file_check_and_advance_wb_err(file);
        if (!ret)
                ret = err;
index cdc9f40..4426d1c 100644
@@ -1264,7 +1264,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        /* Lock all pages first so we can lock the extent safely. */
        ret = io_ctl_prepare_pages(io_ctl, inode, 0);
        if (ret)
-               goto out;
+               goto out_unlock;
 
        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         &cached_state);
@@ -1358,6 +1358,7 @@ out_nospc_locked:
 out_nospc:
        cleanup_write_cache_enospc(inode, io_ctl, &cached_state);
 
+out_unlock:
        if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
                up_write(&block_group->data_rwsem);
 
index b93fe05..e1a7f3c 100644
@@ -378,6 +378,7 @@ struct async_cow {
        struct page *locked_page;
        u64 start;
        u64 end;
+       unsigned int write_flags;
        struct list_head extents;
        struct btrfs_work work;
 };
@@ -857,7 +858,8 @@ retry:
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
-                                   async_extent->nr_pages)) {
+                                   async_extent->nr_pages,
+                                   async_cow->write_flags)) {
                        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
@@ -1191,7 +1193,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
 
 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
-                               unsigned long *nr_written)
+                               unsigned long *nr_written,
+                               unsigned int write_flags)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct async_cow *async_cow;
@@ -1208,6 +1211,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;
+               async_cow->write_flags = write_flags;
 
                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
                    !btrfs_test_opt(fs_info, FORCE_COMPRESS))
@@ -1577,11 +1581,13 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
  */
 static int run_delalloc_range(void *private_data, struct page *locked_page,
                              u64 start, u64 end, int *page_started,
-                             unsigned long *nr_written)
+                             unsigned long *nr_written,
+                             struct writeback_control *wbc)
 {
        struct inode *inode = private_data;
        int ret;
        int force_cow = need_force_cow(inode, start, end);
+       unsigned int write_flags = wbc_to_write_flags(wbc);
 
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
                ret = run_delalloc_nocow(inode, locked_page, start, end,
@@ -1596,7 +1602,8 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
                set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                        &BTRFS_I(inode)->runtime_flags);
                ret = cow_file_range_async(inode, locked_page, start, end,
-                                          page_started, nr_written);
+                                          page_started, nr_written,
+                                          write_flags);
        }
        if (ret)
                btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
@@ -2025,11 +2032,12 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+                             unsigned int extra_bits,
                              struct extent_state **cached_state, int dedupe)
 {
        WARN_ON((end & (PAGE_SIZE - 1)) == 0);
        return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
-                                  cached_state);
+                                  extra_bits, cached_state);
 }
 
 /* see btrfs_writepage_start_hook for details on why this is required */
@@ -2090,7 +2098,7 @@ again:
                goto out;
         }
 
-       btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
+       btrfs_set_extent_delalloc(inode, page_start, page_end, 0, &cached_state,
                                  0);
        ClearPageChecked(page);
        set_page_dirty(page);
@@ -2997,6 +3005,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
                compress_type = ordered_extent->compress_type;
        if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
                BUG_ON(compress_type);
+               btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
+                                      ordered_extent->len);
                ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
                                                ordered_extent->file_offset,
                                                ordered_extent->file_offset +
@@ -4790,7 +4800,7 @@ again:
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-       ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
+       ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
                                        &cached_state, 0);
        if (ret) {
                unlock_extent_cached(io_tree, block_start, block_end,
@@ -5438,6 +5448,14 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
                goto out_err;
 
        btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
+       if (location->type != BTRFS_INODE_ITEM_KEY &&
+           location->type != BTRFS_ROOT_ITEM_KEY) {
+               btrfs_warn(root->fs_info,
+"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
+                          __func__, name, btrfs_ino(BTRFS_I(dir)),
+                          location->objectid, location->type, location->offset);
+               goto out_err;
+       }
 out:
        btrfs_free_path(path);
        return ret;
@@ -5754,8 +5772,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
                return inode;
        }
 
-       BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
-
        index = srcu_read_lock(&fs_info->subvol_srcu);
        ret = fixup_tree_root_location(fs_info, dir, dentry,
                                       &location, &sub_root);
@@ -9150,7 +9166,7 @@ again:
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-       ret = btrfs_set_extent_delalloc(inode, page_start, end,
+       ret = btrfs_set_extent_delalloc(inode, page_start, end, 0,
                                        &cached_state, 0);
        if (ret) {
                unlock_extent_cached(io_tree, page_start, page_end,
index fd172a9..2ef8aca 100644
@@ -1172,7 +1172,7 @@ again:
        if (!i_done || ret)
                goto out;
 
-       if (!(inode->i_sb->s_flags & MS_ACTIVE))
+       if (!(inode->i_sb->s_flags & SB_ACTIVE))
                goto out;
 
        /*
@@ -1333,7 +1333,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                 * make sure we stop running if someone unmounts
                 * the FS
                 */
-               if (!(inode->i_sb->s_flags & MS_ACTIVE))
+               if (!(inode->i_sb->s_flags & SB_ACTIVE))
                        break;
 
                if (btrfs_defrag_cancelled(fs_info)) {
@@ -2206,7 +2206,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
        if (!path)
                return -ENOMEM;
 
-       ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];
+       ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
 
        key.objectid = tree_id;
        key.type = BTRFS_ROOT_ITEM_KEY;
index 4cf2eb6..f0c3f00 100644
@@ -3268,7 +3268,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        nr++;
                }
 
-               btrfs_set_extent_delalloc(inode, page_start, page_end, NULL, 0);
+               btrfs_set_extent_delalloc(inode, page_start, page_end, 0, NULL,
+                                         0);
                set_page_dirty(page);
 
                unlock_extent(&BTRFS_I(inode)->io_tree,
index c10e4c7..20d3300 100644
@@ -3521,7 +3521,40 @@ out:
 }
 
 /*
- * Check if ino ino1 is an ancestor of inode ino2 in the given root.
+ * Check if inode ino2, or any of its ancestors, is inode ino1.
+ * Return 1 if true, 0 if false and < 0 on error.
+ */
+static int check_ino_in_path(struct btrfs_root *root,
+                            const u64 ino1,
+                            const u64 ino1_gen,
+                            const u64 ino2,
+                            const u64 ino2_gen,
+                            struct fs_path *fs_path)
+{
+       u64 ino = ino2;
+
+       if (ino1 == ino2)
+               return ino1_gen == ino2_gen;
+
+       while (ino > BTRFS_FIRST_FREE_OBJECTID) {
+               u64 parent;
+               u64 parent_gen;
+               int ret;
+
+               fs_path_reset(fs_path);
+               ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
+               if (ret < 0)
+                       return ret;
+               if (parent == ino1)
+                       return parent_gen == ino1_gen;
+               ino = parent;
+       }
+       return 0;
+}
+
+/*
+ * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
+ * possible path (in case ino2 is not a directory and has multiple hard links).
  * Return 1 if true, 0 if false and < 0 on error.
  */
 static int is_ancestor(struct btrfs_root *root,
@@ -3530,36 +3563,91 @@ static int is_ancestor(struct btrfs_root *root,
                       const u64 ino2,
                       struct fs_path *fs_path)
 {
-       u64 ino = ino2;
-       bool free_path = false;
+       bool free_fs_path = false;
        int ret = 0;
+       struct btrfs_path *path = NULL;
+       struct btrfs_key key;
 
        if (!fs_path) {
                fs_path = fs_path_alloc();
                if (!fs_path)
                        return -ENOMEM;
-               free_path = true;
+               free_fs_path = true;
        }
 
-       while (ino > BTRFS_FIRST_FREE_OBJECTID) {
-               u64 parent;
-               u64 parent_gen;
+       path = alloc_path_for_send();
+       if (!path) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
-               fs_path_reset(fs_path);
-               ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
-               if (ret < 0) {
-                       if (ret == -ENOENT && ino == ino2)
-                               ret = 0;
-                       goto out;
+       key.objectid = ino2;
+       key.type = BTRFS_INODE_REF_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out;
+
+       while (true) {
+               struct extent_buffer *leaf = path->nodes[0];
+               int slot = path->slots[0];
+               u32 cur_offset = 0;
+               u32 item_size;
+
+               if (slot >= btrfs_header_nritems(leaf)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               goto out;
+                       if (ret > 0)
+                               break;
+                       continue;
                }
-               if (parent == ino1) {
-                       ret = parent_gen == ino1_gen ? 1 : 0;
-                       goto out;
+
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+               if (key.objectid != ino2)
+                       break;
+               if (key.type != BTRFS_INODE_REF_KEY &&
+                   key.type != BTRFS_INODE_EXTREF_KEY)
+                       break;
+
+               item_size = btrfs_item_size_nr(leaf, slot);
+               while (cur_offset < item_size) {
+                       u64 parent;
+                       u64 parent_gen;
+
+                       if (key.type == BTRFS_INODE_EXTREF_KEY) {
+                               unsigned long ptr;
+                               struct btrfs_inode_extref *extref;
+
+                               ptr = btrfs_item_ptr_offset(leaf, slot);
+                               extref = (struct btrfs_inode_extref *)
+                                       (ptr + cur_offset);
+                               parent = btrfs_inode_extref_parent(leaf,
+                                                                  extref);
+                               cur_offset += sizeof(*extref);
+                               cur_offset += btrfs_inode_extref_name_len(leaf,
+                                                                 extref);
+                       } else {
+                               parent = key.offset;
+                               cur_offset = item_size;
+                       }
+
+                       ret = get_inode_info(root, parent, NULL, &parent_gen,
+                                            NULL, NULL, NULL, NULL);
+                       if (ret < 0)
+                               goto out;
+                       ret = check_ino_in_path(root, ino1, ino1_gen,
+                                               parent, parent_gen, fs_path);
+                       if (ret)
+                               goto out;
                }
-               ino = parent;
+               path->slots[0]++;
        }
+       ret = 0;
  out:
-       if (free_path)
+       btrfs_free_path(path);
+       if (free_fs_path)
                fs_path_free(fs_path);
        return ret;
 }
index 65af029..3a4dce1 100644
@@ -107,7 +107,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
                return;
 
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                btrfs_info(fs_info, "forced readonly");
                /*
                 * Note that a running device replace operation is not
@@ -137,7 +137,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
 
        /*
         * Special case: if the error is EROFS, and we're already
-        * under MS_RDONLY, then it is safe here.
+        * under SB_RDONLY, then it is safe here.
         */
        if (errno == -EROFS && sb_rdonly(sb))
                return;
@@ -168,7 +168,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
        set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 
        /* Don't go through full error handling during mount */
-       if (sb->s_flags & MS_BORN)
+       if (sb->s_flags & SB_BORN)
                btrfs_handle_error(fs_info);
 }
 
@@ -507,9 +507,18 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                            token == Opt_compress_force ||
                            strncmp(args[0].from, "zlib", 4) == 0) {
                                compress_type = "zlib";
+
                                info->compress_type = BTRFS_COMPRESS_ZLIB;
-                               info->compress_level =
-                                       btrfs_compress_str2level(args[0].from);
+                               info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
+                               /*
+                                * args[0] contains uninitialized data since
+                                * for these tokens we don't expect any
+                                * parameter.
+                                */
+                               if (token != Opt_compress &&
+                                   token != Opt_compress_force)
+                                       info->compress_level =
+                                         btrfs_compress_str2level(args[0].from);
                                btrfs_set_opt(info->mount_opt, COMPRESS);
                                btrfs_clear_opt(info->mount_opt, NODATACOW);
                                btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -625,7 +634,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        break;
                case Opt_acl:
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
-                       info->sb->s_flags |= MS_POSIXACL;
+                       info->sb->s_flags |= SB_POSIXACL;
                        break;
 #else
                        btrfs_err(info, "support for ACL not compiled in!");
@@ -633,7 +642,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        goto out;
 #endif
                case Opt_noacl:
-                       info->sb->s_flags &= ~MS_POSIXACL;
+                       info->sb->s_flags &= ~SB_POSIXACL;
                        break;
                case Opt_notreelog:
                        btrfs_set_and_info(info, NOTREELOG,
@@ -851,7 +860,7 @@ check:
        /*
         * Extra check for current option against current flag
         */
-       if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
+       if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & SB_RDONLY)) {
                btrfs_err(info,
                          "nologreplay must be used with ro mount option");
                ret = -EINVAL;
@@ -1147,7 +1156,7 @@ static int btrfs_fill_super(struct super_block *sb,
        sb->s_xattr = btrfs_xattr_handlers;
        sb->s_time_gran = 1;
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
        sb->s_flags |= SB_I_VERSION;
        sb->s_iflags |= SB_I_CGROUPWB;
@@ -1180,7 +1189,7 @@ static int btrfs_fill_super(struct super_block *sb,
        }
 
        cleancache_init_fs(sb);
-       sb->s_flags |= MS_ACTIVE;
+       sb->s_flags |= SB_ACTIVE;
        return 0;
 
 fail_close:
@@ -1277,7 +1286,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
                seq_puts(seq, ",flushoncommit");
        if (btrfs_test_opt(info, DISCARD))
                seq_puts(seq, ",discard");
-       if (!(info->sb->s_flags & MS_POSIXACL))
+       if (!(info->sb->s_flags & SB_POSIXACL))
                seq_puts(seq, ",noacl");
        if (btrfs_test_opt(info, SPACE_CACHE))
                seq_puts(seq, ",space_cache");
@@ -1409,11 +1418,11 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
 
        mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name, newargs);
        if (PTR_ERR_OR_ZERO(mnt) == -EBUSY) {
-               if (flags & MS_RDONLY) {
-                       mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY,
+               if (flags & SB_RDONLY) {
+                       mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~SB_RDONLY,
                                             device_name, newargs);
                } else {
-                       mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY,
+                       mnt = vfs_kern_mount(&btrfs_fs_type, flags | SB_RDONLY,
                                             device_name, newargs);
                        if (IS_ERR(mnt)) {
                                root = ERR_CAST(mnt);
@@ -1565,7 +1574,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        u64 subvol_objectid = 0;
        int error = 0;
 
-       if (!(flags & MS_RDONLY))
+       if (!(flags & SB_RDONLY))
                mode |= FMODE_WRITE;
 
        error = btrfs_parse_early_options(data, mode, fs_type,
@@ -1619,13 +1628,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        if (error)
                goto error_fs_info;
 
-       if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
+       if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
                error = -EACCES;
                goto error_close_devices;
        }
 
        bdev = fs_devices->latest_bdev;
-       s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | MS_NOSEC,
+       s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
                 fs_info);
        if (IS_ERR(s)) {
                error = PTR_ERR(s);
@@ -1635,7 +1644,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        if (s->s_root) {
                btrfs_close_devices(fs_devices);
                free_fs_info(fs_info);
-               if ((flags ^ s->s_flags) & MS_RDONLY)
+               if ((flags ^ s->s_flags) & SB_RDONLY)
                        error = -EBUSY;
        } else {
                snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
@@ -1702,11 +1711,11 @@ static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
 {
        if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
            (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
-            (flags & MS_RDONLY))) {
+            (flags & SB_RDONLY))) {
                /* wait for any defraggers to finish */
                wait_event(fs_info->transaction_wait,
                           (atomic_read(&fs_info->defrag_running) == 0));
-               if (flags & MS_RDONLY)
+               if (flags & SB_RDONLY)
                        sync_filesystem(fs_info->sb);
        }
 }
@@ -1766,10 +1775,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
        btrfs_resize_thread_pool(fs_info,
                fs_info->thread_pool_size, old_thread_pool_size);
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out;
 
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                /*
                 * this also happens on 'umount -rf' or on shutdown, when
                 * the filesystem is busy.
@@ -1781,10 +1790,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                /* avoid complains from lockdep et al. */
                up(&fs_info->uuid_tree_rescan_sem);
 
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
 
                /*
-                * Setting MS_RDONLY will put the cleaner thread to
+                * Setting SB_RDONLY will put the cleaner thread to
                 * sleep at the next loop if it's already active.
                 * If it's already asleep, we'll leave unused block
                 * groups on disk until we're mounted read-write again
@@ -1856,7 +1865,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                                goto restore;
                        }
                }
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 
                set_bit(BTRFS_FS_OPEN, &fs_info->flags);
        }
@@ -1866,9 +1875,9 @@ out:
        return 0;
 
 restore:
-       /* We've hit an error - don't reset MS_RDONLY */
+       /* We've hit an error - don't reset SB_RDONLY */
        if (sb_rdonly(sb))
-               old_flags |= MS_RDONLY;
+               old_flags |= SB_RDONLY;
        sb->s_flags = old_flags;
        fs_info->mount_opt = old_opts;
        fs_info->compress_type = old_compress_type;
index d06b1c9..2e7f64a 100644
@@ -114,7 +114,7 @@ static int test_find_delalloc(u32 sectorsize)
         * |--- delalloc ---|
         * |---  search  ---|
         */
-       set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
+       set_extent_delalloc(&tmp, 0, sectorsize - 1, 0, NULL);
        start = 0;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -145,7 +145,7 @@ static int test_find_delalloc(u32 sectorsize)
                test_msg("Couldn't find the locked page\n");
                goto out_bits;
        }
-       set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
+       set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, 0, NULL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -200,7 +200,7 @@ static int test_find_delalloc(u32 sectorsize)
         *
         * We are re-using our test_start from above since it works out well.
         */
-       set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
+       set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, 0, NULL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
index f797642..30affb6 100644
@@ -968,7 +968,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        btrfs_test_inode_set_ops(inode);
 
        /* [BTRFS_MAX_EXTENT_SIZE] */
-       ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1,
+       ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, 0,
                                        NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
@@ -984,7 +984,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
                                        BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
-                                       NULL, 0);
+                                       0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1018,7 +1018,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
                                        (BTRFS_MAX_EXTENT_SIZE >> 1)
                                        + sectorsize - 1,
-                                       NULL, 0);
+                                       0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1036,7 +1036,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        ret = btrfs_set_extent_delalloc(inode,
                        BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
                        (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
-                       NULL, 0);
+                       0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1053,7 +1053,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        */
        ret = btrfs_set_extent_delalloc(inode,
                        BTRFS_MAX_EXTENT_SIZE + sectorsize,
-                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1089,7 +1089,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
         */
        ret = btrfs_set_extent_delalloc(inode,
                        BTRFS_MAX_EXTENT_SIZE + sectorsize,
-                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
index 114fc5f..ce4ed6e 100644
@@ -242,7 +242,8 @@ static int check_leaf_item(struct btrfs_root *root,
        return ret;
 }
 
-int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
+static int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf,
+                     bool check_item_data)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
        /* No valid key type is 0, so all key should be larger than this key */
@@ -361,10 +362,15 @@ int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
                        return -EUCLEAN;
                }
 
-               /* Check if the item size and content meet other criteria */
-               ret = check_leaf_item(root, leaf, &key, slot);
-               if (ret < 0)
-                       return ret;
+               if (check_item_data) {
+                       /*
+                        * Check if the item size and content meet other
+                        * criteria
+                        */
+                       ret = check_leaf_item(root, leaf, &key, slot);
+                       if (ret < 0)
+                               return ret;
+               }
 
                prev_key.objectid = key.objectid;
                prev_key.type = key.type;
@@ -374,6 +380,17 @@ int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
        return 0;
 }
 
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf)
+{
+       return check_leaf(root, leaf, true);
+}
+
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+                            struct extent_buffer *leaf)
+{
+       return check_leaf(root, leaf, false);
+}
+
 int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node)
 {
        unsigned long nr = btrfs_header_nritems(node);
index 96c486e..3d53e8d 100644
 #include "ctree.h"
 #include "extent_io.h"
 
-int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf);
+/*
+ * Comprehensive leaf checker.
+ * Will check not only the item pointers, but also every possible member
+ * in item data.
+ */
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf);
+
+/*
+ * Less strict leaf checker.
+ * Will only check item pointers, not reading item data.
+ */
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+                            struct extent_buffer *leaf);
 int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node);
 
 #endif
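A rough sketch of how the two leaf checkers are meant to be split across the I/O paths; the actual call sites are outside this section, so the placement shown here is an assumption based on the comments above:

	/* Read side: item data is fully on hand, run the comprehensive check. */
	ret = btrfs_check_leaf_full(root, leaf);
	if (ret)
		return ret;

	/* Write-out side: item data may not be finalized yet, so only the
	 * item pointers are validated.
	 */
	ret = btrfs_check_leaf_relaxed(root, leaf);
	if (ret)
		return ret;
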
index aa7c71c..7bf9b31 100644
@@ -4102,7 +4102,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
 
        if (ordered_io_err) {
                ctx->io_err = -EIO;
-               return 0;
+               return ctx->io_err;
        }
 
        btrfs_init_map_token(&token);
index f1ecb93..a256842 100644
@@ -189,6 +189,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                rcu_string_free(device->name);
+               bio_put(device->flush_bio);
                kfree(device);
        }
        kfree(fs_devices);
@@ -236,7 +237,6 @@ static struct btrfs_device *__alloc_device(void)
                kfree(dev);
                return ERR_PTR(-ENOMEM);
        }
-       bio_get(dev->flush_bio);
 
        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_alloc_list);
@@ -578,6 +578,7 @@ static void btrfs_free_stale_device(struct btrfs_device *cur_dev)
                                fs_devs->num_devices--;
                                list_del(&dev->dev_list);
                                rcu_string_free(dev->name);
+                               bio_put(dev->flush_bio);
                                kfree(dev);
                        }
                        break;
@@ -630,6 +631,7 @@ static noinline int device_list_add(const char *path,
 
                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
+                       bio_put(device->flush_bio);
                        kfree(device);
                        return -ENOMEM;
                }
@@ -742,6 +744,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
                        name = rcu_string_strdup(orig_dev->name->str,
                                        GFP_KERNEL);
                        if (!name) {
+                               bio_put(device->flush_bio);
                                kfree(device);
                                goto error;
                        }
@@ -807,6 +810,7 @@ again:
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                rcu_string_free(device->name);
+               bio_put(device->flush_bio);
                kfree(device);
        }
 
@@ -1750,20 +1754,24 @@ static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
        key.offset = device->devid;
 
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
-       if (ret < 0)
-               goto out;
-
-       if (ret > 0) {
-               ret = -ENOENT;
+       if (ret) {
+               if (ret > 0)
+                       ret = -ENOENT;
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
                goto out;
        }
 
        ret = btrfs_del_item(trans, root, path);
-       if (ret)
-               goto out;
+       if (ret) {
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
+       }
+
 out:
        btrfs_free_path(path);
-       btrfs_commit_transaction(trans);
+       if (!ret)
+               ret = btrfs_commit_transaction(trans);
        return ret;
 }
 
@@ -1993,7 +2001,7 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
        fs_devices = srcdev->fs_devices;
 
        list_del_rcu(&srcdev->dev_list);
-       list_del_rcu(&srcdev->dev_alloc_list);
+       list_del(&srcdev->dev_alloc_list);
        fs_devices->num_devices--;
        if (srcdev->missing)
                fs_devices->missing_devices--;
@@ -2349,6 +2357,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
 
        name = rcu_string_strdup(device_path, GFP_KERNEL);
        if (!name) {
+               bio_put(device->flush_bio);
                kfree(device);
                ret = -ENOMEM;
                goto error;
@@ -2358,6 +2367,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                rcu_string_free(device->name);
+               bio_put(device->flush_bio);
                kfree(device);
                ret = PTR_ERR(trans);
                goto error;
@@ -2384,7 +2394,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
        set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
 
        if (seeding_dev) {
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
                ret = btrfs_prepare_sprout(fs_info);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
@@ -2497,10 +2507,11 @@ error_sysfs:
        btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
 error_trans:
        if (seeding_dev)
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        if (trans)
                btrfs_end_transaction(trans);
        rcu_string_free(device->name);
+       bio_put(device->flush_bio);
        kfree(device);
 error:
        blkdev_put(bdev, FMODE_EXCL);
@@ -2567,6 +2578,7 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
 
        name = rcu_string_strdup(device_path, GFP_KERNEL);
        if (!name) {
+               bio_put(device->flush_bio);
                kfree(device);
                ret = -ENOMEM;
                goto error;
@@ -6284,6 +6296,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
 
                ret = find_next_devid(fs_info, &tmp);
                if (ret) {
+                       bio_put(dev->flush_bio);
                        kfree(dev);
                        return ERR_PTR(ret);
                }
index ab69dcb..1b46825 100644
@@ -1440,6 +1440,29 @@ static int __close_session(struct ceph_mds_client *mdsc,
        return request_close_session(mdsc, session);
 }
 
+static bool drop_negative_children(struct dentry *dentry)
+{
+       struct dentry *child;
+       bool all_negative = true;
+
+       if (!d_is_dir(dentry))
+               goto out;
+
+       spin_lock(&dentry->d_lock);
+       list_for_each_entry(child, &dentry->d_subdirs, d_child) {
+               if (d_really_is_positive(child)) {
+                       all_negative = false;
+                       break;
+               }
+       }
+       spin_unlock(&dentry->d_lock);
+
+       if (all_negative)
+               shrink_dcache_parent(dentry);
+out:
+       return all_negative;
+}
+
 /*
  * Trim old(er) caps.
  *
@@ -1490,16 +1513,27 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        if ((used | wanted) & ~oissued & mine)
                goto out;   /* we need these caps */
 
-       session->s_trim_caps--;
        if (oissued) {
                /* we aren't the only cap.. just remove us */
                __ceph_remove_cap(cap, true);
+               session->s_trim_caps--;
        } else {
+               struct dentry *dentry;
                /* try dropping referring dentries */
                spin_unlock(&ci->i_ceph_lock);
-               d_prune_aliases(inode);
-               dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
-                    inode, cap, atomic_read(&inode->i_count));
+               dentry = d_find_any_alias(inode);
+               if (dentry && drop_negative_children(dentry)) {
+                       int count;
+                       dput(dentry);
+                       d_prune_aliases(inode);
+                       count = atomic_read(&inode->i_count);
+                       if (count == 1)
+                               session->s_trim_caps--;
+                       dout("trim_caps_cb %p cap %p pruned, count now %d\n",
+                            inode, cap, count);
+               } else {
+                       dput(dentry);
+               }
                return 0;
        }
 
index fe9fbb3..a62d2a9 100644
@@ -331,11 +331,11 @@ static int parse_fsopt_token(char *c, void *private)
                break;
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
        case Opt_acl:
-               fsopt->sb_flags |= MS_POSIXACL;
+               fsopt->sb_flags |= SB_POSIXACL;
                break;
 #endif
        case Opt_noacl:
-               fsopt->sb_flags &= ~MS_POSIXACL;
+               fsopt->sb_flags &= ~SB_POSIXACL;
                break;
        default:
                BUG_ON(token);
@@ -520,7 +520,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",nopoolperm");
 
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
-       if (fsopt->sb_flags & MS_POSIXACL)
+       if (fsopt->sb_flags & SB_POSIXACL)
                seq_puts(m, ",acl");
        else
                seq_puts(m, ",noacl");
@@ -988,7 +988,7 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
        dout("ceph_mount\n");
 
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
-       flags |= MS_POSIXACL;
+       flags |= SB_POSIXACL;
 #endif
        err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
        if (err < 0) {
index cbd216b..350fa55 100644
@@ -42,7 +42,7 @@
 #define CIFS_MOUNT_MULTIUSER   0x20000 /* multiuser mount */
 #define CIFS_MOUNT_STRICT_IO   0x40000 /* strict cache mode */
 #define CIFS_MOUNT_RWPIDFORWARD        0x80000 /* use pid forwarding for rw */
-#define CIFS_MOUNT_POSIXACL    0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
+#define CIFS_MOUNT_POSIXACL    0x100000 /* mirror of SB_POSIXACL in mnt_cifs_flags */
 #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
 #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
 #define CIFS_MOUNT_MAP_SFM_CHR 0x800000 /* SFM/MAC mapping for illegal chars */
index 8c8b75d..31b7565 100644
@@ -125,7 +125,7 @@ cifs_read_super(struct super_block *sb)
        tcon = cifs_sb_master_tcon(cifs_sb);
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
 
        if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
                sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -497,7 +497,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                seq_puts(s, ",cifsacl");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
                seq_puts(s, ",dynperm");
-       if (root->d_sb->s_flags & MS_POSIXACL)
+       if (root->d_sb->s_flags & SB_POSIXACL)
                seq_puts(s, ",acl");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
                seq_puts(s, ",mfsymlinks");
@@ -573,7 +573,7 @@ static int cifs_show_stats(struct seq_file *s, struct dentry *root)
 static int cifs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
        return 0;
 }
 
@@ -708,7 +708,7 @@ cifs_do_mount(struct file_system_type *fs_type,
 
        rc = cifs_mount(cifs_sb, volume_info);
        if (rc) {
-               if (!(flags & MS_SILENT))
+               if (!(flags & SB_SILENT))
                        cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
                                 rc);
                root = ERR_PTR(rc);
@@ -720,7 +720,7 @@ cifs_do_mount(struct file_system_type *fs_type,
        mnt_data.flags = flags;
 
        /* BB should we make this contingent on mount parm? */
-       flags |= MS_NODIRATIME | MS_NOATIME;
+       flags |= SB_NODIRATIME | SB_NOATIME;
 
        sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
        if (IS_ERR(sb)) {
@@ -739,7 +739,7 @@ cifs_do_mount(struct file_system_type *fs_type,
                        goto out_super;
                }
 
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
        }
 
        root = cifs_get_root(volume_info, sb);
index e185b28..b165835 100644
@@ -559,8 +559,8 @@ struct smb_vol {
                         CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
                         CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID)
 
-#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \
-                     MS_NODEV | MS_SYNCHRONOUS)
+#define CIFS_MS_MASK (SB_RDONLY | SB_MANDLOCK | SB_NOEXEC | SB_NOSUID | \
+                     SB_NODEV | SB_SYNCHRONOUS)
 
 struct cifs_mnt_data {
        struct cifs_sb_info *cifs_sb;
index 7c732cb..ecb9907 100644
@@ -985,7 +985,7 @@ retry_iget5_locked:
                }
 
                cifs_fattr_to_inode(inode, fattr);
-               if (sb->s_flags & MS_NOATIME)
+               if (sb->s_flags & SB_NOATIME)
                        inode->i_flags |= S_NOATIME | S_NOCMTIME;
                if (inode->i_state & I_NEW) {
                        inode->i_ino = hash;
index e067404..ed88ab8 100644
@@ -1406,7 +1406,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
        } while (rc == -EAGAIN);
 
        if (rc) {
-               cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc);
+               if (rc != -ENOENT)
+                       cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc);
                goto out;
        }
 
index 5331631..01346b8 100644
@@ -2678,27 +2678,27 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
        cifs_small_buf_release(req);
 
        rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
-       shdr = get_sync_hdr(rsp);
 
-       if (shdr->Status == STATUS_END_OF_FILE) {
+       if (rc) {
+               if (rc != -ENODATA) {
+                       cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
+                       cifs_dbg(VFS, "Send error in read = %d\n", rc);
+               }
                free_rsp_buf(resp_buftype, rsp_iov.iov_base);
-               return 0;
+               return rc == -ENODATA ? 0 : rc;
        }
 
-       if (rc) {
-               cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
-               cifs_dbg(VFS, "Send error in read = %d\n", rc);
-       } else {
-               *nbytes = le32_to_cpu(rsp->DataLength);
-               if ((*nbytes > CIFS_MAX_MSGSIZE) ||
-                   (*nbytes > io_parms->length)) {
-                       cifs_dbg(FYI, "bad length %d for count %d\n",
-                                *nbytes, io_parms->length);
-                       rc = -EIO;
-                       *nbytes = 0;
-               }
+       *nbytes = le32_to_cpu(rsp->DataLength);
+       if ((*nbytes > CIFS_MAX_MSGSIZE) ||
+           (*nbytes > io_parms->length)) {
+               cifs_dbg(FYI, "bad length %d for count %d\n",
+                        *nbytes, io_parms->length);
+               rc = -EIO;
+               *nbytes = 0;
        }
 
+       shdr = get_sync_hdr(rsp);
+
        if (*buf) {
                memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes);
                free_rsp_buf(resp_buftype, rsp_iov.iov_base);
index 52f975d..316af84 100644
@@ -117,7 +117,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
 #ifdef CONFIG_CIFS_POSIX
                if (!value)
                        goto out;
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
                                value, (const int)size,
                                ACL_TYPE_ACCESS, cifs_sb->local_nls,
@@ -129,7 +129,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
 #ifdef CONFIG_CIFS_POSIX
                if (!value)
                        goto out;
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
                                value, (const int)size,
                                ACL_TYPE_DEFAULT, cifs_sb->local_nls,
@@ -266,7 +266,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
 
        case XATTR_ACL_ACCESS:
 #ifdef CONFIG_CIFS_POSIX
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
                                value, size, ACL_TYPE_ACCESS,
                                cifs_sb->local_nls,
@@ -276,7 +276,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
 
        case XATTR_ACL_DEFAULT:
 #ifdef CONFIG_CIFS_POSIX
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
                                value, size, ACL_TYPE_DEFAULT,
                                cifs_sb->local_nls,
index 6f0a6a4..97424cf 100644
@@ -96,7 +96,7 @@ void coda_destroy_inodecache(void)
 static int coda_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
        return 0;
 }
 
@@ -188,7 +188,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
        mutex_unlock(&vc->vc_mutex);
 
        sb->s_fs_info = vc;
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
        sb->s_blocksize = 4096; /* XXXXX  what do we put here?? */
        sb->s_blocksize_bits = 12;
        sb->s_magic = CODA_SUPER_MAGIC;
index f937082..58e2fe4 100644
@@ -34,6 +34,7 @@ config CRAMFS_BLOCKDEV
 config CRAMFS_MTD
        bool "Support CramFs image directly mapped in physical memory"
        depends on CRAMFS && MTD
+       depends on CRAMFS=m || MTD=y
        default y if !CRAMFS_BLOCKDEV
        help
          This option allows the CramFs driver to load data directly from
index 9a2ab41..017b0ab 100644
@@ -505,7 +505,7 @@ static void cramfs_kill_sb(struct super_block *sb)
 static int cramfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -592,7 +592,7 @@ static int cramfs_finalize_super(struct super_block *sb,
        struct inode *root;
 
        /* Set it all up.. */
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        sb->s_op = &cramfs_ops;
        root = get_cramfs_inode(sb, cramfs_root, 0);
        if (IS_ERR(root))
index f2677c9..025d66a 100644
@@ -560,8 +560,8 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
         * Set the POSIX ACL flag based on whether they're enabled in the lower
         * mount.
         */
-       s->s_flags = flags & ~MS_POSIXACL;
-       s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
+       s->s_flags = flags & ~SB_POSIXACL;
+       s->s_flags |= path.dentry->d_sb->s_flags & SB_POSIXACL;
 
        /**
         * Force a read-only eCryptfs mount when:
@@ -569,7 +569,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
         *   2) The ecryptfs_encrypted_view mount option is specified
         */
        if (sb_rdonly(path.dentry->d_sb) || mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
-               s->s_flags |= MS_RDONLY;
+               s->s_flags |= SB_RDONLY;
 
        s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
        s->s_blocksize = path.dentry->d_sb->s_blocksize;
@@ -602,7 +602,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
        ecryptfs_set_dentry_private(s->s_root, root_info);
        root_info->lower_path = path;
 
-       s->s_flags |= MS_ACTIVE;
+       s->s_flags |= SB_ACTIVE;
        return dget(s->s_root);
 
 out_free:
index 65b5900..6ffb7ba 100644
@@ -116,7 +116,7 @@ static void destroy_inodecache(void)
 static int efs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -311,7 +311,7 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
 #ifdef DEBUG
                pr_info("forcing read-only mode\n");
 #endif
-               s->s_flags |= MS_RDONLY;
+               s->s_flags |= SB_RDONLY;
        }
        s->s_op   = &efs_superblock_operations;
        s->s_export_op = &efs_export_ops;
index 1d6243d..7eb8d21 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1216,15 +1216,14 @@ killed:
        return -EAGAIN;
 }
 
-char *get_task_comm(char *buf, struct task_struct *tsk)
+char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
 {
-       /* buf must be at least sizeof(tsk->comm) in size */
        task_lock(tsk);
-       strncpy(buf, tsk->comm, sizeof(tsk->comm));
+       strncpy(buf, tsk->comm, buf_size);
        task_unlock(tsk);
        return buf;
 }
-EXPORT_SYMBOL_GPL(get_task_comm);
+EXPORT_SYMBOL_GPL(__get_task_comm);
 
 /*
  * These functions flushes out all traces of the currently running executable
@@ -1350,9 +1349,14 @@ void setup_new_exec(struct linux_binprm * bprm)
 
        current->sas_ss_sp = current->sas_ss_size = 0;
 
-       /* Figure out dumpability. */
+       /*
+        * Figure out dumpability. Note that this checking only of current
+        * is wrong, but userspace depends on it. This should be testing
+        * bprm->secureexec instead.
+        */
        if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
-           bprm->secureexec)
+           !(uid_eq(current_euid(), current_uid()) &&
+             gid_eq(current_egid(), current_gid())))
                set_dumpable(current->mm, suid_dumpable);
        else
                set_dumpable(current->mm, SUID_DUMP_USER);
index e1b3724..33db133 100644
@@ -548,7 +548,7 @@ do_more:
        }
 
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
 
        group_adjust_blocks(sb, block_group, desc, bh2, group_freed);
@@ -1424,7 +1424,7 @@ allocated:
        percpu_counter_sub(&sbi->s_freeblocks_counter, num);
 
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
 
        *errp = 0;
index a1fc3da..6484199 100644
@@ -145,7 +145,7 @@ void ext2_free_inode (struct inode * inode)
        else
                ext2_release_inode(sb, block_group, is_directory);
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
 
        brelse(bitmap_bh);
@@ -517,7 +517,7 @@ repeat_in_this_group:
        goto fail;
 got:
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
        brelse(bitmap_bh);
 
index e2b6be0..7646818 100644
@@ -75,7 +75,7 @@ void ext2_error(struct super_block *sb, const char *function,
        if (test_opt(sb, ERRORS_RO)) {
                ext2_msg(sb, KERN_CRIT,
                             "error: remounting filesystem read-only");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 }
 
@@ -656,7 +656,7 @@ static int ext2_setup_super (struct super_block * sb,
                ext2_msg(sb, KERN_ERR,
                        "error: revision level too high, "
                        "forcing read-only mode");
-               res = MS_RDONLY;
+               res = SB_RDONLY;
        }
        if (read_only)
                return res;
@@ -924,9 +924,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_resuid = opts.s_resuid;
        sbi->s_resgid = opts.s_resgid;
 
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
                ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
-                MS_POSIXACL : 0);
+                SB_POSIXACL : 0);
        sb->s_iflags |= SB_I_CGROUPWB;
 
        if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
@@ -1178,7 +1178,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
                ext2_msg(sb, KERN_WARNING,
                        "warning: mounting ext3 filesystem as ext2");
        if (ext2_setup_super (sb, es, sb_rdonly(sb)))
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        ext2_write_super(sb);
        return 0;
 
@@ -1341,9 +1341,9 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
                         "dax flag with busy inodes while remounting");
                new_opts.s_mount_opt ^= EXT2_MOUNT_DAX;
        }
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out_set;
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
                    !(sbi->s_mount_state & EXT2_VALID_FS))
                        goto out_set;
@@ -1379,7 +1379,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
                 */
                sbi->s_mount_state = le16_to_cpu(es->s_state);
                if (!ext2_setup_super (sb, es, 0))
-                       sb->s_flags &= ~MS_RDONLY;
+                       sb->s_flags &= ~SB_RDONLY;
                spin_unlock(&sbi->s_lock);
 
                ext2_write_super(sb);
@@ -1392,8 +1392,8 @@ out_set:
        sbi->s_mount_opt = new_opts.s_mount_opt;
        sbi->s_resuid = new_opts.s_resuid;
        sbi->s_resgid = new_opts.s_resgid;
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
        spin_unlock(&sbi->s_lock);
 
        return 0;
index 07bca11..c941251 100644
@@ -4722,6 +4722,7 @@ retry:
                                                    EXT4_INODE_EOFBLOCKS);
                }
                ext4_mark_inode_dirty(handle, inode);
+               ext4_update_inode_fsync_trans(handle, inode, 1);
                ret2 = ext4_journal_stop(handle);
                if (ret2)
                        break;
index b4267d7..b32cf26 100644
@@ -816,6 +816,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
                struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);
 
+               if (IS_ERR(p))
+                       return ERR_CAST(p);
                if (p) {
                        int acl_size = p->a_count * sizeof(ext4_acl_entry);
 
index 0992d76..534a913 100644
@@ -149,6 +149,15 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
  */
 int ext4_inode_is_fast_symlink(struct inode *inode)
 {
+       if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+               int ea_blocks = EXT4_I(inode)->i_file_acl ?
+                               EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
+
+               if (ext4_has_inline_data(inode))
+                       return 0;
+
+               return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
+       }
        return S_ISLNK(inode->i_mode) && inode->i_size &&
               (inode->i_size < EXT4_N_BLOCKS * 4);
 }
@@ -2742,7 +2751,7 @@ static int ext4_writepages(struct address_space *mapping,
         * If the filesystem has aborted, it is read-only, so return
         * right away instead of dumping stack traces later on that
         * will obscure the real source of the problem.  We test
-        * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
+        * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
         * the latter could be true if the filesystem is mounted
         * read-only, and in that case, ext4_writepages should
         * *never* be called, so if that ever happens, we would want
@@ -5183,7 +5192,7 @@ static int ext4_do_update_inode(handle_t *handle,
 
        ext4_inode_csum_set(inode, raw_inode, ei);
        spin_unlock(&ei->i_raw_lock);
-       if (inode->i_sb->s_flags & MS_LAZYTIME)
+       if (inode->i_sb->s_flags & SB_LAZYTIME)
                ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
                                              bh->b_data);
 
index 798b3ac..e750d68 100644
@@ -1399,6 +1399,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
                               "falling back\n"));
        }
        nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+       if (!nblocks) {
+               ret = NULL;
+               goto cleanup_and_exit;
+       }
        start = EXT4_I(dir)->i_dir_start_lookup;
        if (start >= nblocks)
                start = 0;
index 0556cd0..7c46693 100644
@@ -422,7 +422,7 @@ static void ext4_handle_error(struct super_block *sb)
                 * before ->s_flags update
                 */
                smp_wmb();
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
@@ -635,7 +635,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
                 * before ->s_flags update
                 */
                smp_wmb();
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                if (EXT4_SB(sb)->s_journal)
                        jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
                save_error_info(sb, function, line);
@@ -1682,10 +1682,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                sb->s_flags |= SB_I_VERSION;
                return 1;
        case Opt_lazytime:
-               sb->s_flags |= MS_LAZYTIME;
+               sb->s_flags |= SB_LAZYTIME;
                return 1;
        case Opt_nolazytime:
-               sb->s_flags &= ~MS_LAZYTIME;
+               sb->s_flags &= ~SB_LAZYTIME;
                return 1;
        }
 
@@ -2116,7 +2116,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
        if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
                ext4_msg(sb, KERN_ERR, "revision level too high, "
                         "forcing read-only mode");
-               res = MS_RDONLY;
+               res = SB_RDONLY;
        }
        if (read_only)
                goto done;
@@ -2429,7 +2429,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
 
        if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
                /* don't clear list on RO mount w/ errors */
-               if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
+               if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
                        ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
                                  "clearing orphan list.\n");
                        es->s_last_orphan = 0;
@@ -2438,19 +2438,19 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                return;
        }
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
        }
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       sb->s_flags |= MS_ACTIVE;
+       sb->s_flags |= SB_ACTIVE;
 
        /*
         * Turn on quotas which were not enabled for read-only mounts if
         * filesystem has quota feature, so that they are updated correctly.
         */
-       if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
+       if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
                int ret = ext4_enable_quotas(sb);
 
                if (!ret)
@@ -2539,7 +2539,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                }
        }
 #endif
-       sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+       sb->s_flags = s_flags; /* Restore SB_RDONLY status */
 }
 
 /*
@@ -2741,7 +2741,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
 
        if (ext4_has_feature_readonly(sb)) {
                ext4_msg(sb, KERN_INFO, "filesystem is read-only");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                return 1;
        }
 
@@ -3623,8 +3623,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                sb->s_iflags |= SB_I_CGROUPWB;
        }
 
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
 
        if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
            (ext4_has_compat_features(sb) ||
@@ -4199,7 +4199,7 @@ no_journal:
        }
 
        if (ext4_setup_super(sb, es, sb_rdonly(sb)))
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
 
        /* determine the minimum size of new large inodes, if present */
        if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
@@ -4693,7 +4693,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
         * the clock is set in the future, and this will cause e2fsck
         * to complain and force a full file system check.
         */
-       if (!(sb->s_flags & MS_RDONLY))
+       if (!(sb->s_flags & SB_RDONLY))
                es->s_wtime = cpu_to_le32(get_seconds());
        if (sb->s_bdev->bd_part)
                es->s_kbytes_written =
@@ -5047,8 +5047,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
                ext4_abort(sb, "Abort forced by user");
 
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
 
        es = sbi->s_es;
 
@@ -5057,16 +5057,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
        }
 
-       if (*flags & MS_LAZYTIME)
-               sb->s_flags |= MS_LAZYTIME;
+       if (*flags & SB_LAZYTIME)
+               sb->s_flags |= SB_LAZYTIME;
 
-       if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
+       if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
                if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
                        err = -EROFS;
                        goto restore_opts;
                }
 
-               if (*flags & MS_RDONLY) {
+               if (*flags & SB_RDONLY) {
                        err = sync_filesystem(sb);
                        if (err < 0)
                                goto restore_opts;
@@ -5078,7 +5078,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                         * First of all, the unconditional stuff we have to do
                         * to disable replay of the journal when we next remount
                         */
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
 
                        /*
                         * OK, test if we are remounting a valid rw partition
@@ -5140,7 +5140,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                                ext4_clear_journal_err(sb, es);
                        sbi->s_mount_state = le16_to_cpu(es->s_state);
                        if (!ext4_setup_super(sb, es, 0))
-                               sb->s_flags &= ~MS_RDONLY;
+                               sb->s_flags &= ~SB_RDONLY;
                        if (ext4_has_feature_mmp(sb))
                                if (ext4_multi_mount_protect(sb,
                                                le64_to_cpu(es->s_mmp_block))) {
@@ -5164,7 +5164,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        }
 
        ext4_setup_system_zone(sb);
-       if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
+       if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY))
                ext4_commit_super(sb, 1);
 
 #ifdef CONFIG_QUOTA
@@ -5182,7 +5182,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        }
 #endif
 
-       *flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
+       *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
        ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
        kfree(orig_data);
        return 0;
index dd2e73e..4aa69bc 100644
@@ -617,17 +617,17 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
        if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
                return 0;
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
-               sbi->sb->s_flags &= ~MS_RDONLY;
+               sbi->sb->s_flags &= ~SB_RDONLY;
        }
 
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       sbi->sb->s_flags |= MS_ACTIVE;
+       sbi->sb->s_flags |= SB_ACTIVE;
 
        /* Turn on quotas so that they are updated correctly */
-       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
+       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
 #endif
 
        start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
@@ -658,7 +658,7 @@ out:
        if (quota_enabled)
                f2fs_quota_off_umount(sbi->sb);
 #endif
-       sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+       sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
 
        return err;
 }
index f4e094e..6abf26c 100644
@@ -2378,7 +2378,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
 
 static inline int f2fs_readonly(struct super_block *sb)
 {
-       return sb->s_flags & MS_RDONLY;
+       return sb->s_flags & SB_RDONLY;
 }
 
 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
index 5d5bba4..d844dcb 100644
@@ -1005,7 +1005,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
 
        cpc.reason = __get_cp_reason(sbi);
 gc_more:
-       if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
+       if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
                ret = -EINVAL;
                goto stop;
        }
index 92c57ac..b3a14b0 100644
@@ -598,16 +598,16 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
        int quota_enabled;
 #endif
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
-               sbi->sb->s_flags &= ~MS_RDONLY;
+               sbi->sb->s_flags &= ~SB_RDONLY;
        }
 
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       sbi->sb->s_flags |= MS_ACTIVE;
+       sbi->sb->s_flags |= SB_ACTIVE;
        /* Turn on quotas so that they are updated correctly */
-       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
+       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
 #endif
 
        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
@@ -671,7 +671,7 @@ out:
        if (quota_enabled)
                f2fs_quota_off_umount(sbi->sb);
 #endif
-       sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+       sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
 
        return ret ? ret: err;
 }
index a6c5dd4..708155d 100644
@@ -534,10 +534,10 @@ static int parse_options(struct super_block *sb, char *options)
 #endif
                        break;
                case Opt_lazytime:
-                       sb->s_flags |= MS_LAZYTIME;
+                       sb->s_flags |= SB_LAZYTIME;
                        break;
                case Opt_nolazytime:
-                       sb->s_flags &= ~MS_LAZYTIME;
+                       sb->s_flags &= ~SB_LAZYTIME;
                        break;
 #ifdef CONFIG_QUOTA
                case Opt_quota:
@@ -1168,7 +1168,7 @@ static void default_options(struct f2fs_sb_info *sbi)
        set_opt(sbi, INLINE_DENTRY);
        set_opt(sbi, EXTENT_CACHE);
        set_opt(sbi, NOHEAP);
-       sbi->sb->s_flags |= MS_LAZYTIME;
+       sbi->sb->s_flags |= SB_LAZYTIME;
        set_opt(sbi, FLUSH_MERGE);
        if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
                set_opt_mode(sbi, F2FS_MOUNT_LFS);
@@ -1236,7 +1236,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 #endif
 
        /* recover superblocks we couldn't write due to previous RO mount */
-       if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
+       if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
                err = f2fs_commit_super(sbi, false);
                f2fs_msg(sb, KERN_INFO,
                        "Try to recover all the superblocks, ret: %d", err);
@@ -1255,17 +1255,17 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
         * Previous and new state of filesystem is RO,
         * so skip checking GC and FLUSH_MERGE conditions.
         */
-       if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
+       if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
                goto skip;
 
 #ifdef CONFIG_QUOTA
-       if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
+       if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
                err = dquot_suspend(sb, -1);
                if (err < 0)
                        goto restore_opts;
        } else {
                /* dquot_resume needs RW */
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
                if (sb_any_quota_suspended(sb)) {
                        dquot_resume(sb, -1);
                } else if (f2fs_sb_has_quota_ino(sb)) {
@@ -1288,7 +1288,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
         * or if background_gc = off is passed in mount
         * option. Also sync the filesystem.
         */
-       if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
+       if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
                if (sbi->gc_thread) {
                        stop_gc_thread(sbi);
                        need_restart_gc = true;
@@ -1300,7 +1300,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
                need_stop_gc = true;
        }
 
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                writeback_inodes_sb(sb, WB_REASON_SYNC);
                sync_inodes_sb(sb);
 
@@ -1314,7 +1314,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
         * We stop issue flush thread if FS is mounted as RO
         * or if flush_merge is not passed in mount option.
         */
-       if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
+       if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
                clear_opt(sbi, FLUSH_MERGE);
                destroy_flush_cmd_control(sbi, false);
        } else {
@@ -1329,8 +1329,8 @@ skip:
                kfree(s_qf_names[i]);
 #endif
        /* Update the POSIXACL Flag */
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
 
        return 0;
 restore_gc:
@@ -2472,8 +2472,8 @@ try_onemore:
        sb->s_export_op = &f2fs_export_ops;
        sb->s_magic = F2FS_SUPER_MAGIC;
        sb->s_time_gran = 1;
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
        memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
 
        /* init f2fs-specific super block info */
index 48b2336..bac10de 100644
@@ -392,7 +392,7 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
                        memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
                        set_buffer_uptodate(c_bh);
                        mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
-                       if (sb->s_flags & MS_SYNCHRONOUS)
+                       if (sb->s_flags & SB_SYNCHRONOUS)
                                err = sync_dirty_buffer(c_bh);
                        brelse(c_bh);
                        if (err)
@@ -597,7 +597,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
                }
 
                if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
-                       if (sb->s_flags & MS_SYNCHRONOUS) {
+                       if (sb->s_flags & SB_SYNCHRONOUS) {
                                err = fat_sync_bhs(bhs, nr_bhs);
                                if (err)
                                        goto error;
@@ -612,7 +612,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
                fat_collect_bhs(bhs, &nr_bhs, &fatent);
        } while (cluster != FAT_ENT_EOF);
 
-       if (sb->s_flags & MS_SYNCHRONOUS) {
+       if (sb->s_flags & SB_SYNCHRONOUS) {
                err = fat_sync_bhs(bhs, nr_bhs);
                if (err)
                        goto error;
index 30c5239..20a0a89 100644
@@ -779,14 +779,14 @@ static void __exit fat_destroy_inodecache(void)
 
 static int fat_remount(struct super_block *sb, int *flags, char *data)
 {
-       int new_rdonly;
+       bool new_rdonly;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
-       *flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
+       *flags |= SB_NODIRATIME | (sbi->options.isvfat ? 0 : SB_NOATIME);
 
        sync_filesystem(sb);
 
        /* make sure we update state on remount. */
-       new_rdonly = *flags & MS_RDONLY;
+       new_rdonly = *flags & SB_RDONLY;
        if (new_rdonly != sb_rdonly(sb)) {
                if (new_rdonly)
                        fat_set_state(sb, 0, 0);
@@ -1352,7 +1352,7 @@ out:
        if (opts->unicode_xlate)
                opts->utf8 = 0;
        if (opts->nfs == FAT_NFS_NOSTALE_RO) {
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                sb->s_export_op = &fat_export_ops_nostale;
        }
 
@@ -1608,7 +1608,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                return -ENOMEM;
        sb->s_fs_info = sbi;
 
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
        sb->s_magic = MSDOS_SUPER_MAGIC;
        sb->s_op = &fat_sops;
        sb->s_export_op = &fat_export_ops;
index acc3aa3..f9bdc1e 100644
@@ -33,7 +33,7 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
        if (opts->errors == FAT_ERRORS_PANIC)
                panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id);
        else if (opts->errors == FAT_ERRORS_RO && !sb_rdonly(sb)) {
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                fat_msg(sb, KERN_ERR, "Filesystem has been set read-only");
        }
 }
index 7d6a105..d24d275 100644
@@ -646,7 +646,7 @@ static void setup(struct super_block *sb)
 {
        MSDOS_SB(sb)->dir_ops = &msdos_dir_inode_operations;
        sb->s_d_op = &msdos_dentry_operations;
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
 }
 
 static int msdos_fill_super(struct super_block *sb, void *data, int silent)
index 455ce5b..f989efa 100644
@@ -116,7 +116,7 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
 static int vxfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -220,7 +220,7 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
        int ret = -EINVAL;
        u32 j;
 
-       sbp->s_flags |= MS_RDONLY;
+       sbp->s_flags |= SB_RDONLY;
 
        infp = kzalloc(sizeof(*infp), GFP_KERNEL);
        if (!infp) {
index 08f5deb..cea4836 100644
@@ -490,7 +490,7 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 
        /* while holding I_WB_SWITCH, no one else can update the association */
        spin_lock(&inode->i_lock);
-       if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
+       if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
            inode->i_state & (I_WB_SWITCH | I_FREEING) ||
            inode_to_wb(inode) == isw->new_wb) {
                spin_unlock(&inode->i_lock);
index 2f504d6..624f18b 100644
@@ -130,7 +130,7 @@ static void fuse_evict_inode(struct inode *inode)
 {
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
-       if (inode->i_sb->s_flags & MS_ACTIVE) {
+       if (inode->i_sb->s_flags & SB_ACTIVE) {
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_inode *fi = get_fuse_inode(inode);
                fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
@@ -141,7 +141,7 @@ static void fuse_evict_inode(struct inode *inode)
 static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if (*flags & MS_MANDLOCK)
+       if (*flags & SB_MANDLOCK)
                return -EINVAL;
 
        return 0;
@@ -1056,10 +1056,10 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        int is_bdev = sb->s_bdev != NULL;
 
        err = -EINVAL;
-       if (sb->s_flags & MS_MANDLOCK)
+       if (sb->s_flags & SB_MANDLOCK)
                goto err;
 
-       sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
+       sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
 
        if (!parse_fuse_opt(data, &d, is_bdev))
                goto err;
@@ -1109,9 +1109,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
                goto err_dev_free;
 
        /* Handle umasking inside the fuse code */
-       if (sb->s_flags & MS_POSIXACL)
+       if (sb->s_flags & SB_POSIXACL)
                fc->dont_mask = 1;
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 
        fc->default_permissions = d.default_permissions;
        fc->allow_other = d.allow_other;
index a3711f5..ad55eb8 100644
@@ -1065,15 +1065,15 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
        sdp->sd_args = *args;
 
        if (sdp->sd_args.ar_spectator) {
-                sb->s_flags |= MS_RDONLY;
+                sb->s_flags |= SB_RDONLY;
                set_bit(SDF_RORECOVERY, &sdp->sd_flags);
        }
        if (sdp->sd_args.ar_posix_acl)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
        if (sdp->sd_args.ar_nobarrier)
                set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
 
-       sb->s_flags |= MS_NOSEC;
+       sb->s_flags |= SB_NOSEC;
        sb->s_magic = GFS2_MAGIC;
        sb->s_op = &gfs2_super_ops;
        sb->s_d_op = &gfs2_dops;
@@ -1257,7 +1257,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
        struct gfs2_args args;
        struct gfs2_sbd *sdp;
 
-       if (!(flags & MS_RDONLY))
+       if (!(flags & SB_RDONLY))
                mode |= FMODE_WRITE;
 
        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
@@ -1313,15 +1313,15 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
 
        if (s->s_root) {
                error = -EBUSY;
-               if ((flags ^ s->s_flags) & MS_RDONLY)
+               if ((flags ^ s->s_flags) & SB_RDONLY)
                        goto error_super;
        } else {
                snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
                sb_set_blocksize(s, block_size(bdev));
-               error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
+               error = fill_super(s, &args, flags & SB_SILENT ? 1 : 0);
                if (error)
                        goto error_super;
-               s->s_flags |= MS_ACTIVE;
+               s->s_flags |= SB_ACTIVE;
                bdev->bd_super = s;
        }
 
@@ -1365,7 +1365,7 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
                pr_warn("gfs2 mount does not exist\n");
                return ERR_CAST(s);
        }
-       if ((flags ^ s->s_flags) & MS_RDONLY) {
+       if ((flags ^ s->s_flags) & SB_RDONLY) {
                deactivate_locked_super(s);
                return ERR_PTR(-EBUSY);
        }
index 9cb5c9a..d81d46e 100644
@@ -1256,10 +1256,10 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
                return -EINVAL;
 
        if (sdp->sd_args.ar_spectator)
-               *flags |= MS_RDONLY;
+               *flags |= SB_RDONLY;
 
-       if ((sb->s_flags ^ *flags) & MS_RDONLY) {
-               if (*flags & MS_RDONLY)
+       if ((sb->s_flags ^ *flags) & SB_RDONLY) {
+               if (*flags & SB_RDONLY)
                        error = gfs2_make_fs_ro(sdp);
                else
                        error = gfs2_make_fs_rw(sdp);
@@ -1269,9 +1269,9 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
 
        sdp->sd_args = args;
        if (sdp->sd_args.ar_posix_acl)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
        else
-               sb->s_flags &= ~MS_POSIXACL;
+               sb->s_flags &= ~SB_POSIXACL;
        if (sdp->sd_args.ar_nobarrier)
                set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
        else
index a85ca8b..ca8b72d 100644
@@ -117,7 +117,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
                kfree(tr);
        up_read(&sdp->sd_log_flush_lock);
 
-       if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
+       if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
                gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
        if (alloced)
                sb_end_intwrite(sdp->sd_vfs);
index 894994d..460281b 100644
@@ -204,11 +204,11 @@ int hfs_mdb_get(struct super_block *sb)
        attrib = mdb->drAtrb;
        if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
                pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended.  mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) {
                pr_warn("filesystem is marked locked, mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        if (!sb_rdonly(sb)) {
                /* Mark the volume uncleanly unmounted in case we crash */
index 7e0d65e..1738767 100644
@@ -114,18 +114,18 @@ static int hfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int hfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       *flags |= SB_NODIRATIME;
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
-       if (!(*flags & MS_RDONLY)) {
+       if (!(*flags & SB_RDONLY)) {
                if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
                        pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended.  leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                } else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) {
                        pr_warn("filesystem is marked locked, leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                }
        }
        return 0;
@@ -407,7 +407,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_op = &hfs_super_operations;
        sb->s_xattr = hfs_xattr_handlers;
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
        mutex_init(&sbi->bitmap_lock);
 
        res = hfs_mdb_get(sb);
index e5bb2de..1d458b7 100644
@@ -329,9 +329,9 @@ static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
-       if (!(*flags & MS_RDONLY)) {
+       if (!(*flags & SB_RDONLY)) {
                struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
                int force = 0;
 
@@ -340,20 +340,20 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
 
                if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
                        pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended.  leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                } else if (force) {
                        /* nothing */
                } else if (vhdr->attributes &
                                cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                        pr_warn("filesystem is marked locked, leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                } else if (vhdr->attributes &
                                cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
                        pr_warn("filesystem is marked journaled, leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                }
        }
        return 0;
@@ -455,16 +455,16 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 
        if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
                pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended.  mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        } else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
                /* nothing */
        } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                pr_warn("Filesystem is marked locked, mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        } else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
                        !sb_rdonly(sb)) {
                pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        err = -EINVAL;
index 8d6b7e3..c83ece7 100644
@@ -150,7 +150,6 @@ static int hpfs_readdir(struct file *file, struct dir_context *ctx)
                        if (unlikely(ret < 0))
                                goto out;
                        ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1;
-                       file->f_version = inode->i_version;
                }
                next_pos = ctx->pos;
                if (!(de = map_pos_dirent(inode, &next_pos, &qbh))) {
index 3b83456..a4ad18a 100644
@@ -419,7 +419,6 @@ int hpfs_add_dirent(struct inode *i,
                c = 1;
                goto ret;
        }       
-       i->i_version++;
        c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0);
        ret:
        return c;
@@ -726,7 +725,6 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de,
                        return 2;
                }
        }
-       i->i_version++;
        for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1);
        hpfs_delete_de(i->i_sb, dnode, de);
        hpfs_mark_4buffers_dirty(qbh);
index e0e60b1..7c49f1e 100644
@@ -288,7 +288,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
                                        goto bail;
                                }
                                if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) {
-                                       if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok;
+                                       if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & SB_RDONLY) goto ok;
                                        hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
                                        goto bail;
                                }
index 1516fb4..f2c3ebc 100644
@@ -78,7 +78,7 @@ void hpfs_error(struct super_block *s, const char *fmt, ...)
                        else {
                                pr_cont("; remounting read-only\n");
                                mark_dirty(s, 0);
-                               s->s_flags |= MS_RDONLY;
+                               s->s_flags |= SB_RDONLY;
                        }
                } else if (sb_rdonly(s))
                                pr_cont("; going on - but anything won't be destroyed because it's read-only\n");
@@ -235,7 +235,6 @@ static struct inode *hpfs_alloc_inode(struct super_block *sb)
        ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;
-       ei->vfs_inode.i_version = 1;
        return &ei->vfs_inode;
 }
 
@@ -457,7 +456,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
 
        sync_filesystem(s);
 
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
 
        hpfs_lock(s);
        uid = sbi->sb_uid; gid = sbi->sb_gid;
@@ -488,7 +487,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
        sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk;
        sbi->sb_err = errs; sbi->sb_timeshift = timeshift;
 
-       if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
+       if (!(*flags & SB_RDONLY)) mark_dirty(s, 1);
 
        hpfs_unlock(s);
        return 0;
@@ -614,7 +613,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
                goto bail4;
        }
 
-       s->s_flags |= MS_NOATIME;
+       s->s_flags |= SB_NOATIME;
 
        /* Fill superblock stuff */
        s->s_magic = HPFS_SUPER_MAGIC;
index 1e76730..8a85f3f 100644
@@ -639,11 +639,11 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
                /*
-                * page_put due to reference from alloc_huge_page()
                 * unlock_page because locked by add_to_page_cache()
+                * page_put due to reference from alloc_huge_page()
                 */
-               put_page(page);
                unlock_page(page);
+               put_page(page);
        }
 
        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
index fd40102..03102d6 100644
@@ -416,7 +416,7 @@ void inode_add_lru(struct inode *inode)
 {
        if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
                                I_FREEING | I_WILL_FREE)) &&
-           !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
+           !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
                inode_lru_list_add(inode);
 }
 
@@ -595,7 +595,7 @@ static void dispose_list(struct list_head *head)
  * @sb:                superblock to operate on
  *
  * Make sure that no inodes with zero refcount are retained.  This is
- * called by superblock shutdown after having MS_ACTIVE flag removed,
+ * called by superblock shutdown after having SB_ACTIVE flag removed,
  * so any inode reaching zero refcount during or after that call will
  * be immediately evicted.
  */
@@ -1492,7 +1492,7 @@ static void iput_final(struct inode *inode)
        else
                drop = generic_drop_inode(inode);
 
-       if (!drop && (sb->s_flags & MS_ACTIVE)) {
+       if (!drop && (sb->s_flags & SB_ACTIVE)) {
                inode_add_lru(inode);
                spin_unlock(&inode->i_lock);
                return;
@@ -1644,7 +1644,7 @@ int generic_update_time(struct inode *inode, struct timespec *time, int flags)
        if (flags & S_MTIME)
                inode->i_mtime = *time;
 
-       if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
+       if (!(inode->i_sb->s_flags & SB_LAZYTIME) || (flags & S_VERSION))
                iflags |= I_DIRTY_SYNC;
        __mark_inode_dirty(inode, iflags);
        return 0;
@@ -1691,7 +1691,7 @@ bool __atime_needs_update(const struct path *path, struct inode *inode,
 
        if (IS_NOATIME(inode))
                return false;
-       if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+       if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
                return false;
 
        if (mnt->mnt_flags & MNT_NOATIME)
index 447a24d..bc258a4 100644
@@ -114,7 +114,7 @@ static void destroy_inodecache(void)
 static int isofs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if (!(*flags & MS_RDONLY))
+       if (!(*flags & SB_RDONLY))
                return -EROFS;
        return 0;
 }
index e96c6b0..d8c274d 100644
@@ -409,10 +409,10 @@ int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
                mutex_unlock(&c->alloc_sem);
        }
 
-       if (!(*flags & MS_RDONLY))
+       if (!(*flags & SB_RDONLY))
                jffs2_start_garbage_collect_thread(c);
 
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
        return 0;
 }
 
index 824e61e..c2fbec1 100644
@@ -59,7 +59,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
 }
 
 
-#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY)
+#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & SB_RDONLY)
 
 #define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) )
 #ifndef CONFIG_JFFS2_FS_WRITEBUFFER
index 153f1c6..f60dee7 100644
@@ -301,10 +301,10 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_op = &jffs2_super_operations;
        sb->s_export_op = &jffs2_export_ops;
-       sb->s_flags = sb->s_flags | MS_NOATIME;
+       sb->s_flags = sb->s_flags | SB_NOATIME;
        sb->s_xattr = jffs2_xattr_handlers;
 #ifdef CONFIG_JFFS2_FS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
        ret = jffs2_do_fill_super(sb, data, silent);
        return ret;
index 2f7b3af..90373ae 100644
@@ -87,7 +87,7 @@ static void jfs_handle_error(struct super_block *sb)
        else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
                jfs_err("ERROR: (device %s): remounting filesystem as read-only",
                        sb->s_id);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        /* nothing is done for continue beyond marking the superblock dirty */
@@ -477,7 +477,7 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
                        return rc;
        }
 
-       if (sb_rdonly(sb) && !(*flags & MS_RDONLY)) {
+       if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
                /*
                 * Invalidate any previously read metadata.  fsck may have
                 * changed the on-disk data since we mounted r/o
@@ -488,12 +488,12 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
                ret = jfs_mount_rw(sb, 1);
 
                /* mark the fs r/w for quota activity */
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 
                dquot_resume(sb, -1);
                return ret;
        }
-       if (!sb_rdonly(sb) && (*flags & MS_RDONLY)) {
+       if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
                rc = dquot_suspend(sb, -1);
                if (rc < 0)
                        return rc;
@@ -545,7 +545,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
        sbi->flag = flag;
 
 #ifdef CONFIG_JFS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
 
        if (newLVSize) {
index 95a7c88..26dd9a5 100644
@@ -335,7 +335,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
                        deactivate_locked_super(sb);
                        return ERR_PTR(error);
                }
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
 
                mutex_lock(&kernfs_mutex);
                list_add(&info->node, &root->supers);
index 3aabe55..7ff3cb9 100644
@@ -246,7 +246,7 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
        struct inode *root;
        struct qstr d_name = QSTR_INIT(name, strlen(name));
 
-       s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
+       s = sget_userns(fs_type, NULL, set_anon_super, SB_KERNMOUNT|SB_NOUSER,
                        &init_user_ns, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
@@ -277,7 +277,7 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
        d_instantiate(dentry, root);
        s->s_root = dentry;
        s->s_d_op = dops;
-       s->s_flags |= MS_ACTIVE;
+       s->s_flags |= SB_ACTIVE;
        return dget(s->s_root);
 
 Enomem:
@@ -578,7 +578,7 @@ int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *c
        spin_lock(&pin_fs_lock);
        if (unlikely(!*mount)) {
                spin_unlock(&pin_fs_lock);
-               mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);
+               mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
                if (IS_ERR(mnt))
                        return PTR_ERR(mnt);
                spin_lock(&pin_fs_lock);
index 0d4e590..826a891 100644
@@ -578,8 +578,10 @@ static void nlm_complain_hosts(struct net *net)
 
                if (ln->nrhosts == 0)
                        return;
-               printk(KERN_WARNING "lockd: couldn't shutdown host module for net %p!\n", net);
-               dprintk("lockd: %lu hosts left in net %p:\n", ln->nrhosts, net);
+               pr_warn("lockd: couldn't shutdown host module for net %x!\n",
+                       net->ns.inum);
+               dprintk("lockd: %lu hosts left in net %x:\n", ln->nrhosts,
+                       net->ns.inum);
        } else {
                if (nrhosts == 0)
                        return;
@@ -590,9 +592,9 @@ static void nlm_complain_hosts(struct net *net)
        for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
-               dprintk("       %s (cnt %d use %d exp %ld net %p)\n",
+               dprintk("       %s (cnt %d use %d exp %ld net %x)\n",
                        host->h_name, atomic_read(&host->h_count),
-                       host->h_inuse, host->h_expires, host->net);
+                       host->h_inuse, host->h_expires, host->net->ns.inum);
        }
 }
 
@@ -605,7 +607,8 @@ nlm_shutdown_hosts_net(struct net *net)
        mutex_lock(&nlm_host_mutex);
 
        /* First, make all hosts eligible for gc */
-       dprintk("lockd: nuking all hosts in net %p...\n", net);
+       dprintk("lockd: nuking all hosts in net %x...\n",
+               net ? net->ns.inum : 0);
        for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
@@ -618,9 +621,8 @@ nlm_shutdown_hosts_net(struct net *net)
 
        /* Then, perform a garbage collection pass */
        nlm_gc_hosts(net);
-       mutex_unlock(&nlm_host_mutex);
-
        nlm_complain_hosts(net);
+       mutex_unlock(&nlm_host_mutex);
 }
 
 /*
@@ -646,7 +648,8 @@ nlm_gc_hosts(struct net *net)
        struct hlist_node *next;
        struct nlm_host *host;
 
-       dprintk("lockd: host garbage collection for net %p\n", net);
+       dprintk("lockd: host garbage collection for net %x\n",
+               net ? net->ns.inum : 0);
        for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
@@ -662,9 +665,10 @@ nlm_gc_hosts(struct net *net)
                if (atomic_read(&host->h_count) || host->h_inuse
                 || time_before(jiffies, host->h_expires)) {
                        dprintk("nlm_gc_hosts skipping %s "
-                               "(cnt %d use %d exp %ld net %p)\n",
+                               "(cnt %d use %d exp %ld net %x)\n",
                                host->h_name, atomic_read(&host->h_count),
-                               host->h_inuse, host->h_expires, host->net);
+                               host->h_inuse, host->h_expires,
+                               host->net->ns.inum);
                        continue;
                }
                nlm_destroy_host_locked(host);
index 9fbbd11..96cfb29 100644
@@ -110,7 +110,8 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
        clnt = nsm_create(host->net, host->nodename);
        if (IS_ERR(clnt)) {
                dprintk("lockd: failed to create NSM upcall transport, "
-                       "status=%ld, net=%p\n", PTR_ERR(clnt), host->net);
+                       "status=%ld, net=%x\n", PTR_ERR(clnt),
+                       host->net->ns.inum);
                return PTR_ERR(clnt);
        }
 
index a8e3777..9c36d61 100644
@@ -57,6 +57,9 @@ static struct task_struct     *nlmsvc_task;
 static struct svc_rqst         *nlmsvc_rqst;
 unsigned long                  nlmsvc_timeout;
 
+atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0);
+DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq);
+
 unsigned int lockd_net_id;
 
 /*
@@ -259,7 +262,7 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
        if (error < 0)
                goto err_bind;
        set_grace_period(net);
-       dprintk("lockd_up_net: per-net data created; net=%p\n", net);
+       dprintk("%s: per-net data created; net=%x\n", __func__, net->ns.inum);
        return 0;
 
 err_bind:
@@ -274,12 +277,15 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
        if (ln->nlmsvc_users) {
                if (--ln->nlmsvc_users == 0) {
                        nlm_shutdown_hosts_net(net);
+                       cancel_delayed_work_sync(&ln->grace_period_end);
+                       locks_end_grace(&ln->lockd_manager);
                        svc_shutdown_net(serv, net);
-                       dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
+                       dprintk("%s: per-net data destroyed; net=%x\n",
+                               __func__, net->ns.inum);
                }
        } else {
-               printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
-                               nlmsvc_task, net);
+               pr_err("%s: no users! task=%p, net=%x\n",
+                       __func__, nlmsvc_task, net->ns.inum);
                BUG();
        }
 }
@@ -290,7 +296,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
        struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
        struct sockaddr_in sin;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nlm_ntf_refcnt))
                goto out;
 
        if (nlmsvc_rqst) {
@@ -301,6 +308,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
                svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
                        (struct sockaddr *)&sin);
        }
+       atomic_dec(&nlm_ntf_refcnt);
+       wake_up(&nlm_ntf_wq);
 
 out:
        return NOTIFY_DONE;
@@ -317,7 +326,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
        struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
        struct sockaddr_in6 sin6;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nlm_ntf_refcnt))
                goto out;
 
        if (nlmsvc_rqst) {
@@ -329,6 +339,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
                svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
                        (struct sockaddr *)&sin6);
        }
+       atomic_dec(&nlm_ntf_refcnt);
+       wake_up(&nlm_ntf_wq);
 
 out:
        return NOTIFY_DONE;
@@ -345,10 +357,12 @@ static void lockd_unregister_notifiers(void)
 #if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
 #endif
+       wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0);
 }
 
 static void lockd_svc_exit_thread(void)
 {
+       atomic_dec(&nlm_ntf_refcnt);
        lockd_unregister_notifiers();
        svc_exit_thread(nlmsvc_rqst);
 }
@@ -373,6 +387,7 @@ static int lockd_start_svc(struct svc_serv *serv)
                goto out_rqst;
        }
 
+       atomic_inc(&nlm_ntf_refcnt);
        svc_sock_update_bufs(serv);
        serv->sv_maxconn = nlm_max_connections;
 
@@ -676,6 +691,17 @@ static int lockd_init_net(struct net *net)
 
 static void lockd_exit_net(struct net *net)
 {
+       struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+       WARN_ONCE(!list_empty(&ln->lockd_manager.list),
+                 "net %x %s: lockd_manager.list is not empty\n",
+                 net->ns.inum, __func__);
+       WARN_ONCE(!list_empty(&ln->nsm_handles),
+                 "net %x %s: nsm_handles list is not empty\n",
+                 net->ns.inum, __func__);
+       WARN_ONCE(delayed_work_pending(&ln->grace_period_end),
+                 "net %x %s: grace_period_end was not cancelled\n",
+                 net->ns.inum, __func__);
 }
 
 static struct pernet_operations lockd_net_ops = {
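
The lockd hunks above add a small shutdown protocol around the address
notifiers: each callback takes a reference with atomic_inc_not_zero() so it
never touches nlmsvc_rqst after the service has gone down, and the teardown
path waits on nlm_ntf_wq until every in-flight callback has dropped its
reference again.  Below is a minimal, stand-alone module-style sketch of the
same pattern; the demo_* names are invented for illustration and this is a
sketch of the idiom, not the lockd code itself.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t ntf_refcnt = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ntf_wq);

static int demo_inetaddr_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
{
        /* Skip unless the service is up (refcnt > 0). */
        if (event != NETDEV_DOWN || !atomic_inc_not_zero(&ntf_refcnt))
                return NOTIFY_DONE;

        /* ... act on the address change while holding the reference ... */

        atomic_dec(&ntf_refcnt);
        wake_up(&ntf_wq);
        return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
        .notifier_call = demo_inetaddr_event,
};

static int __init demo_init(void)
{
        int err;

        atomic_inc(&ntf_refcnt);        /* the "service" is now up */
        err = register_inetaddr_notifier(&demo_nb);
        if (err)
                atomic_dec(&ntf_refcnt);
        return err;
}

static void __exit demo_exit(void)
{
        atomic_dec(&ntf_refcnt);        /* service going down */
        unregister_inetaddr_notifier(&demo_nb);
        /* wait for callbacks that raced with the shutdown */
        wait_event(ntf_wq, atomic_read(&ntf_refcnt) == 0);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
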
index a563ddb..4ec3d6e 100644 (file)
@@ -370,7 +370,7 @@ nlmsvc_mark_resources(struct net *net)
 {
        struct nlm_host hint;
 
-       dprintk("lockd: nlmsvc_mark_resources for net %p\n", net);
+       dprintk("lockd: %s for net %x\n", __func__, net ? net->ns.inum : 0);
        hint.net = net;
        nlm_traverse_files(&hint, nlmsvc_mark_host, NULL);
 }
index 1bd71c4..21b4dfa 100644 (file)
 
 static inline bool is_remote_lock(struct file *filp)
 {
-       return likely(!(filp->f_path.dentry->d_sb->s_flags & MS_NOREMOTELOCK));
+       return likely(!(filp->f_path.dentry->d_sb->s_flags & SB_NOREMOTELOCK));
 }
 
 static bool lease_breaking(struct file_lock *fl)
index d818fd2..b8b8b9c 100644 (file)
@@ -269,6 +269,9 @@ static unsigned long mb_cache_count(struct shrinker *shrink,
        struct mb_cache *cache = container_of(shrink, struct mb_cache,
                                              c_shrink);
 
+       /* Unlikely, but not impossible */
+       if (unlikely(cache->c_entry_count < 0))
+               return 0;
        return cache->c_entry_count;
 }
 
index b6829d6..72e308c 100644 (file)
@@ -125,9 +125,9 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
 
        sync_filesystem(sb);
        ms = sbi->s_ms;
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                if (ms->s_state & MINIX_VALID_FS ||
                    !(sbi->s_mount_state & MINIX_VALID_FS))
                        return 0;
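
The minix hunk above is the first of many mechanical MS_* -> SB_* conversions
in this section: in-kernel code now tests and sets superblock state through
the SB_* flags on sb->s_flags, while the MS_* constants remain the userspace
mount(2) ABI.  A minimal sketch of the in-kernel remount idiom, using a
hypothetical demo_remount() helper rather than any filesystem's actual code:

#include <linux/fs.h>

/* Hypothetical ->remount_fs() body showing the SB_* idiom. */
static int demo_remount(struct super_block *sb, int *flags, char *data)
{
        sync_filesystem(sb);

        /* ro/rw state unchanged: nothing to do */
        if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;

        if (*flags & SB_RDONLY)
                sb->s_flags |= SB_RDONLY;       /* going read-only */
        else
                sb->s_flags &= ~SB_RDONLY;      /* going read-write */

        return 0;
}
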
index f0c7a7b..9cc91fb 100644 (file)
@@ -1129,18 +1129,9 @@ static int follow_automount(struct path *path, struct nameidata *nd,
         * of the daemon to instantiate them before they can be used.
         */
        if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
-                          LOOKUP_OPEN | LOOKUP_CREATE |
-                          LOOKUP_AUTOMOUNT))) {
-               /* Positive dentry that isn't meant to trigger an
-                * automount, EISDIR will allow it to be used,
-                * otherwise there's no mount here "now" so return
-                * ENOENT.
-                */
-               if (path->dentry->d_inode)
-                       return -EISDIR;
-               else
-                       return -ENOENT;
-       }
+                          LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
+           path->dentry->d_inode)
+               return -EISDIR;
 
        if (path->dentry->d_sb->s_user_ns != &init_user_ns)
                return -EACCES;
index e158ec6..9d1374a 100644 (file)
@@ -2826,6 +2826,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
                            SB_DIRSYNC |
                            SB_SILENT |
                            SB_POSIXACL |
+                           SB_LAZYTIME |
                            SB_I_VERSION);
 
        if (flags & MS_REMOUNT)
index 129f193..41de88c 100644 (file)
@@ -103,7 +103,7 @@ static void destroy_inodecache(void)
 static int ncp_remount(struct super_block *sb, int *flags, char* data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
        return 0;
 }
 
@@ -547,7 +547,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
        else
                default_bufsize = 1024;
 
-       sb->s_flags |= MS_NODIRATIME;   /* probably even noatime */
+       sb->s_flags |= SB_NODIRATIME;   /* probably even noatime */
        sb->s_maxbytes = 0xFFFFFFFFU;
        sb->s_blocksize = 1024; /* Eh...  Is this correct? */
        sb->s_blocksize_bits = 10;
index 0ac2fb1..b9129e2 100644 (file)
@@ -291,12 +291,23 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
        const struct sockaddr *sap = data->addr;
        struct nfs_net *nn = net_generic(data->net, nfs_net_id);
 
+again:
        list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
                const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
                /* Don't match clients that failed to initialise properly */
                if (clp->cl_cons_state < 0)
                        continue;
 
+               /* If a client is still initializing then we need to wait */
+               if (clp->cl_cons_state > NFS_CS_READY) {
+                       refcount_inc(&clp->cl_count);
+                       spin_unlock(&nn->nfs_client_lock);
+                       nfs_wait_client_init_complete(clp);
+                       nfs_put_client(clp);
+                       spin_lock(&nn->nfs_client_lock);
+                       goto again;
+               }
+
                /* Different NFS versions cannot share the same nfs_client */
                if (clp->rpc_ops != data->nfs_mod->rpc_ops)
                        continue;
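
The nfs_match_client() change above introduces a sleep-and-rescan loop: when a
matching client is still initialising, the caller pins it, drops
nfs_client_lock, waits for initialisation to complete, and then rescans the
list from the top.  Here is a small userspace analogue of that control flow
using a condition variable; every identifier below is invented and none of it
is kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct entry {
        struct entry *next;
        bool ready;     /* set (and list_cond broadcast) once init completes */
        int key;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  list_cond = PTHREAD_COND_INITIALIZER;
static struct entry *entries;

static struct entry *find_entry(int key)
{
        struct entry *e;

        pthread_mutex_lock(&list_lock);
again:
        for (e = entries; e; e = e->next) {
                if (e->key != key)
                        continue;
                if (!e->ready) {
                        /* drop the lock, wait for progress, rescan from the top */
                        pthread_cond_wait(&list_cond, &list_lock);
                        goto again;
                }
                break;
        }
        pthread_mutex_unlock(&list_lock);
        return e;
}

int main(void)
{
        static struct entry e = { .ready = true, .key = 42 };

        entries = &e;
        return find_entry(42) == &e ? 0 : 1;
}
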
index e51ae52..2f3f867 100644 (file)
@@ -1256,7 +1256,7 @@ static int nfs_dentry_delete(const struct dentry *dentry)
                /* Unhash it, so that ->d_iput() would be called */
                return 1;
        }
-       if (!(dentry->d_sb->s_flags & MS_ACTIVE)) {
+       if (!(dentry->d_sb->s_flags & SB_ACTIVE)) {
                /* Unhash it, so that ancestors of killed async unlink
                 * files will be cleaned up during umount */
                return 1;
index 38b93d5..b992d23 100644 (file)
@@ -752,7 +752,7 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
         * Note that we only have to check the vfsmount flags here:
         *  - NFS always sets S_NOATIME by so checking it would give a
         *    bogus result
-        *  - NFS never sets MS_NOATIME or MS_NODIRATIME so there is
+        *  - NFS never sets SB_NOATIME or SB_NODIRATIME so there is
         *    no point in checking those.
         */
        if ((path->mnt->mnt_flags & MNT_NOATIME) ||
index 5ab17fd..8357ff6 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/nfs_page.h>
 #include <linux/wait_bit.h>
 
-#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
+#define NFS_MS_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
 
 extern const struct export_operations nfs_export_ops;
 
index 12bbab0..65a7e5d 100644 (file)
@@ -404,15 +404,19 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
        if (error < 0)
                goto error;
 
-       if (!nfs4_has_session(clp))
-               nfs_mark_client_ready(clp, NFS_CS_READY);
-
        error = nfs4_discover_server_trunking(clp, &old);
        if (error < 0)
                goto error;
 
-       if (clp != old)
+       if (clp != old) {
                clp->cl_preserve_clid = true;
+               /*
+                * Mark the client as having failed initialization so other
+                * processes walking the nfs_client_list in nfs_match_client()
+                * won't try to use it.
+                */
+               nfs_mark_client_ready(clp, -EPERM);
+       }
        nfs_put_client(clp);
        clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
        return old;
@@ -539,6 +543,9 @@ int nfs40_walk_client_list(struct nfs_client *new,
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
 
+               if (pos == new)
+                       goto found;
+
                status = nfs4_match_client(pos, new, &prev, nn);
                if (status < 0)
                        goto out_unlock;
@@ -559,6 +566,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
                 * way that a SETCLIENTID_CONFIRM to pos can succeed is
                 * if new and pos point to the same server:
                 */
+found:
                refcount_inc(&pos->cl_count);
                spin_unlock(&nn->nfs_client_lock);
 
@@ -572,6 +580,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
                case 0:
                        nfs4_swap_callback_idents(pos, new);
                        pos->cl_confirm = new->cl_confirm;
+                       nfs_mark_client_ready(pos, NFS_CS_READY);
 
                        prev = NULL;
                        *result = pos;
index 54fd56d..e4f4a09 100644 (file)
@@ -71,8 +71,8 @@ const nfs4_stateid zero_stateid = {
 };
 const nfs4_stateid invalid_stateid = {
        {
-               .seqid = cpu_to_be32(0xffffffffU),
-               .other = { 0 },
+               /* Funky initialiser keeps older gcc versions happy */
+               .data = { 0xff, 0xff, 0xff, 0xff, 0 },
        },
        .type = NFS4_INVALID_STATEID_TYPE,
 };
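
The invalid_stateid hunk above replaces a designated initialiser of the nested
seqid/other members with one that goes through the union's first member, the
raw byte array, which the in-tree comment says keeps older gcc versions happy.
A stand-alone illustration of the accepted form follows; the types here are
invented stand-ins, not the kernel's nfs4_stateid:

#include <stdint.h>
#include <stdio.h>

typedef struct {
        union {
                unsigned char data[16];
                struct {
                        uint32_t seqid;
                        unsigned char other[12];
                };
        };
} demo_stateid_t;

/* Initialise via the union's first member only. */
static const demo_stateid_t invalid = {
        .data = { 0xff, 0xff, 0xff, 0xff },
};

int main(void)
{
        printf("first bytes: %02x %02x %02x %02x\n",
               invalid.data[0], invalid.data[1],
               invalid.data[2], invalid.data[3]);
        return 0;
}

This prints "first bytes: ff ff ff ff"; the seqid/other view aliases the same
storage, so the effect matches spelling out the individual fields.
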
index 43cadb2..29bacdc 100644 (file)
@@ -813,9 +813,9 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
         */
        seq_printf(m, "\n\topts:\t");
        seq_puts(m, sb_rdonly(root->d_sb) ? "ro" : "rw");
-       seq_puts(m, root->d_sb->s_flags & MS_SYNCHRONOUS ? ",sync" : "");
-       seq_puts(m, root->d_sb->s_flags & MS_NOATIME ? ",noatime" : "");
-       seq_puts(m, root->d_sb->s_flags & MS_NODIRATIME ? ",nodiratime" : "");
+       seq_puts(m, root->d_sb->s_flags & SB_SYNCHRONOUS ? ",sync" : "");
+       seq_puts(m, root->d_sb->s_flags & SB_NOATIME ? ",noatime" : "");
+       seq_puts(m, root->d_sb->s_flags & SB_NODIRATIME ? ",nodiratime" : "");
        nfs_show_mount_options(m, nfss, 1);
 
        seq_printf(m, "\n\tage:\t%lu", (jiffies - nfss->mount_time) / HZ);
@@ -2296,11 +2296,11 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
        /*
         * noac is a special case. It implies -o sync, but that's not
         * necessarily reflected in the mtab options. do_remount_sb
-        * will clear MS_SYNCHRONOUS if -o sync wasn't specified in the
+        * will clear SB_SYNCHRONOUS if -o sync wasn't specified in the
         * remount options, so we have to explicitly reset it.
         */
        if (data->flags & NFS_MOUNT_NOAC)
-               *flags |= MS_SYNCHRONOUS;
+               *flags |= SB_SYNCHRONOUS;
 
        /* compare new mount options with old ones */
        error = nfs_compare_remount_data(nfss, data);
@@ -2349,7 +2349,7 @@ void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
                /* The VFS shouldn't apply the umask to mode bits. We will do
                 * so ourselves when necessary.
                 */
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
                sb->s_time_gran = 1;
                sb->s_export_op = &nfs_export_ops;
        }
@@ -2379,7 +2379,7 @@ static void nfs_clone_super(struct super_block *sb,
                /* The VFS shouldn't apply the umask to mode bits. We will do
                 * so ourselves when necessary.
                 */
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
        }
 
        nfs_initialise_sb(sb);
@@ -2600,11 +2600,11 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
 
        /* -o noac implies -o sync */
        if (server->flags & NFS_MOUNT_NOAC)
-               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+               sb_mntdata.mntflags |= SB_SYNCHRONOUS;
 
        if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
-               if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS)
-                       sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+               if (mount_info->cloned->sb->s_flags & SB_SYNCHRONOUS)
+                       sb_mntdata.mntflags |= SB_SYNCHRONOUS;
 
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
@@ -2641,7 +2641,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
        if (error)
                goto error_splat_root;
 
-       s->s_flags |= MS_ACTIVE;
+       s->s_flags |= SB_ACTIVE;
 
 out:
        return mntroot;
index 5b5f464..4a379d7 100644 (file)
@@ -1890,6 +1890,8 @@ int nfs_commit_inode(struct inode *inode, int how)
        if (res)
                error = nfs_generic_commit_list(inode, &head, how, &cinfo);
        nfs_commit_end(cinfo.mds);
+       if (res == 0)
+               return res;
        if (error < 0)
                goto out_error;
        if (!may_wait)
index 897b299..5be08f0 100644 (file)
@@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm)
        struct list_head *grace_list = net_generic(net, grace_net_id);
 
        spin_lock(&grace_lock);
-       list_add(&lm->list, grace_list);
+       if (list_empty(&lm->list))
+               list_add(&lm->list, grace_list);
+       else
+               WARN(1, "double list_add attempt detected in net %x %s\n",
+                    net->ns.inum, (net == &init_net) ? "(init_net)" : "");
        spin_unlock(&grace_lock);
 }
 EXPORT_SYMBOL_GPL(locks_start_grace);
@@ -104,7 +108,9 @@ grace_exit_net(struct net *net)
 {
        struct list_head *grace_list = net_generic(net, grace_net_id);
 
-       BUG_ON(!list_empty(grace_list));
+       WARN_ONCE(!list_empty(grace_list),
+                 "net %x %s: grace_list is not empty\n",
+                 net->ns.inum, __func__);
 }
 
 static struct pernet_operations grace_net_ops = {
index 697f8ae..fdf2aad 100644 (file)
@@ -61,6 +61,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
                        else
                                gi->gid[i] = rqgi->gid[i];
                }
+
+               /* Each thread allocates its own gi, no race */
+               groups_sort(gi);
        } else {
                gi = get_group_info(rqgi);
        }
index 46b48db..8ceb25a 100644 (file)
@@ -232,7 +232,7 @@ static struct cache_head *expkey_alloc(void)
                return NULL;
 }
 
-static struct cache_detail svc_expkey_cache_template = {
+static const struct cache_detail svc_expkey_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = EXPKEY_HASHMAX,
        .name           = "nfsd.fh",
@@ -748,7 +748,7 @@ static struct cache_head *svc_export_alloc(void)
                return NULL;
 }
 
-static struct cache_detail svc_export_cache_template = {
+static const struct cache_detail svc_export_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = EXPORT_HASHMAX,
        .name           = "nfsd.export",
@@ -1230,7 +1230,7 @@ nfsd_export_init(struct net *net)
        int rv;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       dprintk("nfsd: initializing export module (net: %p).\n", net);
+       dprintk("nfsd: initializing export module (net: %x).\n", net->ns.inum);
 
        nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
        if (IS_ERR(nn->svc_export_cache))
@@ -1278,7 +1278,7 @@ nfsd_export_shutdown(struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       dprintk("nfsd: shutting down export module (net: %p).\n", net);
+       dprintk("nfsd: shutting down export module (net: %x).\n", net->ns.inum);
 
        cache_unregister_net(nn->svc_expkey_cache, net);
        cache_unregister_net(nn->svc_export_cache, net);
@@ -1286,5 +1286,5 @@ nfsd_export_shutdown(struct net *net)
        cache_destroy_net(nn->svc_export_cache, net);
        svcauth_unix_purge(net);
 
-       dprintk("nfsd: export shutdown complete (net: %p).\n", net);
+       dprintk("nfsd: export shutdown complete (net: %x).\n", net->ns.inum);
 }
index 1c91391..36358d4 100644 (file)
@@ -119,6 +119,9 @@ struct nfsd_net {
        u32 clverifier_counter;
 
        struct svc_serv *nfsd_serv;
+
+       wait_queue_head_t ntf_wq;
+       atomic_t ntf_refcnt;
 };
 
 /* Simple check to find out if a given net was properly initialized */
index 6b9b6cc..a5bb765 100644 (file)
@@ -178,7 +178,7 @@ static struct ent *idtoname_lookup(struct cache_detail *, struct ent *);
 static struct ent *idtoname_update(struct cache_detail *, struct ent *,
                                   struct ent *);
 
-static struct cache_detail idtoname_cache_template = {
+static const struct cache_detail idtoname_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = ENT_HASHMAX,
        .name           = "nfs4.idtoname",
@@ -341,7 +341,7 @@ static struct ent *nametoid_update(struct cache_detail *, struct ent *,
                                   struct ent *);
 static int         nametoid_parse(struct cache_detail *, char *, int);
 
-static struct cache_detail nametoid_cache_template = {
+static const struct cache_detail nametoid_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = ENT_HASHMAX,
        .name           = "nfs4.nametoid",
index b828177..b29b5a1 100644 (file)
@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
 static const stateid_t currentstateid = {
        .si_generation = 1,
 };
+static const stateid_t close_stateid = {
+       .si_generation = 0xffffffffU,
+};
 
 static u64 current_sessionid = 1;
 
 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
+#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
 
 /* forward declarations */
 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
@@ -83,6 +87,11 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
  */
 static DEFINE_SPINLOCK(state_lock);
 
+enum nfsd4_st_mutex_lock_subclass {
+       OPEN_STATEID_MUTEX = 0,
+       LOCK_STATEID_MUTEX = 1,
+};
+
 /*
  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
  * the refcount on the open stateid to drop.
@@ -3562,7 +3571,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
                /* ignore lock owners */
                if (local->st_stateowner->so_is_open_owner == 0)
                        continue;
-               if (local->st_stateowner == &oo->oo_owner) {
+               if (local->st_stateowner != &oo->oo_owner)
+                       continue;
+               if (local->st_stid.sc_type == NFS4_OPEN_STID) {
                        ret = local;
                        refcount_inc(&ret->st_stid.sc_count);
                        break;
@@ -3571,6 +3582,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
        return ret;
 }
 
+static __be32
+nfsd4_verify_open_stid(struct nfs4_stid *s)
+{
+       __be32 ret = nfs_ok;
+
+       switch (s->sc_type) {
+       default:
+               break;
+       case NFS4_CLOSED_STID:
+       case NFS4_CLOSED_DELEG_STID:
+               ret = nfserr_bad_stateid;
+               break;
+       case NFS4_REVOKED_DELEG_STID:
+               ret = nfserr_deleg_revoked;
+       }
+       return ret;
+}
+
+/* Lock the stateid st_mutex, and deal with races with CLOSE */
+static __be32
+nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
+{
+       __be32 ret;
+
+       mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
+       ret = nfsd4_verify_open_stid(&stp->st_stid);
+       if (ret != nfs_ok)
+               mutex_unlock(&stp->st_mutex);
+       return ret;
+}
+
+static struct nfs4_ol_stateid *
+nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
+{
+       struct nfs4_ol_stateid *stp;
+       for (;;) {
+               spin_lock(&fp->fi_lock);
+               stp = nfsd4_find_existing_open(fp, open);
+               spin_unlock(&fp->fi_lock);
+               if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
+                       break;
+               nfs4_put_stid(&stp->st_stid);
+       }
+       return stp;
+}
+
 static struct nfs4_openowner *
 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
                           struct nfsd4_compound_state *cstate)
@@ -3613,8 +3670,9 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
        stp = open->op_stp;
        /* We are moving these outside of the spinlocks to avoid the warnings */
        mutex_init(&stp->st_mutex);
-       mutex_lock(&stp->st_mutex);
+       mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
 
+retry:
        spin_lock(&oo->oo_owner.so_client->cl_lock);
        spin_lock(&fp->fi_lock);
 
@@ -3639,7 +3697,11 @@ out_unlock:
        spin_unlock(&fp->fi_lock);
        spin_unlock(&oo->oo_owner.so_client->cl_lock);
        if (retstp) {
-               mutex_lock(&retstp->st_mutex);
+               /* Handle races with CLOSE */
+               if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+                       nfs4_put_stid(&retstp->st_stid);
+                       goto retry;
+               }
                /* To keep mutex tracking happy */
                mutex_unlock(&stp->st_mutex);
                stp = retstp;
@@ -4449,6 +4511,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        struct nfs4_ol_stateid *stp = NULL;
        struct nfs4_delegation *dp = NULL;
        __be32 status;
+       bool new_stp = false;
 
        /*
         * Lookup file; if found, lookup stateid and check open request,
@@ -4460,9 +4523,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                status = nfs4_check_deleg(cl, open, &dp);
                if (status)
                        goto out;
-               spin_lock(&fp->fi_lock);
-               stp = nfsd4_find_existing_open(fp, open);
-               spin_unlock(&fp->fi_lock);
+               stp = nfsd4_find_and_lock_existing_open(fp, open);
        } else {
                open->op_file = NULL;
                status = nfserr_bad_stateid;
@@ -4470,35 +4531,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                        goto out;
        }
 
+       if (!stp) {
+               stp = init_open_stateid(fp, open);
+               if (!open->op_stp)
+                       new_stp = true;
+       }
+
        /*
         * OPEN the file, or upgrade an existing OPEN.
         * If truncate fails, the OPEN fails.
+        *
+        * stp is already locked.
         */
-       if (stp) {
+       if (!new_stp) {
                /* Stateid was found, this is an OPEN upgrade */
-               mutex_lock(&stp->st_mutex);
                status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
                if (status) {
                        mutex_unlock(&stp->st_mutex);
                        goto out;
                }
        } else {
-               /* stp is returned locked. */
-               stp = init_open_stateid(fp, open);
-               /* See if we lost the race to some other thread */
-               if (stp->st_access_bmap != 0) {
-                       status = nfs4_upgrade_open(rqstp, fp, current_fh,
-                                               stp, open);
-                       if (status) {
-                               mutex_unlock(&stp->st_mutex);
-                               goto out;
-                       }
-                       goto upgrade_out;
-               }
                status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
                if (status) {
-                       mutex_unlock(&stp->st_mutex);
+                       stp->st_stid.sc_type = NFS4_CLOSED_STID;
                        release_open_stateid(stp);
+                       mutex_unlock(&stp->st_mutex);
                        goto out;
                }
 
@@ -4507,7 +4564,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                if (stp->st_clnt_odstate == open->op_odstate)
                        open->op_odstate = NULL;
        }
-upgrade_out:
+
        nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
        mutex_unlock(&stp->st_mutex);
 
@@ -4734,7 +4791,7 @@ nfs4_laundromat(struct nfsd_net *nn)
        spin_unlock(&nn->blocked_locks_lock);
 
        while (!list_empty(&reaplist)) {
-               nbl = list_first_entry(&nn->blocked_locks_lru,
+               nbl = list_first_entry(&reaplist,
                                        struct nfsd4_blocked_lock, nbl_lru);
                list_del_init(&nbl->nbl_lru);
                posix_unblock_lock(&nbl->nbl_lock);
@@ -4855,6 +4912,18 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
        return nfserr_old_stateid;
 }
 
+static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
+{
+       __be32 ret;
+
+       spin_lock(&s->sc_lock);
+       ret = nfsd4_verify_open_stid(s);
+       if (ret == nfs_ok)
+               ret = check_stateid_generation(in, &s->sc_stateid, has_session);
+       spin_unlock(&s->sc_lock);
+       return ret;
+}
+
 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
 {
        if (ols->st_stateowner->so_is_open_owner &&
@@ -4868,7 +4937,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
        struct nfs4_stid *s;
        __be32 status = nfserr_bad_stateid;
 
-       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+               CLOSE_STATEID(stateid))
                return status;
        /* Client debugging aid. */
        if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
@@ -4883,7 +4953,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
        s = find_stateid_locked(cl, stateid);
        if (!s)
                goto out_unlock;
-       status = check_stateid_generation(stateid, &s->sc_stateid, 1);
+       status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
        if (status)
                goto out_unlock;
        switch (s->sc_type) {
@@ -4926,7 +4996,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
        else if (typemask & NFS4_DELEG_STID)
                typemask |= NFS4_REVOKED_DELEG_STID;
 
-       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+               CLOSE_STATEID(stateid))
                return nfserr_bad_stateid;
        status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
        if (status == nfserr_stale_clientid) {
@@ -5044,7 +5115,7 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
                                &s, nn);
        if (status)
                return status;
-       status = check_stateid_generation(stateid, &s->sc_stateid,
+       status = nfsd4_stid_check_stateid_generation(stateid, s,
                        nfsd4_has_session(cstate));
        if (status)
                goto out;
@@ -5098,7 +5169,9 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
        struct nfs4_ol_stateid *stp = openlockstateid(s);
        __be32 ret;
 
-       mutex_lock(&stp->st_mutex);
+       ret = nfsd4_lock_ol_stateid(stp);
+       if (ret)
+               goto out_put_stid;
 
        ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
        if (ret)
@@ -5109,11 +5182,13 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
                            lockowner(stp->st_stateowner)))
                goto out;
 
+       stp->st_stid.sc_type = NFS4_CLOSED_STID;
        release_lock_stateid(stp);
        ret = nfs_ok;
 
 out:
        mutex_unlock(&stp->st_mutex);
+out_put_stid:
        nfs4_put_stid(s);
        return ret;
 }
@@ -5133,6 +5208,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        s = find_stateid_locked(cl, stateid);
        if (!s)
                goto out_unlock;
+       spin_lock(&s->sc_lock);
        switch (s->sc_type) {
        case NFS4_DELEG_STID:
                ret = nfserr_locks_held;
@@ -5144,11 +5220,13 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                ret = nfserr_locks_held;
                break;
        case NFS4_LOCK_STID:
+               spin_unlock(&s->sc_lock);
                refcount_inc(&s->sc_count);
                spin_unlock(&cl->cl_lock);
                ret = nfsd4_free_lock_stateid(stateid, s);
                goto out;
        case NFS4_REVOKED_DELEG_STID:
+               spin_unlock(&s->sc_lock);
                dp = delegstateid(s);
                list_del_init(&dp->dl_recall_lru);
                spin_unlock(&cl->cl_lock);
@@ -5157,6 +5235,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                goto out;
        /* Default falls through and returns nfserr_bad_stateid */
        }
+       spin_unlock(&s->sc_lock);
 out_unlock:
        spin_unlock(&cl->cl_lock);
 out:
@@ -5179,15 +5258,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
        status = nfsd4_check_seqid(cstate, sop, seqid);
        if (status)
                return status;
-       if (stp->st_stid.sc_type == NFS4_CLOSED_STID
-               || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
-               /*
-                * "Closed" stateid's exist *only* to return
-                * nfserr_replay_me from the previous step, and
-                * revoked delegations are kept only for free_stateid.
-                */
-               return nfserr_bad_stateid;
-       mutex_lock(&stp->st_mutex);
+       status = nfsd4_lock_ol_stateid(stp);
+       if (status != nfs_ok)
+               return status;
        status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status == nfs_ok)
                status = nfs4_check_fh(current_fh, &stp->st_stid);
@@ -5367,7 +5440,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
        bool unhashed;
        LIST_HEAD(reaplist);
 
-       s->st_stid.sc_type = NFS4_CLOSED_STID;
        spin_lock(&clp->cl_lock);
        unhashed = unhash_open_stateid(s, &reaplist);
 
@@ -5407,10 +5479,17 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        nfsd4_bump_seqid(cstate, status);
        if (status)
                goto out; 
+
+       stp->st_stid.sc_type = NFS4_CLOSED_STID;
        nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
-       mutex_unlock(&stp->st_mutex);
 
        nfsd4_close_open_stateid(stp);
+       mutex_unlock(&stp->st_mutex);
+
+       /* See RFC5661 section 18.2.4 */
+       if (stp->st_stid.sc_client->cl_minorversion)
+               memcpy(&close->cl_stateid, &close_stateid,
+                               sizeof(close->cl_stateid));
 
        /* put reference from nfs4_preprocess_seqid_op */
        nfs4_put_stid(&stp->st_stid);
@@ -5436,7 +5515,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        if (status)
                goto out;
        dp = delegstateid(s);
-       status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
+       status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
        if (status)
                goto put_stateid;
 
@@ -5642,14 +5721,41 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
        return ret;
 }
 
-static void
+static struct nfs4_ol_stateid *
+find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
+{
+       struct nfs4_ol_stateid *lst;
+       struct nfs4_client *clp = lo->lo_owner.so_client;
+
+       lockdep_assert_held(&clp->cl_lock);
+
+       list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
+               if (lst->st_stid.sc_type != NFS4_LOCK_STID)
+                       continue;
+               if (lst->st_stid.sc_file == fp) {
+                       refcount_inc(&lst->st_stid.sc_count);
+                       return lst;
+               }
+       }
+       return NULL;
+}
+
+static struct nfs4_ol_stateid *
 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
                  struct nfs4_file *fp, struct inode *inode,
                  struct nfs4_ol_stateid *open_stp)
 {
        struct nfs4_client *clp = lo->lo_owner.so_client;
+       struct nfs4_ol_stateid *retstp;
 
-       lockdep_assert_held(&clp->cl_lock);
+       mutex_init(&stp->st_mutex);
+       mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
+retry:
+       spin_lock(&clp->cl_lock);
+       spin_lock(&fp->fi_lock);
+       retstp = find_lock_stateid(lo, fp);
+       if (retstp)
+               goto out_unlock;
 
        refcount_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_LOCK_STID;
@@ -5659,29 +5765,22 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
-       mutex_init(&stp->st_mutex);
        list_add(&stp->st_locks, &open_stp->st_locks);
        list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
-       spin_lock(&fp->fi_lock);
        list_add(&stp->st_perfile, &fp->fi_stateids);
+out_unlock:
        spin_unlock(&fp->fi_lock);
-}
-
-static struct nfs4_ol_stateid *
-find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
-{
-       struct nfs4_ol_stateid *lst;
-       struct nfs4_client *clp = lo->lo_owner.so_client;
-
-       lockdep_assert_held(&clp->cl_lock);
-
-       list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
-               if (lst->st_stid.sc_file == fp) {
-                       refcount_inc(&lst->st_stid.sc_count);
-                       return lst;
+       spin_unlock(&clp->cl_lock);
+       if (retstp) {
+               if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+                       nfs4_put_stid(&retstp->st_stid);
+                       goto retry;
                }
+               /* To keep mutex tracking happy */
+               mutex_unlock(&stp->st_mutex);
+               stp = retstp;
        }
-       return NULL;
+       return stp;
 }
 
 static struct nfs4_ol_stateid *
@@ -5694,26 +5793,25 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
        struct nfs4_openowner *oo = openowner(ost->st_stateowner);
        struct nfs4_client *clp = oo->oo_owner.so_client;
 
+       *new = false;
        spin_lock(&clp->cl_lock);
        lst = find_lock_stateid(lo, fi);
-       if (lst == NULL) {
-               spin_unlock(&clp->cl_lock);
-               ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
-               if (ns == NULL)
-                       return NULL;
-
-               spin_lock(&clp->cl_lock);
-               lst = find_lock_stateid(lo, fi);
-               if (likely(!lst)) {
-                       lst = openlockstateid(ns);
-                       init_lock_stateid(lst, lo, fi, inode, ost);
-                       ns = NULL;
-                       *new = true;
-               }
-       }
        spin_unlock(&clp->cl_lock);
-       if (ns)
+       if (lst != NULL) {
+               if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
+                       goto out;
+               nfs4_put_stid(&lst->st_stid);
+       }
+       ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
+       if (ns == NULL)
+               return NULL;
+
+       lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
+       if (lst == openlockstateid(ns))
+               *new = true;
+       else
                nfs4_put_stid(ns);
+out:
        return lst;
 }
 
@@ -5750,7 +5848,6 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
        struct nfs4_lockowner *lo;
        struct nfs4_ol_stateid *lst;
        unsigned int strhashval;
-       bool hashed;
 
        lo = find_lockowner_str(cl, &lock->lk_new_owner);
        if (!lo) {
@@ -5766,25 +5863,12 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
                        goto out;
        }
 
-retry:
        lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
        if (lst == NULL) {
                status = nfserr_jukebox;
                goto out;
        }
 
-       mutex_lock(&lst->st_mutex);
-
-       /* See if it's still hashed to avoid race with FREE_STATEID */
-       spin_lock(&cl->cl_lock);
-       hashed = !list_empty(&lst->st_perfile);
-       spin_unlock(&cl->cl_lock);
-
-       if (!hashed) {
-               mutex_unlock(&lst->st_mutex);
-               nfs4_put_stid(&lst->st_stid);
-               goto retry;
-       }
        status = nfs_ok;
        *plst = lst;
 out:
@@ -5990,14 +6074,16 @@ out:
                    seqid_mutating_err(ntohl(status)))
                        lock_sop->lo_owner.so_seqid++;
 
-               mutex_unlock(&lock_stp->st_mutex);
-
                /*
                 * If this is a new, never-before-used stateid, and we are
                 * returning an error, then just go ahead and release it.
                 */
-               if (status && new)
+               if (status && new) {
+                       lock_stp->st_stid.sc_type = NFS4_CLOSED_STID;
                        release_lock_stateid(lock_stp);
+               }
+
+               mutex_unlock(&lock_stp->st_mutex);
 
                nfs4_put_stid(&lock_stp->st_stid);
        }
@@ -7017,6 +7103,10 @@ static int nfs4_state_create_net(struct net *net)
                INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
        nn->conf_name_tree = RB_ROOT;
        nn->unconf_name_tree = RB_ROOT;
+       nn->boot_time = get_seconds();
+       nn->grace_ended = false;
+       nn->nfsd4_manager.block_opens = true;
+       INIT_LIST_HEAD(&nn->nfsd4_manager.list);
        INIT_LIST_HEAD(&nn->client_lru);
        INIT_LIST_HEAD(&nn->close_lru);
        INIT_LIST_HEAD(&nn->del_recall_lru);
@@ -7074,9 +7164,6 @@ nfs4_state_start_net(struct net *net)
        ret = nfs4_state_create_net(net);
        if (ret)
                return ret;
-       nn->boot_time = get_seconds();
-       nn->grace_ended = false;
-       nn->nfsd4_manager.block_opens = true;
        locks_start_grace(net, &nn->nfsd4_manager);
        nfsd4_client_tracking_init(net);
        printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
@@ -7153,7 +7240,7 @@ nfs4_state_shutdown_net(struct net *net)
        spin_unlock(&nn->blocked_locks_lock);
 
        while (!list_empty(&reaplist)) {
-               nbl = list_first_entry(&nn->blocked_locks_lru,
+               nbl = list_first_entry(&reaplist,
                                        struct nfsd4_blocked_lock, nbl_lru);
                list_del_init(&nbl->nbl_lru);
                posix_unblock_lock(&nbl->nbl_lock);
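
Most of the nfsd4 state-handling churn above implements one discipline: look a
stateid up under the spinlocks, take its st_mutex, and only then re-check
(nfsd4_verify_open_stid()) that a racing CLOSE or FREE_STATEID has not already
retired it, retrying the lookup if it has.  A userspace analogue of that
lookup/lock/revalidate loop follows; every identifier in it is invented, and
it assumes the "closer" unhashes the entry so a retry eventually stops finding
it:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct stateid {
        struct stateid *next;
        pthread_mutex_t mutex;  /* per-stateid lock, like st_mutex */
        bool closed;            /* like NFS4_CLOSED_STID */
        int refcount;           /* pinned while the table lock is dropped */
        int key;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct stateid *table;

static void put_stateid(struct stateid *st)
{
        pthread_mutex_lock(&table_lock);
        st->refcount--;         /* real code would free it at zero */
        pthread_mutex_unlock(&table_lock);
}

static struct stateid *lookup_and_pin(int key)
{
        struct stateid *st;

        pthread_mutex_lock(&table_lock);
        for (st = table; st; st = st->next)
                if (st->key == key)
                        break;
        if (st)
                st->refcount++; /* pin before dropping table_lock */
        pthread_mutex_unlock(&table_lock);
        return st;
}

/* Return the stateid with its mutex held and its state revalidated. */
static struct stateid *find_and_lock(int key)
{
        struct stateid *st;

        for (;;) {
                st = lookup_and_pin(key);
                if (!st)
                        return NULL;
                pthread_mutex_lock(&st->mutex);
                if (!st->closed)        /* revalidate after taking the lock */
                        return st;
                /* raced with a close: unlock, unpin, retry the lookup */
                pthread_mutex_unlock(&st->mutex);
                put_stateid(st);
        }
}

int main(void)
{
        static struct stateid st = {
                .mutex = PTHREAD_MUTEX_INITIALIZER,
                .key = 7,
        };
        struct stateid *found;

        table = &st;
        found = find_and_lock(7);
        if (!found)
                return 1;
        pthread_mutex_unlock(&found->mutex);
        put_stateid(found);
        return 0;
}
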
index 6493df6..d107b44 100644 (file)
@@ -1241,6 +1241,9 @@ static __net_init int nfsd_init_net(struct net *net)
        nn->nfsd4_grace = 90;
        nn->clverifier_counter = prandom_u32();
        nn->clientid_counter = prandom_u32();
+
+       atomic_set(&nn->ntf_refcnt, 0);
+       init_waitqueue_head(&nn->ntf_wq);
        return 0;
 
 out_idmap_error:
index 33117d4..89cb484 100644 (file)
@@ -335,7 +335,8 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        struct sockaddr_in sin;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nn->ntf_refcnt))
                goto out;
 
        if (nn->nfsd_serv) {
@@ -344,6 +345,8 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
                sin.sin_addr.s_addr = ifa->ifa_local;
                svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
        }
+       atomic_dec(&nn->ntf_refcnt);
+       wake_up(&nn->ntf_wq);
 
 out:
        return NOTIFY_DONE;
@@ -363,7 +366,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        struct sockaddr_in6 sin6;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nn->ntf_refcnt))
                goto out;
 
        if (nn->nfsd_serv) {
@@ -374,7 +378,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
                        sin6.sin6_scope_id = ifa->idev->dev->ifindex;
                svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
        }
-
+       atomic_dec(&nn->ntf_refcnt);
+       wake_up(&nn->ntf_wq);
 out:
        return NOTIFY_DONE;
 }
@@ -391,6 +396,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
+       atomic_dec(&nn->ntf_refcnt);
        /* check if the notifier still has clients */
        if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
                unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
@@ -398,6 +404,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
                unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
 #endif
        }
+       wait_event(nn->ntf_wq, atomic_read(&nn->ntf_refcnt) == 0);
 
        /*
         * write_ports can create the server without actually starting
@@ -517,6 +524,7 @@ int nfsd_create_serv(struct net *net)
                register_inet6addr_notifier(&nfsd_inet6addr_notifier);
 #endif
        }
+       atomic_inc(&nn->ntf_refcnt);
        ktime_get_real_ts64(&nn->nfssvc_boot); /* record boot time */
        return 0;
 }
index f572538..9f3ffba 100644 (file)
@@ -1979,7 +1979,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
 {
        struct nilfs_inode_info *ii, *n;
-       int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
+       int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
        int defer_iput = false;
 
        spin_lock(&nilfs->ns_inode_lock);
index 3ce20cd..3073b64 100644 (file)
@@ -141,7 +141,7 @@ void __nilfs_error(struct super_block *sb, const char *function,
 
                if (nilfs_test_opt(nilfs, ERRORS_RO)) {
                        printk(KERN_CRIT "Remounting filesystem read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
        }
 
@@ -869,7 +869,7 @@ int nilfs_store_magic_and_option(struct super_block *sb,
 
        /* FS independent flags */
 #ifdef NILFS_ATIME_DISABLE
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
 #endif
 
        nilfs_set_default_options(sb, sbp);
@@ -1133,7 +1133,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
                err = -EINVAL;
                goto restore_opts;
        }
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
 
        err = -EINVAL;
 
@@ -1143,12 +1143,12 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
                goto restore_opts;
        }
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out;
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                /* Shutting down log writer */
                nilfs_detach_log_writer(sb);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
 
                /*
                 * Remounting a valid RW partition RDONLY, so set
@@ -1178,7 +1178,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
                        goto restore_opts;
                }
 
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 
                root = NILFS_I(d_inode(sb->s_root))->i_root;
                err = nilfs_attach_log_writer(sb, root);
@@ -1212,7 +1212,7 @@ static int nilfs_parse_snapshot_option(const char *option,
        const char *msg = NULL;
        int err;
 
-       if (!(sd->flags & MS_RDONLY)) {
+       if (!(sd->flags & SB_RDONLY)) {
                msg = "read-only option is not specified";
                goto parse_error;
        }
@@ -1286,7 +1286,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
        struct dentry *root_dentry;
        int err, s_new = false;
 
-       if (!(flags & MS_RDONLY))
+       if (!(flags & SB_RDONLY))
                mode |= FMODE_WRITE;
 
        sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
@@ -1327,14 +1327,14 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
                snprintf(s->s_id, sizeof(s->s_id), "%pg", sd.bdev);
                sb_set_blocksize(s, block_size(sd.bdev));
 
-               err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
+               err = nilfs_fill_super(s, data, flags & SB_SILENT ? 1 : 0);
                if (err)
                        goto failed_super;
 
-               s->s_flags |= MS_ACTIVE;
+               s->s_flags |= SB_ACTIVE;
        } else if (!sd.cno) {
                if (nilfs_tree_is_busy(s->s_root)) {
-                       if ((flags ^ s->s_flags) & MS_RDONLY) {
+                       if ((flags ^ s->s_flags) & SB_RDONLY) {
                                nilfs_msg(s, KERN_ERR,
                                          "the device already has a %s mount.",
                                          sb_rdonly(s) ? "read-only" : "read/write");
index afebb50..1a85317 100644 (file)
@@ -220,7 +220,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
 
        if (!valid_fs) {
                nilfs_msg(sb, KERN_WARNING, "mounting unchecked fs");
-               if (s_flags & MS_RDONLY) {
+               if (s_flags & SB_RDONLY) {
                        nilfs_msg(sb, KERN_INFO,
                                  "recovery required for readonly filesystem");
                        nilfs_msg(sb, KERN_INFO,
@@ -286,7 +286,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
        if (valid_fs)
                goto skip_recovery;
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                __u64 features;
 
                if (nilfs_test_opt(nilfs, NORECOVERY)) {
@@ -309,7 +309,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
                        err = -EROFS;
                        goto failed_unload;
                }
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
        } else if (nilfs_test_opt(nilfs, NORECOVERY)) {
                nilfs_msg(sb, KERN_ERR,
                          "recovery cancelled because norecovery option was specified for a read/write mount");
index 81d8959..219b269 100644 (file)
@@ -67,7 +67,7 @@ void fsnotify_unmount_inodes(struct super_block *sb)
 
                /*
                 * If i_count is zero, the inode cannot have any watches and
-                * doing an __iget/iput with MS_ACTIVE clear would actually
+                * doing an __iget/iput with SB_ACTIVE clear would actually
                 * evict all inodes with zero i_count from icache which is
                 * unnecessarily violent and may in fact be illegal to do.
                 */
index ef243e1..7c6f76d 100644 (file)
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -255,5 +255,5 @@ void __init nsfs_init(void)
        nsfs_mnt = kern_mount(&nsfs);
        if (IS_ERR(nsfs_mnt))
                panic("can't set nsfs up\n");
-       nsfs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+       nsfs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
 }
index 3f70f04..bb7159f 100644 (file)
@@ -473,7 +473,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
 
 #ifndef NTFS_RW
        /* For read-only compiled driver, enforce read-only flag. */
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
 #else /* NTFS_RW */
        /*
         * For the read-write compiled driver, if we are remounting read-write,
@@ -487,7 +487,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
         * When remounting read-only, mark the volume clean if no volume errors
         * have occurred.
         */
-       if (sb_rdonly(sb) && !(*flags & MS_RDONLY)) {
+       if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
                static const char *es = ".  Cannot remount read-write.";
 
                /* Remounting read-write. */
@@ -548,7 +548,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
                        NVolSetErrors(vol);
                        return -EROFS;
                }
-       } else if (!sb_rdonly(sb) && (*flags & MS_RDONLY)) {
+       } else if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
                /* Remounting read-only. */
                if (!NVolErrors(vol)) {
                        if (ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY))
@@ -1799,7 +1799,7 @@ static bool load_system_files(ntfs_volume *vol)
                                                es3);
                                goto iput_mirr_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s",
                                        !vol->mftmirr_ino ? es1 : es2, es3);
                } else
@@ -1937,7 +1937,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_vol_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -1974,7 +1974,7 @@ get_ctx_vol_failed:
                                }
                                goto iput_logfile_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2019,7 +2019,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_root_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2042,7 +2042,7 @@ get_ctx_vol_failed:
                        goto iput_root_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                /*
                 * Do not set NVolErrors() because ntfs_remount() might manage
                 * to set the dirty flag in which case all would be well.
@@ -2055,7 +2055,7 @@ get_ctx_vol_failed:
         * If (still) a read-write mount, set the NT4 compatibility flag on
         * newer NTFS version volumes.
         */
-       if (!(sb->s_flags & MS_RDONLY) && (vol->major_ver > 1) &&
+       if (!(sb->s_flags & SB_RDONLY) && (vol->major_ver > 1) &&
                        ntfs_set_volume_flags(vol, VOLUME_MOUNTED_ON_NT4)) {
                static const char *es1 = "Failed to set NT4 compatibility flag";
                static const char *es2 = ".  Run chkdsk.";
@@ -2069,7 +2069,7 @@ get_ctx_vol_failed:
                        goto iput_root_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
 #endif
@@ -2087,7 +2087,7 @@ get_ctx_vol_failed:
                        goto iput_root_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
 #endif /* NTFS_RW */
@@ -2128,7 +2128,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_quota_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2150,7 +2150,7 @@ get_ctx_vol_failed:
                        goto iput_quota_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
        /*
@@ -2171,7 +2171,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_usnjrnl_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2194,7 +2194,7 @@ get_ctx_vol_failed:
                        goto iput_usnjrnl_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
 #endif /* NTFS_RW */
@@ -2728,7 +2728,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
        lockdep_off();
        ntfs_debug("Entering.");
 #ifndef NTFS_RW
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
 #endif /* ! NTFS_RW */
        /* Allocate a new ntfs_volume and place it in sb->s_fs_info. */
        sb->s_fs_info = kmalloc(sizeof(ntfs_volume), GFP_NOFS);
index dc455d4..a1d0510 100644 (file)
@@ -227,7 +227,7 @@ int ocfs2_should_update_atime(struct inode *inode,
                return 0;
 
        if ((inode->i_flags & S_NOATIME) ||
-           ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
+           ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;
 
        /*
index 040bbb6..80efa56 100644 (file)
@@ -675,9 +675,9 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
        }
 
        /* We're going to/from readonly mode. */
-       if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
+       if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
                /* Disable quota accounting before remounting RO */
-               if (*flags & MS_RDONLY) {
+               if (*flags & SB_RDONLY) {
                        ret = ocfs2_susp_quotas(osb, 0);
                        if (ret < 0)
                                goto out;
@@ -691,8 +691,8 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
                        goto unlock_osb;
                }
 
-               if (*flags & MS_RDONLY) {
-                       sb->s_flags |= MS_RDONLY;
+               if (*flags & SB_RDONLY) {
+                       sb->s_flags |= SB_RDONLY;
                        osb->osb_flags |= OCFS2_OSB_SOFT_RO;
                } else {
                        if (osb->osb_flags & OCFS2_OSB_ERROR_FS) {
@@ -709,14 +709,14 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
                                ret = -EINVAL;
                                goto unlock_osb;
                        }
-                       sb->s_flags &= ~MS_RDONLY;
+                       sb->s_flags &= ~SB_RDONLY;
                        osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
                }
                trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags);
 unlock_osb:
                spin_unlock(&osb->osb_lock);
                /* Enable quota accounting after remounting RW */
-               if (!ret && !(*flags & MS_RDONLY)) {
+               if (!ret && !(*flags & SB_RDONLY)) {
                        if (sb_any_quota_suspended(sb))
                                ret = ocfs2_susp_quotas(osb, 1);
                        else
@@ -724,7 +724,7 @@ unlock_osb:
                        if (ret < 0) {
                                /* Return back changes... */
                                spin_lock(&osb->osb_lock);
-                               sb->s_flags |= MS_RDONLY;
+                               sb->s_flags |= SB_RDONLY;
                                osb->osb_flags |= OCFS2_OSB_SOFT_RO;
                                spin_unlock(&osb->osb_lock);
                                goto out;
@@ -744,9 +744,9 @@ unlock_osb:
                if (!ocfs2_is_hard_readonly(osb))
                        ocfs2_set_journal_params(osb);
 
-               sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+               sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
                        ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ?
-                                                       MS_POSIXACL : 0);
+                                                       SB_POSIXACL : 0);
        }
 out:
        return ret;
@@ -1057,10 +1057,10 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_magic = OCFS2_SUPER_MAGIC;
 
-       sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) |
-               ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~(SB_POSIXACL | SB_NOSEC)) |
+               ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
 
-       /* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
+       /* Hard readonly mode only if: bdev_read_only, SB_RDONLY,
         * heartbeat=none */
        if (bdev_read_only(sb->s_bdev)) {
                if (!sb_rdonly(sb)) {
@@ -2057,7 +2057,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
        sb->s_xattr = ocfs2_xattr_handlers;
        sb->s_time_gran = 1;
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
        /* this is needed to support O_LARGEFILE */
        cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
        bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
@@ -2568,7 +2568,7 @@ static int ocfs2_handle_error(struct super_block *sb)
                        return rv;
 
                pr_crit("OCFS2: File system is now read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                ocfs2_set_ro_flag(osb, 0);
        }
 
index 5fdf269..c5898c5 100644 (file)
@@ -901,7 +901,7 @@ static int ocfs2_xattr_list_entry(struct super_block *sb,
 
        case OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS:
        case OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT:
-               if (!(sb->s_flags & MS_POSIXACL))
+               if (!(sb->s_flags & SB_POSIXACL))
                        return 0;
                break;
 
index 13215f2..2200662 100644 (file)
@@ -369,7 +369,7 @@ static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
 static int openprom_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
        return 0;
 }
 
@@ -386,7 +386,7 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
        struct op_inode_info *oi;
        int ret;
 
-       s->s_flags |= MS_NOATIME;
+       s->s_flags |= SB_NOATIME;
        s->s_blocksize = 1024;
        s->s_blocksize_bits = 10;
        s->s_magic = OPENPROM_SUPER_MAGIC;
index ded456f..c584ad8 100644 (file)
@@ -162,7 +162,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
        struct orangefs_kernel_op_s *op, *temp;
        __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
        static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
-       struct orangefs_kernel_op_s *cur_op = NULL;
+       struct orangefs_kernel_op_s *cur_op;
        unsigned long ret;
 
        /* We do not support blocking IO. */
@@ -186,6 +186,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
                return -EAGAIN;
 
 restart:
+       cur_op = NULL;
        /* Get next op (if any) from top of list. */
        spin_lock(&orangefs_request_list_lock);
        list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
index 1668fd6..0d228cd 100644 (file)
@@ -452,7 +452,7 @@ ssize_t orangefs_inode_read(struct inode *inode,
 static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct file *file = iocb->ki_filp;
-       loff_t pos = *(&iocb->ki_pos);
+       loff_t pos = iocb->ki_pos;
        ssize_t rc = 0;
 
        BUG_ON(iocb->private);
@@ -492,9 +492,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
                }
        }
 
-       if (file->f_pos > i_size_read(file->f_mapping->host))
-               orangefs_i_size_write(file->f_mapping->host, file->f_pos);
-
        rc = generic_write_checks(iocb, iter);
 
        if (rc <= 0) {
@@ -508,7 +505,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
         * pos to the end of the file, so we will wait till now to set
         * pos...
         */
-       pos = *(&iocb->ki_pos);
+       pos = iocb->ki_pos;
 
        rc = do_readv_writev(ORANGEFS_IO_WRITE,
                             file,
index 97adf7d..2595453 100644 (file)
@@ -533,17 +533,6 @@ do {                                                                       \
        sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE;                  \
 } while (0)
 
-static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
-{
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-       inode_lock(inode);
-#endif
-       i_size_write(inode, i_size);
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-       inode_unlock(inode);
-#endif
-}
-
 static inline void orangefs_set_timeout(struct dentry *dentry)
 {
        unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
index 366750e..36f1390 100644 (file)
@@ -40,7 +40,7 @@ static int orangefs_show_options(struct seq_file *m, struct dentry *root)
 {
        struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(root->d_sb);
 
-       if (root->d_sb->s_flags & MS_POSIXACL)
+       if (root->d_sb->s_flags & SB_POSIXACL)
                seq_puts(m, ",acl");
        if (orangefs_sb->flags & ORANGEFS_OPT_INTR)
                seq_puts(m, ",intr");
@@ -60,7 +60,7 @@ static int parse_mount_options(struct super_block *sb, char *options,
         * Force any potential flags that might be set from the mount
         * to zero, ie, initialize to unset.
         */
-       sb->s_flags &= ~MS_POSIXACL;
+       sb->s_flags &= ~SB_POSIXACL;
        orangefs_sb->flags &= ~ORANGEFS_OPT_INTR;
        orangefs_sb->flags &= ~ORANGEFS_OPT_LOCAL_LOCK;
 
@@ -73,7 +73,7 @@ static int parse_mount_options(struct super_block *sb, char *options,
                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_acl:
-                       sb->s_flags |= MS_POSIXACL;
+                       sb->s_flags |= SB_POSIXACL;
                        break;
                case Opt_intr:
                        orangefs_sb->flags |= ORANGEFS_OPT_INTR;
@@ -507,7 +507,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
        ret = orangefs_fill_sb(sb,
              &new_op->downcall.resp.fs_mount, data,
-             flags & MS_SILENT ? 1 : 0);
+             flags & SB_SILENT ? 1 : 0);
 
        if (ret) {
                d = ERR_PTR(ret);
index 835c6e1..0577d6d 100644 (file)
@@ -29,10 +29,10 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s
  */
 void purge_waiting_ops(void)
 {
-       struct orangefs_kernel_op_s *op;
+       struct orangefs_kernel_op_s *op, *tmp;
 
        spin_lock(&orangefs_request_list_lock);
-       list_for_each_entry(op, &orangefs_request_list, list) {
+       list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
                gossip_debug(GOSSIP_WAIT_DEBUG,
                             "pvfs2-client-core: purging op tag %llu %s\n",
                             llu(op->tag),
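
purge_waiting_ops() above moves to list_for_each_entry_safe() because the loop body may unlink and free the entry it is looking at. A plain-C sketch of the same idea, using a simplified singly linked list instead of the kernel's <linux/list.h> (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int tag;
        struct node *next;
};

static void purge_all(struct node **head)
{
        struct node *n = *head;
        struct node *tmp;

        while (n) {
                tmp = n->next;          /* the "safe" cursor: saved first */
                printf("purging op tag %d\n", n->tag);
                free(n);                /* current node is gone now */
                n = tmp;                /* still valid because it was saved */
        }
        *head = NULL;
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 3; i > 0; i--) {
                struct node *n = malloc(sizeof(*n));

                n->tag = i;
                n->next = head;
                head = n;
        }
        purge_all(&head);
        return 0;
}
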
index cbfc196..5ac4154 100644 (file)
@@ -24,6 +24,16 @@ config OVERLAY_FS_REDIRECT_DIR
          an overlay which has redirects on a kernel that doesn't support this
          feature will have unexpected results.
 
+config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW
+       bool "Overlayfs: follow redirects even if redirects are turned off"
+       default y
+       depends on OVERLAY_FS
+       help
+         Disable this to get a possibly more secure configuration, but that
+         might not be backward compatible with previous kernels.
+
+         For more information, see Documentation/filesystems/overlayfs.txt
+
 config OVERLAY_FS_INDEX
        bool "Overlayfs: turn on inodes index feature by default"
        depends on OVERLAY_FS
index e139218..f9788bc 100644 (file)
@@ -887,7 +887,8 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
                spin_unlock(&dentry->d_lock);
        } else {
                kfree(redirect);
-               pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
+               pr_warn_ratelimited("overlayfs: failed to set redirect (%i)\n",
+                                   err);
                /* Fall back to userspace copy-up */
                err = -EXDEV;
        }
index 625ed80..beb945e 100644 (file)
@@ -435,7 +435,7 @@ int ovl_verify_index(struct dentry *index, struct ovl_path *lower,
 
        /* Check if index is orphan and don't warn before cleaning it */
        if (d_inode(index)->i_nlink == 1 &&
-           ovl_get_nlink(index, origin.dentry, 0) == 0)
+           ovl_get_nlink(origin.dentry, index, 0) == 0)
                err = -ENOENT;
 
        dput(origin.dentry);
@@ -681,6 +681,22 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                if (d.stop)
                        break;
 
+               /*
+                * Following redirects can have security consequences: it's like
+                * a symlink into the lower layer without the permission checks.
+                * This is only a problem if the upper layer is untrusted (e.g.
+                * it comes from a USB drive).  This can allow a non-readable
+                * file or directory to become readable.
+                *
+                * Only following redirects when the redirect feature is enabled
+                * closes off this attack vector when it is not needed.
+                */
+               err = -EPERM;
+               if (d.redirect && !ofs->config.redirect_follow) {
+                       pr_warn_ratelimited("overlay: refusing to follow redirect for (%pd2)\n", dentry);
+                       goto out_put;
+               }
+
                if (d.redirect && d.redirect[0] == '/' && poe != roe) {
                        poe = roe;
 
index 13eab09..b489099 100644 (file)
@@ -180,7 +180,7 @@ static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)
 static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode)
 {
        struct dentry *ret = vfs_tmpfile(dentry, mode, 0);
-       int err = IS_ERR(ret) ? PTR_ERR(ret) : 0;
+       int err = PTR_ERR_OR_ZERO(ret);
 
        pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err);
        return ret;
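
The ovl_do_tmpfile() hunk above replaces the open-coded IS_ERR() ? PTR_ERR() : 0 with PTR_ERR_OR_ZERO(). The snippet below is a rough standalone re-implementation of that pointer-encoded-errno idiom for illustration only; it is not the kernel's <linux/err.h>.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline long PTR_ERR_OR_ZERO(const void *ptr)
{
        return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}

int main(void)
{
        int ok_target = 0;
        void *ok = &ok_target;              /* a real pointer */
        void *bad = ERR_PTR(-ENOMEM);       /* an encoded error */

        printf("ok  -> %ld\n", PTR_ERR_OR_ZERO(ok));    /* prints 0 */
        printf("bad -> %ld\n", PTR_ERR_OR_ZERO(bad));   /* prints -12 */
        return 0;
}
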
index 752bab6..9d0bc03 100644 (file)
@@ -14,6 +14,8 @@ struct ovl_config {
        char *workdir;
        bool default_permissions;
        bool redirect_dir;
+       bool redirect_follow;
+       const char *redirect_mode;
        bool index;
 };
 
index 0daa435..8c98578 100644 (file)
@@ -499,7 +499,7 @@ out:
        return err;
 
 fail:
-       pr_warn_ratelimited("overlay: failed to look up (%s) for ino (%i)\n",
+       pr_warn_ratelimited("overlayfs: failed to look up (%s) for ino (%i)\n",
                            p->name, err);
        goto out;
 }
@@ -663,7 +663,10 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
                        return PTR_ERR(rdt.cache);
        }
 
-       return iterate_dir(od->realfile, &rdt.ctx);
+       err = iterate_dir(od->realfile, &rdt.ctx);
+       ctx->pos = rdt.ctx.pos;
+
+       return err;
 }
 
 
index be03578..76440fe 100644 (file)
@@ -33,6 +33,13 @@ module_param_named(redirect_dir, ovl_redirect_dir_def, bool, 0644);
 MODULE_PARM_DESC(ovl_redirect_dir_def,
                 "Default to on or off for the redirect_dir feature");
 
+static bool ovl_redirect_always_follow =
+       IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW);
+module_param_named(redirect_always_follow, ovl_redirect_always_follow,
+                  bool, 0644);
+MODULE_PARM_DESC(ovl_redirect_always_follow,
+                "Follow redirects even if redirect_dir feature is turned off");
+
 static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX);
 module_param_named(index, ovl_index_def, bool, 0644);
 MODULE_PARM_DESC(ovl_index_def,
@@ -232,6 +239,7 @@ static void ovl_free_fs(struct ovl_fs *ofs)
        kfree(ofs->config.lowerdir);
        kfree(ofs->config.upperdir);
        kfree(ofs->config.workdir);
+       kfree(ofs->config.redirect_mode);
        if (ofs->creator_cred)
                put_cred(ofs->creator_cred);
        kfree(ofs);
@@ -244,6 +252,7 @@ static void ovl_put_super(struct super_block *sb)
        ovl_free_fs(ofs);
 }
 
+/* Sync real dirty inodes in upper filesystem (if it exists) */
 static int ovl_sync_fs(struct super_block *sb, int wait)
 {
        struct ovl_fs *ofs = sb->s_fs_info;
@@ -252,14 +261,24 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
 
        if (!ofs->upper_mnt)
                return 0;
-       upper_sb = ofs->upper_mnt->mnt_sb;
-       if (!upper_sb->s_op->sync_fs)
+
+       /*
+        * If this is a sync(2) call or an emergency sync, all the super blocks
+        * will be iterated, including upper_sb, so no need to do anything.
+        *
+        * If this is a syncfs(2) call, then we do need to call
+        * sync_filesystem() on upper_sb, but it is enough to do so when we
+        * are called with wait == 1.
+        */
+       if (!wait)
                return 0;
 
-       /* real inodes have already been synced by sync_filesystem(ovl_sb) */
+       upper_sb = ofs->upper_mnt->mnt_sb;
+
        down_read(&upper_sb->s_umount);
-       ret = upper_sb->s_op->sync_fs(upper_sb, wait);
+       ret = sync_filesystem(upper_sb);
        up_read(&upper_sb->s_umount);
+
        return ret;
 }
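
The rewritten ovl_sync_fs() above relies on the split between sync(2), which already iterates every superblock (including upper_sb), and syncfs(2), which targets one filesystem and is the path that reaches ->sync_fs with wait == 1 and therefore needs the explicit sync_filesystem(upper_sb). A small userspace illustration of that distinction; the mount point path is a placeholder:

#define _GNU_SOURCE             /* syncfs() is a GNU extension */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* "/mnt/overlay" is a hypothetical overlay mount point. */
        int fd = open("/mnt/overlay", O_RDONLY | O_DIRECTORY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (syncfs(fd) != 0)    /* flush only the filesystem behind fd */
                perror("syncfs");
        sync();                 /* flush every filesystem, for comparison */
        close(fd);
        return 0;
}
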
 
@@ -295,6 +314,11 @@ static bool ovl_force_readonly(struct ovl_fs *ofs)
        return (!ofs->upper_mnt || !ofs->workdir);
 }
 
+static const char *ovl_redirect_mode_def(void)
+{
+       return ovl_redirect_dir_def ? "on" : "off";
+}
+
 /**
  * ovl_show_options
  *
@@ -313,12 +337,10 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
        }
        if (ofs->config.default_permissions)
                seq_puts(m, ",default_permissions");
-       if (ofs->config.redirect_dir != ovl_redirect_dir_def)
-               seq_printf(m, ",redirect_dir=%s",
-                          ofs->config.redirect_dir ? "on" : "off");
+       if (strcmp(ofs->config.redirect_mode, ovl_redirect_mode_def()) != 0)
+               seq_printf(m, ",redirect_dir=%s", ofs->config.redirect_mode);
        if (ofs->config.index != ovl_index_def)
-               seq_printf(m, ",index=%s",
-                          ofs->config.index ? "on" : "off");
+               seq_printf(m, ",index=%s", ofs->config.index ? "on" : "off");
        return 0;
 }
 
@@ -326,7 +348,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
 {
        struct ovl_fs *ofs = sb->s_fs_info;
 
-       if (!(*flags & MS_RDONLY) && ovl_force_readonly(ofs))
+       if (!(*flags & SB_RDONLY) && ovl_force_readonly(ofs))
                return -EROFS;
 
        return 0;
@@ -348,8 +370,7 @@ enum {
        OPT_UPPERDIR,
        OPT_WORKDIR,
        OPT_DEFAULT_PERMISSIONS,
-       OPT_REDIRECT_DIR_ON,
-       OPT_REDIRECT_DIR_OFF,
+       OPT_REDIRECT_DIR,
        OPT_INDEX_ON,
        OPT_INDEX_OFF,
        OPT_ERR,
@@ -360,8 +381,7 @@ static const match_table_t ovl_tokens = {
        {OPT_UPPERDIR,                  "upperdir=%s"},
        {OPT_WORKDIR,                   "workdir=%s"},
        {OPT_DEFAULT_PERMISSIONS,       "default_permissions"},
-       {OPT_REDIRECT_DIR_ON,           "redirect_dir=on"},
-       {OPT_REDIRECT_DIR_OFF,          "redirect_dir=off"},
+       {OPT_REDIRECT_DIR,              "redirect_dir=%s"},
        {OPT_INDEX_ON,                  "index=on"},
        {OPT_INDEX_OFF,                 "index=off"},
        {OPT_ERR,                       NULL}
@@ -390,10 +410,37 @@ static char *ovl_next_opt(char **s)
        return sbegin;
 }
 
+static int ovl_parse_redirect_mode(struct ovl_config *config, const char *mode)
+{
+       if (strcmp(mode, "on") == 0) {
+               config->redirect_dir = true;
+               /*
+                * Does not make sense to have redirect creation without
+                * redirect following.
+                */
+               config->redirect_follow = true;
+       } else if (strcmp(mode, "follow") == 0) {
+               config->redirect_follow = true;
+       } else if (strcmp(mode, "off") == 0) {
+               if (ovl_redirect_always_follow)
+                       config->redirect_follow = true;
+       } else if (strcmp(mode, "nofollow") != 0) {
+               pr_err("overlayfs: bad mount option \"redirect_dir=%s\"\n",
+                      mode);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int ovl_parse_opt(char *opt, struct ovl_config *config)
 {
        char *p;
 
+       config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
+       if (!config->redirect_mode)
+               return -ENOMEM;
+
        while ((p = ovl_next_opt(&opt)) != NULL) {
                int token;
                substring_t args[MAX_OPT_ARGS];
@@ -428,12 +475,11 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
                        config->default_permissions = true;
                        break;
 
-               case OPT_REDIRECT_DIR_ON:
-                       config->redirect_dir = true;
-                       break;
-
-               case OPT_REDIRECT_DIR_OFF:
-                       config->redirect_dir = false;
+               case OPT_REDIRECT_DIR:
+                       kfree(config->redirect_mode);
+                       config->redirect_mode = match_strdup(&args[0]);
+                       if (!config->redirect_mode)
+                               return -ENOMEM;
                        break;
 
                case OPT_INDEX_ON:
@@ -458,7 +504,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
                config->workdir = NULL;
        }
 
-       return 0;
+       return ovl_parse_redirect_mode(config, config->redirect_mode);
 }
 
 #define OVL_WORKDIR_NAME "work"
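
The new ovl_parse_redirect_mode() above maps the four redirect_dir=<mode> strings onto two booleans: whether new redirects may be created and whether existing ones are followed. The standalone sketch below mirrors that mapping; the type and function names are illustrative, and always_follow stands in for the CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW default (the redirect_always_follow module parameter).

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct redirect_cfg {
        bool create;   /* may create new redirects (redirect_dir)     */
        bool follow;   /* will follow existing ones (redirect_follow) */
};

static int parse_redirect_mode(const char *mode, bool always_follow,
                               struct redirect_cfg *cfg)
{
        cfg->create = false;
        cfg->follow = false;

        if (strcmp(mode, "on") == 0) {
                cfg->create = true;
                cfg->follow = true;            /* creation implies following */
        } else if (strcmp(mode, "follow") == 0) {
                cfg->follow = true;
        } else if (strcmp(mode, "off") == 0) {
                cfg->follow = always_follow;   /* backward-compatible default */
        } else if (strcmp(mode, "nofollow") != 0) {
                fprintf(stderr, "bad mount option \"redirect_dir=%s\"\n", mode);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct redirect_cfg cfg;

        parse_redirect_mode("off", true, &cfg);
        printf("create=%d follow=%d\n", cfg.create, cfg.follow);
        return 0;
}
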
@@ -1160,7 +1206,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        if (!cred)
                goto out_err;
 
-       ofs->config.redirect_dir = ovl_redirect_dir_def;
        ofs->config.index = ovl_index_def;
        err = ovl_parse_opt((char *) data, &ofs->config);
        if (err)
@@ -1190,7 +1235,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                        goto out_err;
 
                if (!ofs->workdir)
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
 
                sb->s_stack_depth = ofs->upper_mnt->mnt_sb->s_stack_depth;
                sb->s_time_gran = ofs->upper_mnt->mnt_sb->s_time_gran;
@@ -1203,7 +1248,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 
        /* If the upper fs is nonexistent, we mark overlayfs r/o too */
        if (!ofs->upper_mnt)
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        else if (ofs->upper_mnt->mnt_sb != ofs->same_sb)
                ofs->same_sb = NULL;
 
@@ -1213,7 +1258,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                        goto out_free_oe;
 
                if (!ofs->indexdir)
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
        }
 
        /* Show index=off/on in /proc/mounts for any of the reasons above */
@@ -1227,7 +1272,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_op = &ovl_super_operations;
        sb->s_xattr = ovl_xattr_handlers;
        sb->s_fs_info = ofs;
-       sb->s_flags |= MS_POSIXACL | MS_NOREMOTELOCK;
+       sb->s_flags |= SB_POSIXACL | SB_NOREMOTELOCK;
 
        err = -ENOMEM;
        root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
index 79375fc..d67a72d 100644 (file)
@@ -430,8 +430,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                 * safe because the task has stopped executing permanently.
                 */
                if (permitted && (task->flags & PF_DUMPCORE)) {
-                       eip = KSTK_EIP(task);
-                       esp = KSTK_ESP(task);
+                       if (try_get_task_stack(task)) {
+                               eip = KSTK_EIP(task);
+                               esp = KSTK_ESP(task);
+                               put_task_stack(task);
+                       }
                }
        }
 
index 31934cb..60316b5 100644 (file)
@@ -443,8 +443,7 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
                save_stack_trace_tsk(task, &trace);
 
                for (i = 0; i < trace.nr_entries; i++) {
-                       seq_printf(m, "[<%pK>] %pB\n",
-                                  (void *)entries[i], (void *)entries[i]);
+                       seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);
                }
                unlock_trace(task);
        }
@@ -2269,7 +2268,7 @@ static int show_timer(struct seq_file *m, void *v)
        notify = timer->it_sigev_notify;
 
        seq_printf(m, "ID: %d\n", timer->it_id);
-       seq_printf(m, "signal: %d/%p\n",
+       seq_printf(m, "signal: %d/%px\n",
                   timer->sigq->info.si_signo,
                   timer->sigq->info.si_value.sival_ptr);
        seq_printf(m, "notify: %s/%s.%d\n",
index 225f541..dd0f826 100644 (file)
@@ -483,7 +483,7 @@ int proc_fill_super(struct super_block *s, void *data, int silent)
 
        /* User space would break if executables or devices appear on proc */
        s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
-       s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
+       s->s_flags |= SB_NODIRATIME | SB_NOSUID | SB_NOEXEC;
        s->s_blocksize = 1024;
        s->s_blocksize_bits = 10;
        s->s_magic = PROC_SUPER_MAGIC;
index 4e42aba..ede8e64 100644 (file)
@@ -91,7 +91,7 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
 {
        struct pid_namespace *ns;
 
-       if (flags & MS_KERNMOUNT) {
+       if (flags & SB_KERNMOUNT) {
                ns = data;
                data = NULL;
        } else {
index 7b635d1..b786840 100644 (file)
@@ -45,10 +45,10 @@ struct proc_fs_info {
 static int show_sb_opts(struct seq_file *m, struct super_block *sb)
 {
        static const struct proc_fs_info fs_info[] = {
-               { MS_SYNCHRONOUS, ",sync" },
-               { MS_DIRSYNC, ",dirsync" },
-               { MS_MANDLOCK, ",mand" },
-               { MS_LAZYTIME, ",lazytime" },
+               { SB_SYNCHRONOUS, ",sync" },
+               { SB_DIRSYNC, ",dirsync" },
+               { SB_MANDLOCK, ",mand" },
+               { SB_LAZYTIME, ",lazytime" },
                { 0, NULL }
        };
        const struct proc_fs_info *fs_infop;
index 3a67cfb..3d46fe3 100644 (file)
@@ -47,7 +47,7 @@ static int qnx4_remount(struct super_block *sb, int *flags, char *data)
        sync_filesystem(sb);
        qs = qnx4_sb(sb);
        qs->Version = QNX4_VERSION;
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -199,7 +199,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
 
        s->s_op = &qnx4_sops;
        s->s_magic = QNX4_SUPER_MAGIC;
-       s->s_flags |= MS_RDONLY;        /* Yup, read-only yet */
+       s->s_flags |= SB_RDONLY;        /* Yup, read-only yet */
 
        /* Check the superblock signature. Since the qnx4 code is
           dangerous, we should leave as quickly as possible
index 1192422..4aeb26b 100644 (file)
@@ -56,7 +56,7 @@ static int qnx6_show_options(struct seq_file *seq, struct dentry *root)
 static int qnx6_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -427,7 +427,7 @@ mmi_success:
        }
        s->s_op = &qnx6_sops;
        s->s_magic = QNX6_SUPER_MAGIC;
-       s->s_flags |= MS_RDONLY;        /* Yup, read-only yet */
+       s->s_flags |= SB_RDONLY;        /* Yup, read-only yet */
 
        /* ease the later tree level calculations */
        sbi = QNX6_SB(s);
index 39f1b0b..020c597 100644 (file)
@@ -941,12 +941,13 @@ static int dqinit_needed(struct inode *inode, int type)
 }
 
 /* This routine is guarded by s_umount semaphore */
-static void add_dquot_ref(struct super_block *sb, int type)
+static int add_dquot_ref(struct super_block *sb, int type)
 {
        struct inode *inode, *old_inode = NULL;
 #ifdef CONFIG_QUOTA_DEBUG
        int reserved = 0;
 #endif
+       int err = 0;
 
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
@@ -966,7 +967,11 @@ static void add_dquot_ref(struct super_block *sb, int type)
                        reserved = 1;
 #endif
                iput(old_inode);
-               __dquot_initialize(inode, type);
+               err = __dquot_initialize(inode, type);
+               if (err) {
+                       iput(inode);
+                       goto out;
+               }
 
                /*
                 * We hold a reference to 'inode' so it couldn't have been
@@ -981,7 +986,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
        }
        spin_unlock(&sb->s_inode_list_lock);
        iput(old_inode);
-
+out:
 #ifdef CONFIG_QUOTA_DEBUG
        if (reserved) {
                quota_error(sb, "Writes happened before quota was turned on "
@@ -989,6 +994,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
                        "Please run quotacheck(8)");
        }
 #endif
+       return err;
 }
 
 /*
@@ -2379,10 +2385,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
        dqopt->flags |= dquot_state_flag(flags, type);
        spin_unlock(&dq_state_lock);
 
-       add_dquot_ref(sb, type);
-
-       return 0;
+       error = add_dquot_ref(sb, type);
+       if (error)
+               dquot_disable(sb, type, flags);
 
+       return error;
 out_file_init:
        dqopt->files[type] = NULL;
        iput(inode);
@@ -2985,7 +2992,8 @@ static int __init dquot_init(void)
        pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
                " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
 
-       register_shrinker(&dqcache_shrinker);
+       if (register_shrinker(&dqcache_shrinker))
+               panic("Cannot register dquot shrinker");
 
        return 0;
 }
index 11a48af..b13fc02 100644 (file)
@@ -2106,7 +2106,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
                        journal_end(th);
                        goto out_inserted_sd;
                }
-       } else if (inode->i_sb->s_flags & MS_POSIXACL) {
+       } else if (inode->i_sb->s_flags & SB_POSIXACL) {
                reiserfs_warning(inode->i_sb, "jdm-13090",
                                 "ACLs aren't enabled in the fs, "
                                 "but vfs thinks they are!");
index 69ff280..7005735 100644 (file)
@@ -1960,7 +1960,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
        /*
         * Cancel flushing of old commits. Note that neither of these works
         * will be requeued because superblock is being shutdown and doesn't
-        * have MS_ACTIVE set.
+        * have SB_ACTIVE set.
         */
        reiserfs_cancel_old_flush(sb);
        /* wait for all commits to finish */
@@ -4302,7 +4302,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
                 * Avoid queueing work when sb is being shut down. Transaction
                 * will be flushed on journal shutdown.
                 */
-               if (sb->s_flags & MS_ACTIVE)
+               if (sb->s_flags & SB_ACTIVE)
                        queue_delayed_work(REISERFS_SB(sb)->commit_wq,
                                           &journal->j_work, HZ / 10);
        }
@@ -4393,7 +4393,7 @@ void reiserfs_abort_journal(struct super_block *sb, int errno)
        if (!journal->j_errno)
                journal->j_errno = errno;
 
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        set_bit(J_ABORTED, &journal->j_state);
 
 #ifdef CONFIG_REISERFS_CHECK
index 64f49ca..7e288d9 100644 (file)
@@ -390,7 +390,7 @@ void __reiserfs_error(struct super_block *sb, const char *id,
                return;
 
        reiserfs_info(sb, "Remounting filesystem read-only\n");
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        reiserfs_abort_journal(sb, -EIO);
 }
 
@@ -409,7 +409,7 @@ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
        printk(KERN_CRIT "REISERFS abort (device %s): %s\n", sb->s_id,
               error_buf);
 
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        reiserfs_abort_journal(sb, errno);
 }
 
index 5464ec5..1fc934d 100644 (file)
@@ -121,7 +121,7 @@ void reiserfs_schedule_old_flush(struct super_block *s)
         * Avoid scheduling flush when sb is being shut down. It can race
         * with journal shutdown and free still queued delayed work.
         */
-       if (sb_rdonly(s) || !(s->s_flags & MS_ACTIVE))
+       if (sb_rdonly(s) || !(s->s_flags & SB_ACTIVE))
                return;
 
        spin_lock(&sbi->old_work_lock);
@@ -252,11 +252,11 @@ static int finish_unfinished(struct super_block *s)
 
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       if (s->s_flags & MS_ACTIVE) {
+       if (s->s_flags & SB_ACTIVE) {
                ms_active_set = 0;
        } else {
                ms_active_set = 1;
-               s->s_flags |= MS_ACTIVE;
+               s->s_flags |= SB_ACTIVE;
        }
        /* Turn on quotas so that they are updated correctly */
        for (i = 0; i < REISERFS_MAXQUOTAS; i++) {
@@ -411,7 +411,7 @@ static int finish_unfinished(struct super_block *s)
        reiserfs_write_lock(s);
        if (ms_active_set)
                /* Restore the flag back */
-               s->s_flags &= ~MS_ACTIVE;
+               s->s_flags &= ~SB_ACTIVE;
 #endif
        pathrelse(&path);
        if (done)
@@ -1521,7 +1521,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                        goto out_err_unlock;
        }
 
-       if (*mount_flags & MS_RDONLY) {
+       if (*mount_flags & SB_RDONLY) {
                reiserfs_write_unlock(s);
                reiserfs_xattr_init(s, *mount_flags);
                /* remount read-only */
@@ -1567,7 +1567,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
 
                /* now it is safe to call journal_begin */
-               s->s_flags &= ~MS_RDONLY;
+               s->s_flags &= ~SB_RDONLY;
                err = journal_begin(&th, s, 10);
                if (err)
                        goto out_err_unlock;
@@ -1575,7 +1575,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                /* Mount a partition which is read-only, read-write */
                reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
                REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
-               s->s_flags &= ~MS_RDONLY;
+               s->s_flags &= ~SB_RDONLY;
                set_sb_umount_state(rs, REISERFS_ERROR_FS);
                if (!old_format_only(s))
                        set_sb_mnt_count(rs, sb_mnt_count(rs) + 1);
@@ -1590,7 +1590,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                goto out_err_unlock;
 
        reiserfs_write_unlock(s);
-       if (!(*mount_flags & MS_RDONLY)) {
+       if (!(*mount_flags & SB_RDONLY)) {
                dquot_resume(s, -1);
                reiserfs_write_lock(s);
                finish_unfinished(s);
@@ -2055,7 +2055,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
        if (bdev_read_only(s->s_bdev) && !sb_rdonly(s)) {
                SWARN(silent, s, "clm-7000",
                      "Detected readonly device, marking FS readonly");
-               s->s_flags |= MS_RDONLY;
+               s->s_flags |= SB_RDONLY;
        }
        args.objectid = REISERFS_ROOT_OBJECTID;
        args.dirid = REISERFS_ROOT_PARENT_OBJECTID;
@@ -2591,7 +2591,6 @@ out:
                return err;
        if (inode->i_size < off + len - towrite)
                i_size_write(inode, off + len - towrite);
-       inode->i_version++;
        inode->i_mtime = inode->i_ctime = current_time(inode);
        mark_inode_dirty(inode);
        return len - towrite;
index 46492fb..5dbf532 100644 (file)
@@ -959,7 +959,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
 
 /*
  * We need to take a copy of the mount flags since things like
- * MS_RDONLY don't get set until *after* we're called.
+ * SB_RDONLY don't get set until *after* we're called.
  * mount_flags != mount_options
  */
 int reiserfs_xattr_init(struct super_block *s, int mount_flags)
@@ -971,7 +971,7 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
        if (err)
                goto error;
 
-       if (d_really_is_negative(privroot) && !(mount_flags & MS_RDONLY)) {
+       if (d_really_is_negative(privroot) && !(mount_flags & SB_RDONLY)) {
                inode_lock(d_inode(s->s_root));
                err = create_privroot(REISERFS_SB(s)->priv_root);
                inode_unlock(d_inode(s->s_root));
@@ -999,11 +999,11 @@ error:
                clear_bit(REISERFS_POSIXACL, &REISERFS_SB(s)->s_mount_opt);
        }
 
-       /* The super_block MS_POSIXACL must mirror the (no)acl mount option. */
+       /* The super_block SB_POSIXACL must mirror the (no)acl mount option. */
        if (reiserfs_posixacl(s))
-               s->s_flags |= MS_POSIXACL;
+               s->s_flags |= SB_POSIXACL;
        else
-               s->s_flags &= ~MS_POSIXACL;
+               s->s_flags &= ~SB_POSIXACL;
 
        return err;
 }
index 0186fe6..8f06fd1 100644 (file)
@@ -451,7 +451,7 @@ static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int romfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -502,7 +502,7 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_maxbytes = 0xFFFFFFFF;
        sb->s_magic = ROMFS_MAGIC;
-       sb->s_flags |= MS_RDONLY | MS_NOATIME;
+       sb->s_flags |= SB_RDONLY | SB_NOATIME;
        sb->s_op = &romfs_super_ops;
 
 #ifdef CONFIG_ROMFS_ON_MTD
index cf01e15..8a73b97 100644 (file)
@@ -195,7 +195,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
                (u64) le64_to_cpu(sblk->id_table_start));
 
        sb->s_maxbytes = MAX_LFS_FILESIZE;
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        sb->s_op = &squashfs_super_ops;
 
        err = -ENOMEM;
@@ -373,7 +373,7 @@ static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int squashfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
index b072a8b..5b2a24f 100644 (file)
@@ -35,11 +35,11 @@ static int flags_by_mnt(int mnt_flags)
 static int flags_by_sb(int s_flags)
 {
        int flags = 0;
-       if (s_flags & MS_SYNCHRONOUS)
+       if (s_flags & SB_SYNCHRONOUS)
                flags |= ST_SYNCHRONOUS;
-       if (s_flags & MS_MANDLOCK)
+       if (s_flags & SB_MANDLOCK)
                flags |= ST_MANDLOCK;
-       if (s_flags & MS_RDONLY)
+       if (s_flags & SB_RDONLY)
                flags |= ST_RDONLY;
        return flags;
 }
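
flags_by_sb() above is where the renamed SB_* bits are translated into the ST_* bits that statfs(2)/statvfs(3) report to user space. A short userspace check of those bits; the path is a placeholder and ST_MANDLOCK/ST_SYNCHRONOUS are GNU extensions:

#define _GNU_SOURCE             /* ST_MANDLOCK, ST_SYNCHRONOUS */
#include <stdio.h>
#include <sys/statvfs.h>

int main(void)
{
        struct statvfs st;

        if (statvfs("/", &st) != 0) {
                perror("statvfs");
                return 1;
        }
        printf("read-only:   %s\n", (st.f_flag & ST_RDONLY) ? "yes" : "no");
        printf("mandlock:    %s\n", (st.f_flag & ST_MANDLOCK) ? "yes" : "no");
        printf("synchronous: %s\n", (st.f_flag & ST_SYNCHRONOUS) ? "yes" : "no");
        return 0;
}
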
index d4e33e8..06bd25d 100644 (file)
@@ -191,6 +191,24 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
 
        INIT_LIST_HEAD(&s->s_mounts);
        s->s_user_ns = get_user_ns(user_ns);
+       init_rwsem(&s->s_umount);
+       lockdep_set_class(&s->s_umount, &type->s_umount_key);
+       /*
+        * sget() can have s_umount recursion.
+        *
+        * When it cannot find a suitable sb, it allocates a new
+        * one (this one), and tries again to find a suitable old
+        * one.
+        *
+        * In case that succeeds, it will acquire the s_umount
+        * lock of the old one. Since these are clearly distinct
+        * locks, and this object isn't exposed yet, there's no
+        * risk of deadlocks.
+        *
+        * Annotate this by putting this lock in a different
+        * subclass.
+        */
+       down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
 
        if (security_sb_alloc(s))
                goto fail;
@@ -218,25 +236,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
                goto fail;
        if (list_lru_init_memcg(&s->s_inode_lru))
                goto fail;
-
-       init_rwsem(&s->s_umount);
-       lockdep_set_class(&s->s_umount, &type->s_umount_key);
-       /*
-        * sget() can have s_umount recursion.
-        *
-        * When it cannot find a suitable sb, it allocates a new
-        * one (this one), and tries again to find a suitable old
-        * one.
-        *
-        * lock of the old one. Since these are clearly distinct
-        * lock of the old one. Since these are clearly distrinct
-        * locks, and this object isn't exposed yet, there's no
-        * risk of deadlocks.
-        *
-        * Annotate this by putting this lock in a different
-        * subclass.
-        */
-       down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
        s->s_count = 1;
        atomic_set(&s->s_active, 1);
        mutex_init(&s->s_vfs_rename_mutex);
@@ -518,7 +517,11 @@ retry:
        hlist_add_head(&s->s_instances, &type->fs_supers);
        spin_unlock(&sb_lock);
        get_filesystem(type);
-       register_shrinker(&s->s_shrink);
+       err = register_shrinker(&s->s_shrink);
+       if (err) {
+               deactivate_locked_super(s);
+               s = ERR_PTR(err);
+       }
        return s;
 }
 
index 20b8f82..fb49510 100644 (file)
@@ -30,7 +30,7 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
        void *ns;
        bool new_sb;
 
-       if (!(flags & MS_KERNMOUNT)) {
+       if (!(flags & SB_KERNMOUNT)) {
                if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
                        return ERR_PTR(-EPERM);
        }
index 3c47b7d..bec9f79 100644 (file)
@@ -63,7 +63,7 @@ static int sysv_remount(struct super_block *sb, int *flags, char *data)
 
        sync_filesystem(sb);
        if (sbi->s_forced_ro)
-               *flags |= MS_RDONLY;
+               *flags |= SB_RDONLY;
        return 0;
 }
 
index 0d56e48..89765dd 100644 (file)
@@ -333,7 +333,7 @@ static int complete_read_super(struct super_block *sb, int silent, int size)
        /* set up enough so that it can read an inode */
        sb->s_op = &sysv_sops;
        if (sbi->s_forced_ro)
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        if (sbi->s_truncate)
                sb->s_d_op = &sysv_dentry_operations;
        root_inode = sysv_iget(sb, SYSV_ROOT_INO);
index a02aa59..dfe8506 100644 (file)
@@ -1406,7 +1406,7 @@ int ubifs_update_time(struct inode *inode, struct timespec *time,
        if (flags & S_MTIME)
                inode->i_mtime = *time;
 
-       if (!(inode->i_sb->s_flags & MS_LAZYTIME))
+       if (!(inode->i_sb->s_flags & SB_LAZYTIME))
                iflags |= I_DIRTY_SYNC;
 
        release = ui->dirty;
index 3be2890..fe77e96 100644 (file)
@@ -84,7 +84,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
        if (!c->ro_error) {
                c->ro_error = 1;
                c->no_chk_data_crc = 0;
-               c->vfs_sb->s_flags |= MS_RDONLY;
+               c->vfs_sb->s_flags |= SB_RDONLY;
                ubifs_warn(c, "switched to read-only mode, error %d", err);
                dump_stack();
        }
index 7503e7c..0beb285 100644 (file)
@@ -968,7 +968,7 @@ static int parse_standard_option(const char *option)
 
        pr_notice("UBIFS: parse %s\n", option);
        if (!strcmp(option, "sync"))
-               return MS_SYNCHRONOUS;
+               return SB_SYNCHRONOUS;
        return 0;
 }
 
@@ -1160,8 +1160,8 @@ static int mount_ubifs(struct ubifs_info *c)
        size_t sz;
 
        c->ro_mount = !!sb_rdonly(c->vfs_sb);
-       /* Suppress error messages while probing if MS_SILENT is set */
-       c->probing = !!(c->vfs_sb->s_flags & MS_SILENT);
+       /* Suppress error messages while probing if SB_SILENT is set */
+       c->probing = !!(c->vfs_sb->s_flags & SB_SILENT);
 
        err = init_constants_early(c);
        if (err)
@@ -1852,7 +1852,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
                return err;
        }
 
-       if (c->ro_mount && !(*flags & MS_RDONLY)) {
+       if (c->ro_mount && !(*flags & SB_RDONLY)) {
                if (c->ro_error) {
                        ubifs_msg(c, "cannot re-mount R/W due to prior errors");
                        return -EROFS;
@@ -1864,7 +1864,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
                err = ubifs_remount_rw(c);
                if (err)
                        return err;
-       } else if (!c->ro_mount && (*flags & MS_RDONLY)) {
+       } else if (!c->ro_mount && (*flags & SB_RDONLY)) {
                if (c->ro_error) {
                        ubifs_msg(c, "cannot re-mount R/O due to prior errors");
                        return -EROFS;
@@ -2117,7 +2117,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
         */
        ubi = open_ubi(name, UBI_READONLY);
        if (IS_ERR(ubi)) {
-               if (!(flags & MS_SILENT))
+               if (!(flags & SB_SILENT))
                        pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d",
                               current->pid, name, (int)PTR_ERR(ubi));
                return ERR_CAST(ubi);
@@ -2143,18 +2143,18 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
                kfree(c);
                /* A new mount point for already mounted UBIFS */
                dbg_gen("this ubi volume is already mounted");
-               if (!!(flags & MS_RDONLY) != c1->ro_mount) {
+               if (!!(flags & SB_RDONLY) != c1->ro_mount) {
                        err = -EBUSY;
                        goto out_deact;
                }
        } else {
-               err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+               err = ubifs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
                if (err)
                        goto out_deact;
                /* We do not support atime */
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
 #ifndef CONFIG_UBIFS_ATIME_SUPPORT
-               sb->s_flags |= MS_NOATIME;
+               sb->s_flags |= SB_NOATIME;
 #else
                ubifs_msg(c, "full atime support is enabled.");
 #endif
index 63c7468..5ee7af8 100644 (file)
@@ -1201,7 +1201,7 @@ struct ubifs_debug_info;
  * @need_recovery: %1 if the file-system needs recovery
  * @replaying: %1 during journal replay
  * @mounting: %1 while mounting
- * @probing: %1 while attempting to mount if MS_SILENT mount flag is set
+ * @probing: %1 while attempting to mount if SB_SILENT mount flag is set
  * @remounting_rw: %1 while re-mounting from R/O mode to R/W mode
  * @replay_list: temporary list used during journal replay
  * @replay_buds: list of buds to replay
@@ -1850,7 +1850,7 @@ __printf(2, 3)
 void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...);
 /*
  * A conditional variant of 'ubifs_err()' which doesn't output anything
- * if probing (ie. MS_SILENT set).
+ * if probing (ie. SB_SILENT set).
  */
 #define ubifs_errc(c, fmt, ...)                                                \
 do {                                                                   \
index f80e0a0..f73239a 100644 (file)
@@ -650,7 +650,7 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
        sync_filesystem(sb);
        if (lvidiu) {
                int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
-               if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
+               if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & SB_RDONLY))
                        return -EACCES;
        }
 
@@ -673,10 +673,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
        sbi->s_dmode = uopt.dmode;
        write_unlock(&sbi->s_cred_lock);
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out_unlock;
 
-       if (*flags & MS_RDONLY)
+       if (*flags & SB_RDONLY)
                udf_close_lvid(sb);
        else
                udf_open_lvid(sb);
index b5cd790..e727ee0 100644 (file)
@@ -115,7 +115,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
        
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
@@ -205,7 +205,7 @@ do_more:
 
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
 
        if (overflow) {
@@ -567,7 +567,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
        
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
@@ -688,7 +688,7 @@ cg_found:
 succed:
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
index 916b4a4..e1ef0f0 100644 (file)
@@ -112,7 +112,7 @@ void ufs_free_inode (struct inode * inode)
 
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        
        ufs_mark_sb_dirty(sb);
@@ -146,14 +146,14 @@ static void ufs2_init_inodes_chunk(struct super_block *sb,
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
-               if (sb->s_flags & MS_SYNCHRONOUS)
+               if (sb->s_flags & SB_SYNCHRONOUS)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
 
        fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
        ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
 
        UFSD("EXIT\n");
@@ -284,7 +284,7 @@ cg_found:
        }
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
@@ -330,7 +330,7 @@ cg_found:
                ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, ts.tv_nsec);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
-               if (sb->s_flags & MS_SYNCHRONOUS)
+               if (sb->s_flags & SB_SYNCHRONOUS)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
index 6440003..4d497e9 100644 (file)
@@ -282,7 +282,7 @@ void ufs_error (struct super_block * sb, const char * function,
                usb1->fs_clean = UFS_FSBAD;
                ubh_mark_buffer_dirty(USPI_UBH(uspi));
                ufs_mark_sb_dirty(sb);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        va_start(args, fmt);
        vaf.fmt = fmt;
@@ -320,7 +320,7 @@ void ufs_panic (struct super_block * sb, const char * function,
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        pr_crit("panic (device %s): %s: %pV\n",
                sb->s_id, function, &vaf);
        va_end(args);
@@ -905,7 +905,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=old is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -921,7 +921,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=nextstep is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -937,7 +937,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=nextstep-cd is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -953,7 +953,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=openstep is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -968,7 +968,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=hp is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        default:
@@ -1125,21 +1125,21 @@ magic_found:
                        break;
                case UFS_FSACTIVE:
                        pr_err("%s(): fs is active\n", __func__);
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        break;
                case UFS_FSBAD:
                        pr_err("%s(): fs is bad\n", __func__);
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        break;
                default:
                        pr_err("%s(): can't grok fs_clean 0x%x\n",
                               __func__, usb1->fs_clean);
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        break;
                }
        } else {
                pr_err("%s(): fs needs fsck\n", __func__);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        /*
@@ -1328,7 +1328,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
                return -EINVAL;
        }
 
-       if ((bool)(*mount_flags & MS_RDONLY) == sb_rdonly(sb)) {
+       if ((bool)(*mount_flags & SB_RDONLY) == sb_rdonly(sb)) {
                UFS_SB(sb)->s_mount_opt = new_mount_opt;
                mutex_unlock(&UFS_SB(sb)->s_lock);
                return 0;
@@ -1337,7 +1337,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
        /*
         * fs was mounted as rw, remounting ro
         */
-       if (*mount_flags & MS_RDONLY) {
+       if (*mount_flags & SB_RDONLY) {
                ufs_put_super_internal(sb);
                usb1->fs_time = cpu_to_fs32(sb, get_seconds());
                if ((flags & UFS_ST_MASK) == UFS_ST_SUN
@@ -1346,7 +1346,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
                        ufs_set_fs_state(sb, usb1, usb3,
                                UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
                ubh_mark_buffer_dirty (USPI_UBH(uspi));
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        } else {
        /*
         * fs was mounted as ro, remounting rw
@@ -1370,7 +1370,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return -EPERM;
                }
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 #endif
        }
        UFS_SB(sb)->s_mount_opt = new_mount_opt;
index ac9a4e6..41a75f9 100644 (file)
@@ -570,11 +570,14 @@ out:
 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                                              struct userfaultfd_wait_queue *ewq)
 {
+       struct userfaultfd_ctx *release_new_ctx;
+
        if (WARN_ON_ONCE(current->flags & PF_EXITING))
                goto out;
 
        ewq->ctx = ctx;
        init_waitqueue_entry(&ewq->wq, current);
+       release_new_ctx = NULL;
 
        spin_lock(&ctx->event_wqh.lock);
        /*
@@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                                new = (struct userfaultfd_ctx *)
                                        (unsigned long)
                                        ewq->msg.arg.reserved.reserved1;
-
-                               userfaultfd_ctx_put(new);
+                               release_new_ctx = new;
                        }
                        break;
                }
@@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
        __set_current_state(TASK_RUNNING);
        spin_unlock(&ctx->event_wqh.lock);
 
+       if (release_new_ctx) {
+               struct vm_area_struct *vma;
+               struct mm_struct *mm = release_new_ctx->mm;
+
+               /* the various vma->vm_userfaultfd_ctx still point to it */
+               down_write(&mm->mmap_sem);
+               for (vma = mm->mmap; vma; vma = vma->vm_next)
+                       if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
+                               vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+               up_write(&mm->mmap_sem);
+
+               userfaultfd_ctx_put(release_new_ctx);
+       }
+
        /*
         * ctx may go away after this if the userfault pseudo fd is
         * already released.
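
The rework above exists because the new context allocated for a fork event may still be referenced from the child's VMAs, so before the final put those vm_userfaultfd_ctx pointers have to be cleared under mmap_sem, a sleeping lock that cannot be taken inside the event_wqh spinlock; the context is therefore only recorded under the lock and torn down afterwards. A hedged sketch of the general idiom, with hypothetical names rather than the userfaultfd API:

    spin_lock(&state->lock);
    victim = state->pending;                /* record only, no teardown here */
    state->pending = NULL;
    spin_unlock(&state->lock);

    if (victim) {
            down_write(&victim->mm->mmap_sem);      /* sleeping lock is fine now */
            detach_from_vmas(victim);               /* hypothetical helper */
            up_write(&victim->mm->mmap_sem);
            put_object(victim);                     /* final put may free/sleep */
    }
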
index 0da8001..83ed771 100644 (file)
@@ -702,7 +702,7 @@ xfs_alloc_ag_vextent(
        ASSERT(args->agbno % args->alignment == 0);
 
        /* if not file data, insert new block into the reverse map btree */
-       if (args->oinfo.oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+       if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
                error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
                                       args->agbno, args->len, &args->oinfo);
                if (error)
@@ -1682,7 +1682,7 @@ xfs_free_ag_extent(
        bno_cur = cnt_cur = NULL;
        mp = tp->t_mountp;
 
-       if (oinfo->oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+       if (!xfs_rmap_should_skip_owner_update(oinfo)) {
                error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
                if (error)
                        goto error0;
index 6249c92..a76914d 100644 (file)
@@ -212,6 +212,7 @@ xfs_attr_set(
        int                     flags)
 {
        struct xfs_mount        *mp = dp->i_mount;
+       struct xfs_buf          *leaf_bp = NULL;
        struct xfs_da_args      args;
        struct xfs_defer_ops    dfops;
        struct xfs_trans_res    tres;
@@ -327,9 +328,16 @@ xfs_attr_set(
                 * GROT: another possible req'mt for a double-split btree op.
                 */
                xfs_defer_init(args.dfops, args.firstblock);
-               error = xfs_attr_shortform_to_leaf(&args);
+               error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
                if (error)
                        goto out_defer_cancel;
+               /*
+                * Prevent the leaf buffer from being unlocked so that a
+                * concurrent AIL push cannot grab the half-baked leaf
+                * buffer and run into problems with the write verifier.
+                */
+               xfs_trans_bhold(args.trans, leaf_bp);
+               xfs_defer_bjoin(args.dfops, leaf_bp);
                xfs_defer_ijoin(args.dfops, dp);
                error = xfs_defer_finish(&args.trans, args.dfops);
                if (error)
@@ -337,13 +345,14 @@ xfs_attr_set(
 
                /*
                 * Commit the leaf transformation.  We'll need another (linked)
-                * transaction to add the new attribute to the leaf.
+                * transaction to add the new attribute to the leaf, which
+                * means that we have to hold & join the leaf buffer here too.
                 */
-
                error = xfs_trans_roll_inode(&args.trans, dp);
                if (error)
                        goto out;
-
+               xfs_trans_bjoin(args.trans, leaf_bp);
+               leaf_bp = NULL;
        }
 
        if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
@@ -374,8 +383,9 @@ xfs_attr_set(
 
 out_defer_cancel:
        xfs_defer_cancel(&dfops);
-       args.trans = NULL;
 out:
+       if (leaf_bp)
+               xfs_trans_brelse(args.trans, leaf_bp);
        if (args.trans)
                xfs_trans_cancel(args.trans);
        xfs_iunlock(dp, XFS_ILOCK_EXCL);
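
The added hold/join dance is the idiom for keeping a metadata buffer locked while deferred ops roll the transaction underneath it; condensed with the names this patch uses (a sketch of the flow, not a complete function):

    xfs_trans_bhold(args.trans, leaf_bp);   /* keep leaf_bp locked at commit */
    xfs_defer_bjoin(args.dfops, leaf_bp);   /* relog it on every roll */
    xfs_defer_ijoin(args.dfops, dp);
    error = xfs_defer_finish(&args.trans, args.dfops);
    if (error)
            goto out_defer_cancel;

    error = xfs_trans_roll_inode(&args.trans, dp);
    if (error)
            goto out;                       /* out: releases leaf_bp if held */
    xfs_trans_bjoin(args.trans, leaf_bp);   /* hand the hold to the new trans */
    leaf_bp = NULL;                         /* error paths must not brelse it */
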
index 53cc8b9..601eaa3 100644 (file)
@@ -735,10 +735,13 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
 }
 
 /*
- * Convert from using the shortform to the leaf.
+ * Convert from using the shortform to the leaf.  On success, return the
+ * buffer so that we can keep it locked until we're totally done with it.
  */
 int
-xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
+xfs_attr_shortform_to_leaf(
+       struct xfs_da_args      *args,
+       struct xfs_buf          **leaf_bp)
 {
        xfs_inode_t *dp;
        xfs_attr_shortform_t *sf;
@@ -818,7 +821,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
                sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
        }
        error = 0;
-
+       *leaf_bp = bp;
 out:
        kmem_free(tmpbuffer);
        return error;
index f7dda0c..894124e 100644 (file)
@@ -48,7 +48,8 @@ void  xfs_attr_shortform_create(struct xfs_da_args *args);
 void   xfs_attr_shortform_add(struct xfs_da_args *args, int forkoff);
 int    xfs_attr_shortform_lookup(struct xfs_da_args *args);
 int    xfs_attr_shortform_getvalue(struct xfs_da_args *args);
-int    xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
+int    xfs_attr_shortform_to_leaf(struct xfs_da_args *args,
+                       struct xfs_buf **leaf_bp);
 int    xfs_attr_shortform_remove(struct xfs_da_args *args);
 int    xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
 int    xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes);
index 08df809..1bddbba 100644 (file)
@@ -5136,7 +5136,7 @@ __xfs_bunmapi(
         * blowing out the transaction with a mix of EFIs and reflink
         * adjustments.
         */
-       if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
+       if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
                max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
        else
                max_len = len;
@@ -5662,7 +5662,8 @@ xfs_bmap_collapse_extents(
                *done = true;
                goto del_cursor;
        }
-       XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
+       XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
+                               del_cursor);
 
        new_startoff = got.br_startoff - offset_shift_fsb;
        if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
@@ -5767,7 +5768,8 @@ xfs_bmap_insert_extents(
                        goto del_cursor;
                }
        }
-       XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
+       XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
+                               del_cursor);
 
        if (stop_fsb >= got.br_startoff + got.br_blockcount) {
                error = -EIO;
index 072ebfe..087fea0 100644 (file)
@@ -249,6 +249,10 @@ xfs_defer_trans_roll(
        for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
                xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);
 
+       /* Hold the (previously bjoin'd) buffer locked across the roll. */
+       for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
+               xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);
+
        trace_xfs_defer_trans_roll((*tp)->t_mountp, dop);
 
        /* Roll the transaction. */
@@ -264,6 +268,12 @@ xfs_defer_trans_roll(
        for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
                xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);
 
+       /* Rejoin the buffers and dirty them so the log moves forward. */
+       for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
+               xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
+               xfs_trans_bhold(*tp, dop->dop_bufs[i]);
+       }
+
        return error;
 }
 
@@ -295,6 +305,31 @@ xfs_defer_ijoin(
                }
        }
 
+       ASSERT(0);
+       return -EFSCORRUPTED;
+}
+
+/*
+ * Add this buffer to the deferred op.  Each joined buffer is relogged
+ * each time we roll the transaction.
+ */
+int
+xfs_defer_bjoin(
+       struct xfs_defer_ops            *dop,
+       struct xfs_buf                  *bp)
+{
+       int                             i;
+
+       for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
+               if (dop->dop_bufs[i] == bp)
+                       return 0;
+               else if (dop->dop_bufs[i] == NULL) {
+                       dop->dop_bufs[i] = bp;
+                       return 0;
+               }
+       }
+
+       ASSERT(0);
        return -EFSCORRUPTED;
 }
 
@@ -493,9 +528,7 @@ xfs_defer_init(
        struct xfs_defer_ops            *dop,
        xfs_fsblock_t                   *fbp)
 {
-       dop->dop_committed = false;
-       dop->dop_low = false;
-       memset(&dop->dop_inodes, 0, sizeof(dop->dop_inodes));
+       memset(dop, 0, sizeof(struct xfs_defer_ops));
        *fbp = NULLFSBLOCK;
        INIT_LIST_HEAD(&dop->dop_intake);
        INIT_LIST_HEAD(&dop->dop_pending);
index d4f046d..045beac 100644 (file)
@@ -59,6 +59,7 @@ enum xfs_defer_ops_type {
 };
 
 #define XFS_DEFER_OPS_NR_INODES        2       /* join up to two inodes */
+#define XFS_DEFER_OPS_NR_BUFS  2       /* join up to two buffers */
 
 struct xfs_defer_ops {
        bool                    dop_committed;  /* did any trans commit? */
@@ -66,8 +67,9 @@ struct xfs_defer_ops {
        struct list_head        dop_intake;     /* unlogged pending work */
        struct list_head        dop_pending;    /* logged pending work */
 
-       /* relog these inodes with each roll */
+       /* relog these with each roll */
        struct xfs_inode        *dop_inodes[XFS_DEFER_OPS_NR_INODES];
+       struct xfs_buf          *dop_bufs[XFS_DEFER_OPS_NR_BUFS];
 };
 
 void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
@@ -77,6 +79,7 @@ void xfs_defer_cancel(struct xfs_defer_ops *dop);
 void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
 bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
 int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
+int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
 
 /* Description of a deferred type. */
 struct xfs_defer_op_type {
index de3f04a..3b57ef0 100644 (file)
@@ -920,8 +920,7 @@ STATIC xfs_agnumber_t
 xfs_ialloc_ag_select(
        xfs_trans_t     *tp,            /* transaction pointer */
        xfs_ino_t       parent,         /* parent directory inode number */
-       umode_t         mode,           /* bits set to indicate file type */
-       int             okalloc)        /* ok to allocate more space */
+       umode_t         mode)           /* bits set to indicate file type */
 {
        xfs_agnumber_t  agcount;        /* number of ag's in the filesystem */
        xfs_agnumber_t  agno;           /* current ag number */
@@ -978,9 +977,6 @@ xfs_ialloc_ag_select(
                        return agno;
                }
 
-               if (!okalloc)
-                       goto nextag;
-
                if (!pag->pagf_init) {
                        error = xfs_alloc_pagf_init(mp, tp, agno, flags);
                        if (error)
@@ -1680,7 +1676,6 @@ xfs_dialloc(
        struct xfs_trans        *tp,
        xfs_ino_t               parent,
        umode_t                 mode,
-       int                     okalloc,
        struct xfs_buf          **IO_agbp,
        xfs_ino_t               *inop)
 {
@@ -1692,6 +1687,7 @@ xfs_dialloc(
        int                     noroom = 0;
        xfs_agnumber_t          start_agno;
        struct xfs_perag        *pag;
+       int                     okalloc = 1;
 
        if (*IO_agbp) {
                /*
@@ -1707,7 +1703,7 @@ xfs_dialloc(
         * We do not have an agbp, so select an initial allocation
         * group for inode allocation.
         */
-       start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
+       start_agno = xfs_ialloc_ag_select(tp, parent, mode);
        if (start_agno == NULLAGNUMBER) {
                *inop = NULLFSINO;
                return 0;
index d2bdcd5..66a8de0 100644 (file)
@@ -81,7 +81,6 @@ xfs_dialloc(
        struct xfs_trans *tp,           /* transaction pointer */
        xfs_ino_t       parent,         /* parent inode (directory) */
        umode_t         mode,           /* mode bits for new inode */
-       int             okalloc,        /* ok to allocate more space */
        struct xfs_buf  **agbp,         /* buf for a.g. inode header */
        xfs_ino_t       *inop);         /* inode number allocated */
 
index 89bf16b..b0f3179 100644 (file)
@@ -632,8 +632,6 @@ xfs_iext_insert(
        struct xfs_iext_leaf    *new = NULL;
        int                     nr_entries, i;
 
-       trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
-
        if (ifp->if_height == 0)
                xfs_iext_alloc_root(ifp, cur);
        else if (ifp->if_height == 1)
@@ -661,6 +659,8 @@ xfs_iext_insert(
        xfs_iext_set(cur_rec(cur), irec);
        ifp->if_bytes += sizeof(struct xfs_iext_rec);
 
+       trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
+
        if (new)
                xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2);
 }
index 585b35d..c40d267 100644 (file)
@@ -1488,27 +1488,12 @@ __xfs_refcount_cow_alloc(
        xfs_extlen_t            aglen,
        struct xfs_defer_ops    *dfops)
 {
-       int                     error;
-
        trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
                        agbno, aglen);
 
        /* Add refcount btree reservation */
-       error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
+       return xfs_refcount_adjust_cow(rcur, agbno, aglen,
                        XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops);
-       if (error)
-               return error;
-
-       /* Add rmap entry */
-       if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
-               error = xfs_rmap_alloc_extent(rcur->bc_mp, dfops,
-                               rcur->bc_private.a.agno,
-                               agbno, aglen, XFS_RMAP_OWN_COW);
-               if (error)
-                       return error;
-       }
-
-       return error;
 }
 
 /*
@@ -1521,27 +1506,12 @@ __xfs_refcount_cow_free(
        xfs_extlen_t            aglen,
        struct xfs_defer_ops    *dfops)
 {
-       int                     error;
-
        trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
                        agbno, aglen);
 
        /* Remove refcount btree reservation */
-       error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
+       return xfs_refcount_adjust_cow(rcur, agbno, aglen,
                        XFS_REFCOUNT_ADJUST_COW_FREE, dfops);
-       if (error)
-               return error;
-
-       /* Remove rmap entry */
-       if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
-               error = xfs_rmap_free_extent(rcur->bc_mp, dfops,
-                               rcur->bc_private.a.agno,
-                               agbno, aglen, XFS_RMAP_OWN_COW);
-               if (error)
-                       return error;
-       }
-
-       return error;
 }
 
 /* Record a CoW staging extent in the refcount btree. */
@@ -1552,11 +1522,19 @@ xfs_refcount_alloc_cow_extent(
        xfs_fsblock_t                   fsb,
        xfs_extlen_t                    len)
 {
+       int                             error;
+
        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return 0;
 
-       return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
+       error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
                        fsb, len);
+       if (error)
+               return error;
+
+       /* Add rmap entry */
+       return xfs_rmap_alloc_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+                       XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
 }
 
 /* Forget a CoW staging event in the refcount btree. */
@@ -1567,9 +1545,17 @@ xfs_refcount_free_cow_extent(
        xfs_fsblock_t                   fsb,
        xfs_extlen_t                    len)
 {
+       int                             error;
+
        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return 0;
 
+       /* Remove rmap entry */
+       error = xfs_rmap_free_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+                       XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
+       if (error)
+               return error;
+
        return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
                        fsb, len);
 }
index dd019ce..50db920 100644 (file)
@@ -367,6 +367,51 @@ xfs_rmap_lookup_le_range(
        return error;
 }
 
+/*
+ * Perform all the relevant owner checks for a removal op.  If we're doing an
+ * unknown-owner removal then we have no owner information to check.
+ */
+static int
+xfs_rmap_free_check_owner(
+       struct xfs_mount        *mp,
+       uint64_t                ltoff,
+       struct xfs_rmap_irec    *rec,
+       xfs_fsblock_t           bno,
+       xfs_filblks_t           len,
+       uint64_t                owner,
+       uint64_t                offset,
+       unsigned int            flags)
+{
+       int                     error = 0;
+
+       if (owner == XFS_RMAP_OWN_UNKNOWN)
+               return 0;
+
+       /* Make sure the unwritten flag matches. */
+       XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
+                       (rec->rm_flags & XFS_RMAP_UNWRITTEN), out);
+
+       /* Make sure the owner matches what we expect to find in the tree. */
+       XFS_WANT_CORRUPTED_GOTO(mp, owner == rec->rm_owner, out);
+
+       /* Check the offset, if necessary. */
+       if (XFS_RMAP_NON_INODE_OWNER(owner))
+               goto out;
+
+       if (flags & XFS_RMAP_BMBT_BLOCK) {
+               XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_flags & XFS_RMAP_BMBT_BLOCK,
+                               out);
+       } else {
+               XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_offset <= offset, out);
+               XFS_WANT_CORRUPTED_GOTO(mp,
+                               ltoff + rec->rm_blockcount >= offset + len,
+                               out);
+       }
+
+out:
+       return error;
+}
+
 /*
  * Find the extent in the rmap btree and remove it.
  *
@@ -444,33 +489,40 @@ xfs_rmap_unmap(
                goto out_done;
        }
 
-       /* Make sure the unwritten flag matches. */
-       XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
-                       (ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error);
+       /*
+        * If we're doing an unknown-owner removal for EFI recovery, we expect
+        * to find the full range in the rmapbt or nothing at all.  If we
+        * don't find any rmaps overlapping either end of the range, we're
+        * done.  Hopefully this means that the EFI creator already queued
+        * (and finished) a RUI to remove the rmap.
+        */
+       if (owner == XFS_RMAP_OWN_UNKNOWN &&
+           ltrec.rm_startblock + ltrec.rm_blockcount <= bno) {
+               struct xfs_rmap_irec    rtrec;
+
+               error = xfs_btree_increment(cur, 0, &i);
+               if (error)
+                       goto out_error;
+               if (i == 0)
+                       goto out_done;
+               error = xfs_rmap_get_rec(cur, &rtrec, &i);
+               if (error)
+                       goto out_error;
+               XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+               if (rtrec.rm_startblock >= bno + len)
+                       goto out_done;
+       }
 
        /* Make sure the extent we found covers the entire freeing range. */
        XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
-               ltrec.rm_startblock + ltrec.rm_blockcount >=
-               bno + len, out_error);
+                       ltrec.rm_startblock + ltrec.rm_blockcount >=
+                       bno + len, out_error);
 
-       /* Make sure the owner matches what we expect to find in the tree. */
-       XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner ||
-                                   XFS_RMAP_NON_INODE_OWNER(owner), out_error);
-
-       /* Check the offset, if necessary. */
-       if (!XFS_RMAP_NON_INODE_OWNER(owner)) {
-               if (flags & XFS_RMAP_BMBT_BLOCK) {
-                       XFS_WANT_CORRUPTED_GOTO(mp,
-                                       ltrec.rm_flags & XFS_RMAP_BMBT_BLOCK,
-                                       out_error);
-               } else {
-                       XFS_WANT_CORRUPTED_GOTO(mp,
-                                       ltrec.rm_offset <= offset, out_error);
-                       XFS_WANT_CORRUPTED_GOTO(mp,
-                                       ltoff + ltrec.rm_blockcount >= offset + len,
-                                       out_error);
-               }
-       }
+       /* Check owner information. */
+       error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, bno, len, owner,
+                       offset, flags);
+       if (error)
+               goto out_error;
 
        if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
                /* exact match, simply remove the record from rmap tree */
@@ -664,6 +716,7 @@ xfs_rmap_map(
                flags |= XFS_RMAP_UNWRITTEN;
        trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
                        unwritten, oinfo);
+       ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
 
        /*
         * For the initial lookup, look for an exact match or the left-adjacent
index 466ede6..0fcd5b1 100644 (file)
@@ -61,7 +61,21 @@ static inline void
 xfs_rmap_skip_owner_update(
        struct xfs_owner_info   *oi)
 {
-       oi->oi_owner = XFS_RMAP_OWN_UNKNOWN;
+       xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_NULL);
+}
+
+static inline bool
+xfs_rmap_should_skip_owner_update(
+       struct xfs_owner_info   *oi)
+{
+       return oi->oi_owner == XFS_RMAP_OWN_NULL;
+}
+
+static inline void
+xfs_rmap_any_owner_update(
+       struct xfs_owner_info   *oi)
+{
+       xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_UNKNOWN);
 }
 
 /* Reverse mapping functions. */
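
With the helpers above there are now two distinct "special owner" intents: OWN_NULL means "do not touch the rmapbt at all" and is what xfs_rmap_should_skip_owner_update() tests for, while OWN_UNKNOWN means "update the rmapbt, but accept whatever owner is already recorded". A short usage sketch mirroring how this patch uses them elsewhere:

    struct xfs_owner_info   oinfo;

    /* growfs-style caller: rmap records are maintained by hand, skip them */
    xfs_rmap_skip_owner_update(&oinfo);     /* oi_owner = XFS_RMAP_OWN_NULL */

    /* EFI recovery: free the extent and remove whatever rmap covers it */
    xfs_rmap_any_owner_update(&oinfo);      /* oi_owner = XFS_RMAP_OWN_UNKNOWN */
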
index 637b7a8..f120fb2 100644 (file)
@@ -318,8 +318,20 @@ xfs_scrub_dinode(
 
        /* di_mode */
        mode = be16_to_cpu(dip->di_mode);
-       if (mode & ~(S_IALLUGO | S_IFMT))
+       switch (mode & S_IFMT) {
+       case S_IFLNK:
+       case S_IFREG:
+       case S_IFDIR:
+       case S_IFCHR:
+       case S_IFBLK:
+       case S_IFIFO:
+       case S_IFSOCK:
+               /* mode is recognized */
+               break;
+       default:
                xfs_scrub_ino_set_corrupt(sc, ino, bp);
+               break;
+       }
 
        /* v1/v2 fields */
        switch (dip->di_version) {
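
The old test, mode & ~(S_IALLUGO | S_IFMT), only caught stray permission bits and happily accepted S_IFMT encodings that name no file type at all. A small self-contained illustration (userspace headers, hypothetical mode value; S_IALLUGO is kernel-internal and reproduced here):

    #include <stdio.h>
    #include <sys/stat.h>

    #define S_IALLUGO (S_ISUID | S_ISGID | S_ISVTX | 0777)

    int main(void)
    {
            unsigned int mode = 0x3000 | 0644;      /* 0x3000 is no valid type */

            /* old check: 0x3000 sits inside S_IFMT, so nothing is flagged */
            printf("old check flags corruption: %d\n",
                   (mode & ~(S_IALLUGO | S_IFMT)) != 0);        /* prints 0 */

            /* new check: an explicit switch on the type bits rejects it */
            switch (mode & S_IFMT) {
            case S_IFLNK: case S_IFREG: case S_IFDIR: case S_IFCHR:
            case S_IFBLK: case S_IFIFO: case S_IFSOCK:
                    printf("recognized file type\n");
                    break;
            default:
                    printf("corrupt mode\n");                   /* this prints */
                    break;
            }
            return 0;
    }
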
index 8e58ba8..3d9037e 100644 (file)
@@ -107,7 +107,7 @@ xfs_scrub_quota_item(
        unsigned long long              rcount;
        xfs_ino_t                       fs_icount;
 
-       offset = id * qi->qi_dqperchunk;
+       offset = id / qi->qi_dqperchunk;
 
        /*
         * We fed $id and DQNEXT into the xfs_qm_dqget call, which means
@@ -207,7 +207,7 @@ xfs_scrub_quota(
        xfs_dqid_t                      id = 0;
        uint                            dqtype;
        int                             nimaps;
-       int                             error;
+       int                             error = 0;
 
        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return -ENOENT;
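
The operator fix above matters because the scrubber wants the quota-file block that holds a given dquot id, and ids pack into blocks at qi_dqperchunk dquots per block, so the mapping is a division. A tiny worked example with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int dqperchunk = 30;   /* hypothetical dquots per block */
            unsigned int id = 95;

            printf("right: block %u\n", id / dqperchunk);   /* 3 */
            printf("wrong: block %u\n", id * dqperchunk);   /* 2850, far past EOF */
            return 0;
    }
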
index 9c42c4e..ab3aef2 100644 (file)
@@ -46,7 +46,6 @@
 #include "scrub/scrub.h"
 #include "scrub/common.h"
 #include "scrub/trace.h"
-#include "scrub/scrub.h"
 #include "scrub/btree.h"
 
 /*
index 472080e..86daed0 100644 (file)
@@ -26,7 +26,6 @@
 #include "xfs_mount.h"
 #include "xfs_defer.h"
 #include "xfs_da_format.h"
-#include "xfs_defer.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_trans.h"
index a3eeaba..4fc526a 100644 (file)
@@ -399,7 +399,7 @@ xfs_map_blocks(
               (ip->i_df.if_flags & XFS_IFEXTENTS));
        ASSERT(offset <= mp->m_super->s_maxbytes);
 
-       if (offset + count > mp->m_super->s_maxbytes)
+       if (offset > mp->m_super->s_maxbytes - count)
                count = mp->m_super->s_maxbytes - offset;
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -896,13 +896,13 @@ xfs_writepage_map(
        struct writeback_control *wbc,
        struct inode            *inode,
        struct page             *page,
-       loff_t                  offset,
-       uint64_t              end_offset)
+       uint64_t                end_offset)
 {
        LIST_HEAD(submit_list);
        struct xfs_ioend        *ioend, *next;
        struct buffer_head      *bh, *head;
        ssize_t                 len = i_blocksize(inode);
+       uint64_t                offset;
        int                     error = 0;
        int                     count = 0;
        int                     uptodate = 1;
@@ -1146,7 +1146,7 @@ xfs_do_writepage(
                end_offset = offset;
        }
 
-       return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);
+       return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
 
 redirty:
        redirty_page_for_writepage(wbc, page);
@@ -1265,7 +1265,7 @@ xfs_map_trim_size(
        if (mapping_size > size)
                mapping_size = size;
        if (offset < i_size_read(inode) &&
-           offset + mapping_size >= i_size_read(inode)) {
+           (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
                /* limit mapping to block that spans EOF */
                mapping_size = roundup_64(i_size_read(inode) - offset,
                                          i_blocksize(inode));
@@ -1312,7 +1312,7 @@ xfs_get_blocks(
        lockmode = xfs_ilock_data_map_shared(ip);
 
        ASSERT(offset <= mp->m_super->s_maxbytes);
-       if (offset + size > mp->m_super->s_maxbytes)
+       if (offset > mp->m_super->s_maxbytes - size)
                size = mp->m_super->s_maxbytes - offset;
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
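
The rewritten comparisons in this file (and the matching one in xfs_iomap.c below) avoid arithmetic that can wrap: offset + count may overflow for very large offsets, which turns the clamp into a no-op. A self-contained sketch of the pattern with hypothetical values (it assumes count <= maxbytes, as the callers guarantee):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t maxbytes = 1ULL << 40;         /* stand-in for s_maxbytes */
            uint64_t offset   = UINT64_MAX - 512;   /* pathological offset */
            uint64_t count    = 4096;

            int old_check = (offset + count > maxbytes);    /* wraps: says 0 */
            int new_check = (offset > maxbytes - count);    /* safe:  says 1 */

            printf("old=%d new=%d\n", old_check, new_check);
            return 0;
    }
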
index dd136f7..e5fb008 100644 (file)
@@ -389,7 +389,8 @@ xfs_bud_init(
 int
 xfs_bui_recover(
        struct xfs_mount                *mp,
-       struct xfs_bui_log_item         *buip)
+       struct xfs_bui_log_item         *buip,
+       struct xfs_defer_ops            *dfops)
 {
        int                             error = 0;
        unsigned int                    bui_type;
@@ -404,9 +405,7 @@ xfs_bui_recover(
        xfs_exntst_t                    state;
        struct xfs_trans                *tp;
        struct xfs_inode                *ip = NULL;
-       struct xfs_defer_ops            dfops;
        struct xfs_bmbt_irec            irec;
-       xfs_fsblock_t                   firstfsb;
 
        ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
 
@@ -464,7 +463,6 @@ xfs_bui_recover(
 
        if (VFS_I(ip)->i_nlink == 0)
                xfs_iflags_set(ip, XFS_IRECOVERY);
-       xfs_defer_init(&dfops, &firstfsb);
 
        /* Process deferred bmap item. */
        state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
@@ -479,16 +477,16 @@ xfs_bui_recover(
                break;
        default:
                error = -EFSCORRUPTED;
-               goto err_dfops;
+               goto err_inode;
        }
        xfs_trans_ijoin(tp, ip, 0);
 
        count = bmap->me_len;
-       error = xfs_trans_log_finish_bmap_update(tp, budp, &dfops, type,
+       error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type,
                        ip, whichfork, bmap->me_startoff,
                        bmap->me_startblock, &count, state);
        if (error)
-               goto err_dfops;
+               goto err_inode;
 
        if (count > 0) {
                ASSERT(type == XFS_BMAP_UNMAP);
@@ -496,16 +494,11 @@ xfs_bui_recover(
                irec.br_blockcount = count;
                irec.br_startoff = bmap->me_startoff;
                irec.br_state = state;
-               error = xfs_bmap_unmap_extent(tp->t_mountp, &dfops, ip, &irec);
+               error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec);
                if (error)
-                       goto err_dfops;
+                       goto err_inode;
        }
 
-       /* Finish transaction, free inodes. */
-       error = xfs_defer_finish(&tp, &dfops);
-       if (error)
-               goto err_dfops;
-
        set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -513,8 +506,6 @@ xfs_bui_recover(
 
        return error;
 
-err_dfops:
-       xfs_defer_cancel(&dfops);
 err_inode:
        xfs_trans_cancel(tp);
        if (ip) {
index c867daa..24b354a 100644 (file)
@@ -93,6 +93,7 @@ struct xfs_bud_log_item *xfs_bud_init(struct xfs_mount *,
                struct xfs_bui_log_item *);
 void xfs_bui_item_free(struct xfs_bui_log_item *);
 void xfs_bui_release(struct xfs_bui_log_item *);
-int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip);
+int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip,
+               struct xfs_defer_ops *dfops);
 
 #endif /* __XFS_BMAP_ITEM_H__ */
index 4db6e8d..4c6e86d 100644 (file)
@@ -1815,22 +1815,27 @@ xfs_alloc_buftarg(
        btp->bt_daxdev = dax_dev;
 
        if (xfs_setsize_buftarg_early(btp, bdev))
-               goto error;
+               goto error_free;
 
        if (list_lru_init(&btp->bt_lru))
-               goto error;
+               goto error_free;
 
        if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
-               goto error;
+               goto error_lru;
 
        btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
        btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
        btp->bt_shrinker.seeks = DEFAULT_SEEKS;
        btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
-       register_shrinker(&btp->bt_shrinker);
+       if (register_shrinker(&btp->bt_shrinker))
+               goto error_pcpu;
        return btp;
 
-error:
+error_pcpu:
+       percpu_counter_destroy(&btp->bt_io_count);
+error_lru:
+       list_lru_destroy(&btp->bt_lru);
+error_free:
        kmem_free(btp);
        return NULL;
 }
index d57c2db..f248708 100644 (file)
@@ -970,14 +970,22 @@ xfs_qm_dqflush_done(
         * holding the lock before removing the dquot from the AIL.
         */
        if ((lip->li_flags & XFS_LI_IN_AIL) &&
-           lip->li_lsn == qip->qli_flush_lsn) {
+           ((lip->li_lsn == qip->qli_flush_lsn) ||
+            (lip->li_flags & XFS_LI_FAILED))) {
 
                /* xfs_trans_ail_delete() drops the AIL lock. */
                spin_lock(&ailp->xa_lock);
-               if (lip->li_lsn == qip->qli_flush_lsn)
+               if (lip->li_lsn == qip->qli_flush_lsn) {
                        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
-               else
+               } else {
+                       /*
+                        * Clear the failed state since we are about to drop the
+                        * flush lock
+                        */
+                       if (lip->li_flags & XFS_LI_FAILED)
+                               xfs_clear_li_failed(lip);
                        spin_unlock(&ailp->xa_lock);
+               }
        }
 
        /*
index 2c7a162..664dea1 100644 (file)
@@ -137,6 +137,26 @@ xfs_qm_dqunpin_wait(
        wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
 }
 
+/*
+ * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
+ * have been failed during writeback.
+ *
+ * This informs the AIL that the dquot is already flush locked on the next push,
+ * and acquires a hold on the buffer to ensure that it isn't reclaimed before
+ * dirty data makes it to disk.
+ */
+STATIC void
+xfs_dquot_item_error(
+       struct xfs_log_item     *lip,
+       struct xfs_buf          *bp)
+{
+       struct xfs_dquot        *dqp;
+
+       dqp = DQUOT_ITEM(lip)->qli_dquot;
+       ASSERT(!completion_done(&dqp->q_flush));
+       xfs_set_li_failed(lip, bp);
+}
+
 STATIC uint
 xfs_qm_dquot_logitem_push(
        struct xfs_log_item     *lip,
@@ -144,13 +164,28 @@ xfs_qm_dquot_logitem_push(
                                              __acquires(&lip->li_ailp->xa_lock)
 {
        struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;
-       struct xfs_buf          *bp = NULL;
+       struct xfs_buf          *bp = lip->li_buf;
        uint                    rval = XFS_ITEM_SUCCESS;
        int                     error;
 
        if (atomic_read(&dqp->q_pincount) > 0)
                return XFS_ITEM_PINNED;
 
+       /*
+        * The buffer containing this item failed to be written back
+        * previously. Resubmit the buffer for IO
+        */
+       if (lip->li_flags & XFS_LI_FAILED) {
+               if (!xfs_buf_trylock(bp))
+                       return XFS_ITEM_LOCKED;
+
+               if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
+                       rval = XFS_ITEM_FLUSHING;
+
+               xfs_buf_unlock(bp);
+               return rval;
+       }
+
        if (!xfs_dqlock_nowait(dqp))
                return XFS_ITEM_LOCKED;
 
@@ -242,7 +277,8 @@ static const struct xfs_item_ops xfs_dquot_item_ops = {
        .iop_unlock     = xfs_qm_dquot_logitem_unlock,
        .iop_committed  = xfs_qm_dquot_logitem_committed,
        .iop_push       = xfs_qm_dquot_logitem_push,
-       .iop_committing = xfs_qm_dquot_logitem_committing
+       .iop_committing = xfs_qm_dquot_logitem_committing,
+       .iop_error      = xfs_dquot_item_error
 };
 
 /*
index 44f8c54..64da906 100644 (file)
@@ -538,7 +538,7 @@ xfs_efi_recover(
                return error;
        efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
 
-       xfs_rmap_skip_owner_update(&oinfo);
+       xfs_rmap_any_owner_update(&oinfo);
        for (i = 0; i < efip->efi_format.efi_nextents; i++) {
                extp = &efip->efi_format.efi_extents[i];
                error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
index 8f22fc5..60a2e12 100644 (file)
@@ -571,6 +571,11 @@ xfs_growfs_data_private(
                 * this doesn't actually exist in the rmap btree.
                 */
                xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
+               error = xfs_rmap_free(tp, bp, agno,
+                               be32_to_cpu(agf->agf_length) - new,
+                               new, &oinfo);
+               if (error)
+                       goto error0;
                error = xfs_free_extent(tp,
                                XFS_AGB_TO_FSB(mp, agno,
                                        be32_to_cpu(agf->agf_length) - new),
index 43005fb..3861d61 100644 (file)
@@ -870,7 +870,7 @@ xfs_eofblocks_worker(
  * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
  * (We'll just piggyback on the post-EOF prealloc space workqueue.)
  */
-STATIC void
+void
 xfs_queue_cowblocks(
        struct xfs_mount *mp)
 {
@@ -1536,8 +1536,23 @@ xfs_inode_free_quota_eofblocks(
        return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
 }
 
+static inline unsigned long
+xfs_iflag_for_tag(
+       int             tag)
+{
+       switch (tag) {
+       case XFS_ICI_EOFBLOCKS_TAG:
+               return XFS_IEOFBLOCKS;
+       case XFS_ICI_COWBLOCKS_TAG:
+               return XFS_ICOWBLOCKS;
+       default:
+               ASSERT(0);
+               return 0;
+       }
+}
+
 static void
-__xfs_inode_set_eofblocks_tag(
+__xfs_inode_set_blocks_tag(
        xfs_inode_t     *ip,
        void            (*execute)(struct xfs_mount *mp),
        void            (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
@@ -1552,10 +1567,10 @@ __xfs_inode_set_eofblocks_tag(
         * Don't bother locking the AG and looking up in the radix trees
         * if we already know that we have the tag set.
         */
-       if (ip->i_flags & XFS_IEOFBLOCKS)
+       if (ip->i_flags & xfs_iflag_for_tag(tag))
                return;
        spin_lock(&ip->i_flags_lock);
-       ip->i_flags |= XFS_IEOFBLOCKS;
+       ip->i_flags |= xfs_iflag_for_tag(tag);
        spin_unlock(&ip->i_flags_lock);
 
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -1587,13 +1602,13 @@ xfs_inode_set_eofblocks_tag(
        xfs_inode_t     *ip)
 {
        trace_xfs_inode_set_eofblocks_tag(ip);
-       return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks,
+       return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
                        trace_xfs_perag_set_eofblocks,
                        XFS_ICI_EOFBLOCKS_TAG);
 }
 
 static void
-__xfs_inode_clear_eofblocks_tag(
+__xfs_inode_clear_blocks_tag(
        xfs_inode_t     *ip,
        void            (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
                                    int error, unsigned long caller_ip),
@@ -1603,7 +1618,7 @@ __xfs_inode_clear_eofblocks_tag(
        struct xfs_perag *pag;
 
        spin_lock(&ip->i_flags_lock);
-       ip->i_flags &= ~XFS_IEOFBLOCKS;
+       ip->i_flags &= ~xfs_iflag_for_tag(tag);
        spin_unlock(&ip->i_flags_lock);
 
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -1630,7 +1645,7 @@ xfs_inode_clear_eofblocks_tag(
        xfs_inode_t     *ip)
 {
        trace_xfs_inode_clear_eofblocks_tag(ip);
-       return __xfs_inode_clear_eofblocks_tag(ip,
+       return __xfs_inode_clear_blocks_tag(ip,
                        trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
 }
 
@@ -1724,7 +1739,7 @@ xfs_inode_set_cowblocks_tag(
        xfs_inode_t     *ip)
 {
        trace_xfs_inode_set_cowblocks_tag(ip);
-       return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
+       return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
                        trace_xfs_perag_set_cowblocks,
                        XFS_ICI_COWBLOCKS_TAG);
 }
@@ -1734,6 +1749,6 @@ xfs_inode_clear_cowblocks_tag(
        xfs_inode_t     *ip)
 {
        trace_xfs_inode_clear_cowblocks_tag(ip);
-       return __xfs_inode_clear_eofblocks_tag(ip,
+       return __xfs_inode_clear_blocks_tag(ip,
                        trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
 }
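
After the rename, one pair of helpers serves both the post-EOF and the CoW block tags; the per-tag differences shrink to the worker queue, the trace points and the inode flag that xfs_iflag_for_tag() returns. How a tag user plugs in, using the names from the hunk above:

    /* setter: queue function, trace point and radix-tree tag for this class */
    __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
                    trace_xfs_perag_set_cowblocks, XFS_ICI_COWBLOCKS_TAG);

    /* clearer: the matching trace point and tag */
    __xfs_inode_clear_blocks_tag(ip,
                    trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
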
index bff4d85..d4a7758 100644 (file)
@@ -81,6 +81,7 @@ void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);
 int xfs_icache_free_cowblocks(struct xfs_mount *, struct xfs_eofblocks *);
 int xfs_inode_free_quota_cowblocks(struct xfs_inode *ip);
 void xfs_cowblocks_worker(struct work_struct *);
+void xfs_queue_cowblocks(struct xfs_mount *);
 
 int xfs_inode_ag_iterator(struct xfs_mount *mp,
        int (*execute)(struct xfs_inode *ip, int flags, void *args),
index 61d1cb7..6f95bdb 100644 (file)
@@ -749,7 +749,6 @@ xfs_ialloc(
        xfs_nlink_t     nlink,
        dev_t           rdev,
        prid_t          prid,
-       int             okalloc,
        xfs_buf_t       **ialloc_context,
        xfs_inode_t     **ipp)
 {
@@ -765,7 +764,7 @@ xfs_ialloc(
         * Call the space management code to pick
         * the on-disk inode to be allocated.
         */
-       error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
+       error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
                            ialloc_context, &ino);
        if (error)
                return error;
@@ -957,7 +956,6 @@ xfs_dir_ialloc(
        xfs_nlink_t     nlink,
        dev_t           rdev,
        prid_t          prid,           /* project id */
-       int             okalloc,        /* ok to allocate new space */
        xfs_inode_t     **ipp,          /* pointer to inode; it will be
                                           locked. */
        int             *committed)
@@ -988,8 +986,8 @@ xfs_dir_ialloc(
         * transaction commit so that no other process can steal
         * the inode(s) that we've just allocated.
         */
-       code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
-                         &ialloc_context, &ip);
+       code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
+                       &ip);
 
        /*
         * Return an error if we were unable to allocate a new inode.
@@ -1061,7 +1059,7 @@ xfs_dir_ialloc(
                 * this call should always succeed.
                 */
                code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
-                                 okalloc, &ialloc_context, &ip);
+                                 &ialloc_context, &ip);
 
                /*
                 * If we get an error at this point, return to the caller
@@ -1182,11 +1180,6 @@ xfs_create(
                xfs_flush_inodes(mp);
                error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
        }
-       if (error == -ENOSPC) {
-               /* No space at all so try a "no-allocation" reservation */
-               resblks = 0;
-               error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
-       }
        if (error)
                goto out_release_inode;
 
@@ -1203,19 +1196,13 @@ xfs_create(
        if (error)
                goto out_trans_cancel;
 
-       if (!resblks) {
-               error = xfs_dir_canenter(tp, dp, name);
-               if (error)
-                       goto out_trans_cancel;
-       }
-
        /*
         * A newly created regular or special file just has one directory
         * entry pointing to them, but a directory also the "." entry
         * pointing to itself.
         */
-       error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
-                              prid, resblks > 0, &ip, NULL);
+       error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip,
+                       NULL);
        if (error)
                goto out_trans_cancel;
 
@@ -1340,11 +1327,6 @@ xfs_create_tmpfile(
        tres = &M_RES(mp)->tr_create_tmpfile;
 
        error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
-       if (error == -ENOSPC) {
-               /* No space at all so try a "no-allocation" reservation */
-               resblks = 0;
-               error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
-       }
        if (error)
                goto out_release_inode;
 
@@ -1353,8 +1335,7 @@ xfs_create_tmpfile(
        if (error)
                goto out_trans_cancel;
 
-       error = xfs_dir_ialloc(&tp, dp, mode, 1, 0,
-                               prid, resblks > 0, &ip, NULL);
+       error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip, NULL);
        if (error)
                goto out_trans_cancel;
 
@@ -1506,6 +1487,24 @@ xfs_link(
        return error;
 }
 
+/* Clear the reflink flag and the cowblocks tag if possible. */
+static void
+xfs_itruncate_clear_reflink_flags(
+       struct xfs_inode        *ip)
+{
+       struct xfs_ifork        *dfork;
+       struct xfs_ifork        *cfork;
+
+       if (!xfs_is_reflink_inode(ip))
+               return;
+       dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+       cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+       if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
+               ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+       if (cfork->if_bytes == 0)
+               xfs_inode_clear_cowblocks_tag(ip);
+}
+
 /*
  * Free up the underlying blocks past new_size.  The new size must be smaller
  * than the current size.  This routine can be used both for the attribute and
@@ -1602,15 +1601,7 @@ xfs_itruncate_extents(
        if (error)
                goto out;
 
-       /*
-        * Clear the reflink flag if there are no data fork blocks and
-        * there are no extents staged in the cow fork.
-        */
-       if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
-               if (ip->i_d.di_nblocks == 0)
-                       ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
-               xfs_inode_clear_cowblocks_tag(ip);
-       }
+       xfs_itruncate_clear_reflink_flags(ip);
 
        /*
         * Always re-log the inode so that our permanent transaction can keep
@@ -2400,6 +2391,24 @@ retry:
        return 0;
 }
 
+/*
+ * Free any local-format buffers sitting around before we reset to
+ * extents format.
+ */
+static inline void
+xfs_ifree_local_data(
+       struct xfs_inode        *ip,
+       int                     whichfork)
+{
+       struct xfs_ifork        *ifp;
+
+       if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
+               return;
+
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
+}
+
 /*
  * This is called to return an inode to the inode free list.
  * The inode should already be truncated to 0 length and have
@@ -2437,6 +2446,9 @@ xfs_ifree(
        if (error)
                return error;
 
+       xfs_ifree_local_data(ip, XFS_DATA_FORK);
+       xfs_ifree_local_data(ip, XFS_ATTR_FORK);
+
        VFS_I(ip)->i_mode = 0;          /* mark incore inode as free */
        ip->i_d.di_flags = 0;
        ip->i_d.di_dmevmask = 0;
index cc13c37..d383e39 100644 (file)
@@ -232,6 +232,7 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
  * log recovery to replay a bmap operation on the inode.
  */
 #define XFS_IRECOVERY          (1 << 11)
+#define XFS_ICOWBLOCKS         (1 << 12)/* has the cowblocks tag set */
 
 /*
  * Per-lifetime flags need to be reset when re-using a reclaimable inode during
@@ -428,7 +429,7 @@ xfs_extlen_t        xfs_get_extsz_hint(struct xfs_inode *ip);
 xfs_extlen_t   xfs_get_cowextsz_hint(struct xfs_inode *ip);
 
 int            xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t,
-                              xfs_nlink_t, dev_t, prid_t, int,
+                              xfs_nlink_t, dev_t, prid_t,
                               struct xfs_inode **, int *);
 
 /* from xfs_file.c */
index 33eb4fb..66e1edb 100644 (file)
@@ -1006,7 +1006,7 @@ xfs_file_iomap_begin(
        }
 
        ASSERT(offset <= mp->m_super->s_maxbytes);
-       if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
+       if (offset > mp->m_super->s_maxbytes - length)
                length = mp->m_super->s_maxbytes - offset;
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        end_fsb = XFS_B_TO_FSB(mp, offset + length);
@@ -1213,7 +1213,7 @@ xfs_xattr_iomap_begin(
 
        ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
-                              &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
+                              &nimaps, XFS_BMAPI_ATTRFORK);
 out_unlock:
        xfs_iunlock(ip, lockmode);
 
index 38d4227..a503af9 100644 (file)
@@ -781,17 +781,17 @@ xfs_log_mount_finish(
         * something to an unlinked inode, the irele won't cause
         * premature truncation and freeing of the inode, which results
         * in log recovery failure.  We have to evict the unreferenced
-        * lru inodes after clearing MS_ACTIVE because we don't
+        * lru inodes after clearing SB_ACTIVE because we don't
         * otherwise clean up the lru if there's a subsequent failure in
         * xfs_mountfs, which leads to us leaking the inodes if nothing
         * else (e.g. quotacheck) references the inodes before the
         * mount failure occurs.
         */
-       mp->m_super->s_flags |= MS_ACTIVE;
+       mp->m_super->s_flags |= SB_ACTIVE;
        error = xlog_recover_finish(mp->m_log);
        if (!error)
                xfs_log_work_queue(mp);
-       mp->m_super->s_flags &= ~MS_ACTIVE;
+       mp->m_super->s_flags &= ~SB_ACTIVE;
        evict_inodes(mp->m_super);
 
        /*
index 87b1c33..28d1abf 100644 (file)
@@ -24,6 +24,7 @@
 #include "xfs_bit.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_defer.h"
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
 #include "xfs_inode.h"
@@ -4716,7 +4717,8 @@ STATIC int
 xlog_recover_process_cui(
        struct xfs_mount                *mp,
        struct xfs_ail                  *ailp,
-       struct xfs_log_item             *lip)
+       struct xfs_log_item             *lip,
+       struct xfs_defer_ops            *dfops)
 {
        struct xfs_cui_log_item         *cuip;
        int                             error;
@@ -4729,7 +4731,7 @@ xlog_recover_process_cui(
                return 0;
 
        spin_unlock(&ailp->xa_lock);
-       error = xfs_cui_recover(mp, cuip);
+       error = xfs_cui_recover(mp, cuip, dfops);
        spin_lock(&ailp->xa_lock);
 
        return error;
@@ -4756,7 +4758,8 @@ STATIC int
 xlog_recover_process_bui(
        struct xfs_mount                *mp,
        struct xfs_ail                  *ailp,
-       struct xfs_log_item             *lip)
+       struct xfs_log_item             *lip,
+       struct xfs_defer_ops            *dfops)
 {
        struct xfs_bui_log_item         *buip;
        int                             error;
@@ -4769,7 +4772,7 @@ xlog_recover_process_bui(
                return 0;
 
        spin_unlock(&ailp->xa_lock);
-       error = xfs_bui_recover(mp, buip);
+       error = xfs_bui_recover(mp, buip, dfops);
        spin_lock(&ailp->xa_lock);
 
        return error;
@@ -4805,6 +4808,46 @@ static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
        }
 }
 
+/* Take all the collected deferred ops and finish them in order. */
+static int
+xlog_finish_defer_ops(
+       struct xfs_mount        *mp,
+       struct xfs_defer_ops    *dfops)
+{
+       struct xfs_trans        *tp;
+       int64_t                 freeblks;
+       uint                    resblks;
+       int                     error;
+
+       /*
+        * We're finishing the defer_ops that accumulated as a result of
+        * recovering unfinished intent items during log recovery.  We
+        * reserve an itruncate transaction because it is the largest
+        * permanent transaction type.  Since we're the only user of the fs
+        * right now, take 93% (15/16) of the available free blocks.  Use
+        * weird math to avoid a 64-bit division.
+        */
+       freeblks = percpu_counter_sum(&mp->m_fdblocks);
+       if (freeblks <= 0)
+               return -ENOSPC;
+       resblks = min_t(int64_t, UINT_MAX, freeblks);
+       resblks = (resblks * 15) >> 4;
+       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
+                       0, XFS_TRANS_RESERVE, &tp);
+       if (error)
+               return error;
+
+       error = xfs_defer_finish(&tp, dfops);
+       if (error)
+               goto out_cancel;
+
+       return xfs_trans_commit(tp);
+
+out_cancel:
+       xfs_trans_cancel(tp);
+       return error;
+}
+
 /*
  * When this is called, all of the log intent items which did not have
  * corresponding log done items should be in the AIL.  What we do now
@@ -4825,10 +4868,12 @@ STATIC int
 xlog_recover_process_intents(
        struct xlog             *log)
 {
-       struct xfs_log_item     *lip;
-       int                     error = 0;
+       struct xfs_defer_ops    dfops;
        struct xfs_ail_cursor   cur;
+       struct xfs_log_item     *lip;
        struct xfs_ail          *ailp;
+       xfs_fsblock_t           firstfsb;
+       int                     error = 0;
 #if defined(DEBUG) || defined(XFS_WARN)
        xfs_lsn_t               last_lsn;
 #endif
@@ -4839,6 +4884,7 @@ xlog_recover_process_intents(
 #if defined(DEBUG) || defined(XFS_WARN)
        last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
 #endif
+       xfs_defer_init(&dfops, &firstfsb);
        while (lip != NULL) {
                /*
                 * We're done when we see something other than an intent.
@@ -4859,6 +4905,12 @@ xlog_recover_process_intents(
                 */
                ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
 
+               /*
+                * NOTE: If your intent processing routine can create more
+                * deferred ops, you /must/ attach them to the dfops in this
+                * routine or else those subsequent intents will get
+                * replayed in the wrong order!
+                */
                switch (lip->li_type) {
                case XFS_LI_EFI:
                        error = xlog_recover_process_efi(log->l_mp, ailp, lip);
@@ -4867,10 +4919,12 @@ xlog_recover_process_intents(
                        error = xlog_recover_process_rui(log->l_mp, ailp, lip);
                        break;
                case XFS_LI_CUI:
-                       error = xlog_recover_process_cui(log->l_mp, ailp, lip);
+                       error = xlog_recover_process_cui(log->l_mp, ailp, lip,
+                                       &dfops);
                        break;
                case XFS_LI_BUI:
-                       error = xlog_recover_process_bui(log->l_mp, ailp, lip);
+                       error = xlog_recover_process_bui(log->l_mp, ailp, lip,
+                                       &dfops);
                        break;
                }
                if (error)
@@ -4880,6 +4934,11 @@ xlog_recover_process_intents(
 out:
        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->xa_lock);
+       if (error)
+               xfs_defer_cancel(&dfops);
+       else
+               error = xlog_finish_defer_ops(log->l_mp, &dfops);
+
        return error;
 }
 
index 010a13a..b897b11 100644 (file)
@@ -48,7 +48,7 @@
 STATIC int     xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int     xfs_qm_init_quotainfo(xfs_mount_t *);
 
-
+STATIC void    xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
 STATIC void    xfs_qm_dqfree_one(struct xfs_dquot *dqp);
 /*
  * We use the batch lookup interface to iterate over the dquots as it
@@ -695,9 +695,17 @@ xfs_qm_init_quotainfo(
        qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
        qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
        qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
-       register_shrinker(&qinf->qi_shrinker);
+
+       error = register_shrinker(&qinf->qi_shrinker);
+       if (error)
+               goto out_free_inos;
+
        return 0;
 
+out_free_inos:
+       mutex_destroy(&qinf->qi_quotaofflock);
+       mutex_destroy(&qinf->qi_tree_lock);
+       xfs_qm_destroy_quotainos(qinf);
 out_free_lru:
        list_lru_destroy(&qinf->qi_lru);
 out_free_qinf:
@@ -706,7 +714,6 @@ out_free_qinf:
        return error;
 }
 
-
 /*
  * Gets called when unmounting a filesystem or when all quotas get
  * turned off.
@@ -723,19 +730,8 @@ xfs_qm_destroy_quotainfo(
 
        unregister_shrinker(&qi->qi_shrinker);
        list_lru_destroy(&qi->qi_lru);
-
-       if (qi->qi_uquotaip) {
-               IRELE(qi->qi_uquotaip);
-               qi->qi_uquotaip = NULL; /* paranoia */
-       }
-       if (qi->qi_gquotaip) {
-               IRELE(qi->qi_gquotaip);
-               qi->qi_gquotaip = NULL;
-       }
-       if (qi->qi_pquotaip) {
-               IRELE(qi->qi_pquotaip);
-               qi->qi_pquotaip = NULL;
-       }
+       xfs_qm_destroy_quotainos(qi);
+       mutex_destroy(&qi->qi_tree_lock);
        mutex_destroy(&qi->qi_quotaofflock);
        kmem_free(qi);
        mp->m_quotainfo = NULL;
@@ -793,8 +789,8 @@ xfs_qm_qino_alloc(
                return error;
 
        if (need_alloc) {
-               error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
-                                                               &committed);
+               error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip,
+                               &committed);
                if (error) {
                        xfs_trans_cancel(tp);
                        return error;
@@ -1599,6 +1595,24 @@ error_rele:
        return error;
 }
 
+STATIC void
+xfs_qm_destroy_quotainos(
+       xfs_quotainfo_t *qi)
+{
+       if (qi->qi_uquotaip) {
+               IRELE(qi->qi_uquotaip);
+               qi->qi_uquotaip = NULL; /* paranoia */
+       }
+       if (qi->qi_gquotaip) {
+               IRELE(qi->qi_gquotaip);
+               qi->qi_gquotaip = NULL;
+       }
+       if (qi->qi_pquotaip) {
+               IRELE(qi->qi_pquotaip);
+               qi->qi_pquotaip = NULL;
+       }
+}
+
 STATIC void
 xfs_qm_dqfree_one(
        struct xfs_dquot        *dqp)
index 8f2e2fa..3a55d6f 100644 (file)
@@ -393,7 +393,8 @@ xfs_cud_init(
 int
 xfs_cui_recover(
        struct xfs_mount                *mp,
-       struct xfs_cui_log_item         *cuip)
+       struct xfs_cui_log_item         *cuip,
+       struct xfs_defer_ops            *dfops)
 {
        int                             i;
        int                             error = 0;
@@ -405,11 +406,9 @@ xfs_cui_recover(
        struct xfs_trans                *tp;
        struct xfs_btree_cur            *rcur = NULL;
        enum xfs_refcount_intent_type   type;
-       xfs_fsblock_t                   firstfsb;
        xfs_fsblock_t                   new_fsb;
        xfs_extlen_t                    new_len;
        struct xfs_bmbt_irec            irec;
-       struct xfs_defer_ops            dfops;
        bool                            requeue_only = false;
 
        ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));
@@ -465,7 +464,6 @@ xfs_cui_recover(
                return error;
        cudp = xfs_trans_get_cud(tp, cuip);
 
-       xfs_defer_init(&dfops, &firstfsb);
        for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
                refc = &cuip->cui_format.cui_extents[i];
                refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
@@ -485,7 +483,7 @@ xfs_cui_recover(
                        new_len = refc->pe_len;
                } else
                        error = xfs_trans_log_finish_refcount_update(tp, cudp,
-                               &dfops, type, refc->pe_startblock, refc->pe_len,
+                               dfops, type, refc->pe_startblock, refc->pe_len,
                                &new_fsb, &new_len, &rcur);
                if (error)
                        goto abort_error;
@@ -497,21 +495,21 @@ xfs_cui_recover(
                        switch (type) {
                        case XFS_REFCOUNT_INCREASE:
                                error = xfs_refcount_increase_extent(
-                                               tp->t_mountp, &dfops, &irec);
+                                               tp->t_mountp, dfops, &irec);
                                break;
                        case XFS_REFCOUNT_DECREASE:
                                error = xfs_refcount_decrease_extent(
-                                               tp->t_mountp, &dfops, &irec);
+                                               tp->t_mountp, dfops, &irec);
                                break;
                        case XFS_REFCOUNT_ALLOC_COW:
                                error = xfs_refcount_alloc_cow_extent(
-                                               tp->t_mountp, &dfops,
+                                               tp->t_mountp, dfops,
                                                irec.br_startblock,
                                                irec.br_blockcount);
                                break;
                        case XFS_REFCOUNT_FREE_COW:
                                error = xfs_refcount_free_cow_extent(
-                                               tp->t_mountp, &dfops,
+                                               tp->t_mountp, dfops,
                                                irec.br_startblock,
                                                irec.br_blockcount);
                                break;
@@ -525,17 +523,12 @@ xfs_cui_recover(
        }
 
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
-       error = xfs_defer_finish(&tp, &dfops);
-       if (error)
-               goto abort_defer;
        set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
        error = xfs_trans_commit(tp);
        return error;
 
 abort_error:
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
-abort_defer:
-       xfs_defer_cancel(&dfops);
        xfs_trans_cancel(tp);
        return error;
 }
index 5b74ddd..0e53273 100644 (file)
@@ -96,6 +96,7 @@ struct xfs_cud_log_item *xfs_cud_init(struct xfs_mount *,
                struct xfs_cui_log_item *);
 void xfs_cui_item_free(struct xfs_cui_log_item *);
 void xfs_cui_release(struct xfs_cui_log_item *);
-int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip);
+int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip,
+               struct xfs_defer_ops *dfops);
 
 #endif /* __XFS_REFCOUNT_ITEM_H__ */
index cc041a2..47aea2e 100644 (file)
@@ -49,8 +49,6 @@
 #include "xfs_alloc.h"
 #include "xfs_quota_defs.h"
 #include "xfs_quota.h"
-#include "xfs_btree.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_reflink.h"
 #include "xfs_iomap.h"
 #include "xfs_rmap_btree.h"
@@ -456,6 +454,8 @@ retry:
        if (error)
                goto out_bmap_cancel;
 
+       xfs_inode_set_cowblocks_tag(ip);
+
        /* Finish up. */
        error = xfs_defer_finish(&tp, &dfops);
        if (error)
@@ -492,8 +492,9 @@ xfs_reflink_find_cow_mapping(
        struct xfs_iext_cursor          icur;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
-       ASSERT(xfs_is_reflink_inode(ip));
 
+       if (!xfs_is_reflink_inode(ip))
+               return false;
        offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
        if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
                return false;
@@ -612,6 +613,9 @@ xfs_reflink_cancel_cow_blocks(
 
                        /* Remove the mapping from the CoW fork. */
                        xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
+               } else {
+                       /* Didn't do anything, push cursor back. */
+                       xfs_iext_prev(ifp, &icur);
                }
 next_extent:
                if (!xfs_iext_get_extent(ifp, &icur, &got))
@@ -727,7 +731,7 @@ xfs_reflink_end_cow(
                        (unsigned int)(end_fsb - offset_fsb),
                        XFS_DATA_FORK);
        error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
-                       resblks, 0, 0, &tp);
+                       resblks, 0, XFS_TRANS_RESERVE, &tp);
        if (error)
                goto out;
 
@@ -1293,6 +1297,17 @@ xfs_reflink_remap_range(
 
        trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
 
+       /*
+        * Clear out post-eof preallocations because we don't have page cache
+        * backing the delayed allocations and they'll never get freed on
+        * their own.
+        */
+       if (xfs_can_free_eofblocks(dest, true)) {
+               ret = xfs_free_eofblocks(dest);
+               if (ret)
+                       goto out_unlock;
+       }
+
        /* Set flags and remap blocks. */
        ret = xfs_reflink_set_inode_flag(src, dest);
        if (ret)
index f663022..1dacccc 100644 (file)
@@ -212,9 +212,9 @@ xfs_parseargs(
         */
        if (sb_rdonly(sb))
                mp->m_flags |= XFS_MOUNT_RDONLY;
-       if (sb->s_flags & MS_DIRSYNC)
+       if (sb->s_flags & SB_DIRSYNC)
                mp->m_flags |= XFS_MOUNT_DIRSYNC;
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                mp->m_flags |= XFS_MOUNT_WSYNC;
 
        /*
@@ -1312,7 +1312,7 @@ xfs_fs_remount(
        }
 
        /* ro -> rw */
-       if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
+       if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
                if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
                        xfs_warn(mp,
                "ro->rw transition prohibited on norecovery mount");
@@ -1360,6 +1360,7 @@ xfs_fs_remount(
                        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                        return error;
                }
+               xfs_queue_cowblocks(mp);
 
                /* Create the per-AG metadata reservation pool .*/
                error = xfs_fs_reserve_ag_blocks(mp);
@@ -1368,7 +1369,15 @@ xfs_fs_remount(
        }
 
        /* rw -> ro */
-       if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+       if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
+               /* Get rid of any leftover CoW reservations... */
+               cancel_delayed_work_sync(&mp->m_cowblocks_work);
+               error = xfs_icache_free_cowblocks(mp, NULL);
+               if (error) {
+                       xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+                       return error;
+               }
+
                /* Free the per-AG metadata reservation pool. */
                error = xfs_fs_unreserve_ag_blocks(mp);
                if (error) {
index 5f2f324..fcc5dfc 100644 (file)
@@ -30,7 +30,7 @@ extern void xfs_qm_exit(void);
 
 #ifdef CONFIG_XFS_POSIX_ACL
 # define XFS_ACL_STRING                "ACLs, "
-# define set_posix_acl_flag(sb)        ((sb)->s_flags |= MS_POSIXACL)
+# define set_posix_acl_flag(sb)        ((sb)->s_flags |= SB_POSIXACL)
 #else
 # define XFS_ACL_STRING
 # define set_posix_acl_flag(sb)        do { } while (0)
index 68d3ca2..2e9e793 100644 (file)
@@ -232,11 +232,6 @@ xfs_symlink(
        resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
 
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp);
-       if (error == -ENOSPC && fs_blocks == 0) {
-               resblks = 0;
-               error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, 0, 0, 0,
-                               &tp);
-       }
        if (error)
                goto out_release_inode;
 
@@ -259,14 +254,6 @@ xfs_symlink(
        if (error)
                goto out_trans_cancel;
 
-       /*
-        * Check for ability to enter directory entry, if no space reserved.
-        */
-       if (!resblks) {
-               error = xfs_dir_canenter(tp, dp, link_name);
-               if (error)
-                       goto out_trans_cancel;
-       }
        /*
         * Initialize the bmap freelist prior to calling either
         * bmapi or the directory create code.
@@ -277,7 +264,7 @@ xfs_symlink(
         * Allocate an inode for the symlink.
         */
        error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
-                              prid, resblks > 0, &ip, NULL);
+                              prid, &ip, NULL);
        if (error)
                goto out_trans_cancel;
 
index 5d95fe3..35f3546 100644 (file)
@@ -24,7 +24,6 @@
 #include "xfs_mount.h"
 #include "xfs_defer.h"
 #include "xfs_da_format.h"
-#include "xfs_defer.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_da_btree.h"
index 6db3b46..ffe364f 100644 (file)
 
 #define ACPI_ADDRESS_RANGE_MAX          2
 
-/* Maximum number of While() loops before abort */
+/* Maximum time (default 30s) of While() loops before abort */
 
-#define ACPI_MAX_LOOP_COUNT             0x000FFFFF
+#define ACPI_MAX_LOOP_TIMEOUT           30
 
 /******************************************************************************
  *
index 17d61b1..3c46f0e 100644 (file)
@@ -130,8 +130,9 @@ struct acpi_exception_info {
 #define AE_HEX_OVERFLOW                 EXCEP_ENV (0x0020)
 #define AE_DECIMAL_OVERFLOW             EXCEP_ENV (0x0021)
 #define AE_OCTAL_OVERFLOW               EXCEP_ENV (0x0022)
+#define AE_END_OF_TABLE                 EXCEP_ENV (0x0023)
 
-#define AE_CODE_ENV_MAX                 0x0022
+#define AE_CODE_ENV_MAX                 0x0023
 
 /*
  * Programmer exceptions
@@ -195,7 +196,7 @@ struct acpi_exception_info {
 #define AE_AML_CIRCULAR_REFERENCE       EXCEP_AML (0x001E)
 #define AE_AML_BAD_RESOURCE_LENGTH      EXCEP_AML (0x001F)
 #define AE_AML_ILLEGAL_ADDRESS          EXCEP_AML (0x0020)
-#define AE_AML_INFINITE_LOOP            EXCEP_AML (0x0021)
+#define AE_AML_LOOP_TIMEOUT             EXCEP_AML (0x0021)
 #define AE_AML_UNINITIALIZED_NODE       EXCEP_AML (0x0022)
 #define AE_AML_TARGET_TYPE              EXCEP_AML (0x0023)
 
@@ -275,7 +276,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
        EXCEP_TXT("AE_DECIMAL_OVERFLOW",
                  "Overflow during ASCII decimal-to-binary conversion"),
        EXCEP_TXT("AE_OCTAL_OVERFLOW",
-                 "Overflow during ASCII octal-to-binary conversion")
+                 "Overflow during ASCII octal-to-binary conversion"),
+       EXCEP_TXT("AE_END_OF_TABLE", "Reached the end of table")
 };
 
 static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
@@ -368,8 +370,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
                  "The length of a Resource Descriptor in the AML is incorrect"),
        EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
                  "A memory, I/O, or PCI configuration address is invalid"),
-       EXCEP_TXT("AE_AML_INFINITE_LOOP",
-                 "An apparent infinite AML While loop, method was aborted"),
+       EXCEP_TXT("AE_AML_LOOP_TIMEOUT",
+                 "An AML While loop exceeded the maximum execution time"),
        EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
                  "A namespace node is uninitialized or unresolved"),
        EXCEP_TXT("AE_AML_TARGET_TYPE",
index f849be2..c9608b0 100644 (file)
@@ -91,6 +91,9 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
 bool acpi_dev_found(const char *hid);
 bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
 
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv);
+
 #ifdef CONFIG_ACPI
 
 #include <linux/proc_fs.h>
@@ -105,6 +108,7 @@ enum acpi_bus_device_type {
        ACPI_BUS_TYPE_THERMAL,
        ACPI_BUS_TYPE_POWER_BUTTON,
        ACPI_BUS_TYPE_SLEEP_BUTTON,
+       ACPI_BUS_TYPE_ECDT_EC,
        ACPI_BUS_DEVICE_TYPE_COUNT
 };
 
index 29c6912..1449975 100644 (file)
@@ -58,6 +58,7 @@
 #define ACPI_VIDEO_HID                 "LNXVIDEO"
 #define ACPI_BAY_HID                   "LNXIOBAY"
 #define ACPI_DOCK_HID                  "LNXDOCK"
+#define ACPI_ECDT_HID                  "LNXEC"
 /* Quirk for broken IBM BIOSes */
 #define ACPI_SMBUS_IBM_HID             "SMBUSIBM"
 
index e1dd1a8..c589c3e 100644 (file)
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20170831
+#define ACPI_CA_VERSION                 0x20171215
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -260,11 +260,11 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
 
 /*
- * Maximum number of While() loop iterations before forced method abort.
+ * Maximum timeout for While() loop iterations before forced method abort.
  * This mechanism is intended to prevent infinite loops during interpreter
  * execution within a host kernel.
  */
-ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_COUNT);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_TIMEOUT);
 
 /*
  * This mechanism is used to trace a specified AML method. The method is
index 7a89e6d..4c304bf 100644 (file)
 #define ACPI_SIG_HEST           "HEST" /* Hardware Error Source Table */
 #define ACPI_SIG_MADT           "APIC" /* Multiple APIC Description Table */
 #define ACPI_SIG_MSCT           "MSCT" /* Maximum System Characteristics Table */
-#define ACPI_SIG_PDTT           "PDTT" /* Processor Debug Trigger Table */
+#define ACPI_SIG_PDTT           "PDTT" /* Platform Debug Trigger Table */
 #define ACPI_SIG_PPTT           "PPTT" /* Processor Properties Topology Table */
 #define ACPI_SIG_SBST           "SBST" /* Smart Battery Specification Table */
+#define ACPI_SIG_SDEV           "SDEV" /* Secure Devices table */
 #define ACPI_SIG_SLIT           "SLIT" /* System Locality Distance Information Table */
 #define ACPI_SIG_SRAT           "SRAT" /* System Resource Affinity Table */
 #define ACPI_SIG_NFIT           "NFIT" /* NVDIMM Firmware Interface Table */
@@ -1149,7 +1150,8 @@ enum acpi_nfit_type {
        ACPI_NFIT_TYPE_CONTROL_REGION = 4,
        ACPI_NFIT_TYPE_DATA_REGION = 5,
        ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
-       ACPI_NFIT_TYPE_RESERVED = 7     /* 7 and greater are reserved */
+       ACPI_NFIT_TYPE_CAPABILITIES = 7,
+       ACPI_NFIT_TYPE_RESERVED = 8     /* 8 and greater are reserved */
 };
 
 /*
@@ -1162,7 +1164,7 @@ struct acpi_nfit_system_address {
        struct acpi_nfit_header header;
        u16 range_index;
        u16 flags;
-       u32 reserved;           /* Reseved, must be zero */
+       u32 reserved;           /* Reserved, must be zero */
        u32 proximity_domain;
        u8 range_guid[16];
        u64 address;
@@ -1281,9 +1283,72 @@ struct acpi_nfit_flush_address {
        u64 hint_address[1];    /* Variable length */
 };
 
+/* 7: Platform Capabilities Structure */
+
+struct acpi_nfit_capabilities {
+       struct acpi_nfit_header header;
+       u8 highest_capability;
+       u8 reserved[3];         /* Reserved, must be zero */
+       u32 capabilities;
+       u32 reserved2;
+};
+
+/* Capabilities Flags */
+
+#define ACPI_NFIT_CAPABILITY_CACHE_FLUSH       (1)     /* 00: Cache Flush to NVDIMM capable */
+#define ACPI_NFIT_CAPABILITY_MEM_FLUSH         (1<<1)  /* 01: Memory Flush to NVDIMM capable */
+#define ACPI_NFIT_CAPABILITY_MEM_MIRRORING     (1<<2)  /* 02: Memory Mirroring capable */
+
+/*
+ * NFIT/DVDIMM device handle support - used as the _ADR for each NVDIMM
+ */
+struct nfit_device_handle {
+       u32 handle;
+};
+
+/* Device handle construction and extraction macros */
+
+#define ACPI_NFIT_DIMM_NUMBER_MASK              0x0000000F
+#define ACPI_NFIT_CHANNEL_NUMBER_MASK           0x000000F0
+#define ACPI_NFIT_MEMORY_ID_MASK                0x00000F00
+#define ACPI_NFIT_SOCKET_ID_MASK                0x0000F000
+#define ACPI_NFIT_NODE_ID_MASK                  0x0FFF0000
+
+#define ACPI_NFIT_DIMM_NUMBER_OFFSET            0
+#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET         4
+#define ACPI_NFIT_MEMORY_ID_OFFSET              8
+#define ACPI_NFIT_SOCKET_ID_OFFSET              12
+#define ACPI_NFIT_NODE_ID_OFFSET                16
+
+/* Macro to construct a NFIT/NVDIMM device handle */
+
+#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \
+       ((dimm)                                         | \
+       ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET)  | \
+       ((memory)  << ACPI_NFIT_MEMORY_ID_OFFSET)       | \
+       ((socket)  << ACPI_NFIT_SOCKET_ID_OFFSET)       | \
+       ((node)    << ACPI_NFIT_NODE_ID_OFFSET))
+
+/* Macros to extract individual fields from a NFIT/NVDIMM device handle */
+
+#define ACPI_NFIT_GET_DIMM_NUMBER(handle) \
+       ((handle) & ACPI_NFIT_DIMM_NUMBER_MASK)
+
+#define ACPI_NFIT_GET_CHANNEL_NUMBER(handle) \
+       (((handle) & ACPI_NFIT_CHANNEL_NUMBER_MASK) >> ACPI_NFIT_CHANNEL_NUMBER_OFFSET)
+
+#define ACPI_NFIT_GET_MEMORY_ID(handle) \
+       (((handle) & ACPI_NFIT_MEMORY_ID_MASK)      >> ACPI_NFIT_MEMORY_ID_OFFSET)
+
+#define ACPI_NFIT_GET_SOCKET_ID(handle) \
+       (((handle) & ACPI_NFIT_SOCKET_ID_MASK)      >> ACPI_NFIT_SOCKET_ID_OFFSET)
+
+#define ACPI_NFIT_GET_NODE_ID(handle) \
+       (((handle) & ACPI_NFIT_NODE_ID_MASK)        >> ACPI_NFIT_NODE_ID_OFFSET)
+
 /*******************************************************************************
  *
- * PDTT - Processor Debug Trigger Table (ACPI 6.2)
+ * PDTT - Platform Debug Trigger Table (ACPI 6.2)
  *        Version 0
  *
  ******************************************************************************/
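
The NVDIMM device-handle macros added above pack the DIMM, channel, memory-controller, socket and node numbers into a single 32-bit _ADR value. As a hedged sketch only (the function name and the numbers are invented, and the include is assumed to pull the ACPI table headers in), a round-trip check of the new macros would look like:

    #include <linux/types.h>
    #include <linux/bug.h>
    #include <linux/acpi.h>         /* assumed to pull in the ACPI_NFIT_* handle macros */

    static void nfit_handle_demo(void)
    {
            /* node 5, socket 3, memory controller 0, channel 1, DIMM 2 */
            u32 handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(2, 1, 0, 3, 5);

            WARN_ON(handle != 0x00053012);
            WARN_ON(ACPI_NFIT_GET_DIMM_NUMBER(handle) != 2);
            WARN_ON(ACPI_NFIT_GET_CHANNEL_NUMBER(handle) != 1);
            WARN_ON(ACPI_NFIT_GET_MEMORY_ID(handle) != 0);
            WARN_ON(ACPI_NFIT_GET_SOCKET_ID(handle) != 3);
            WARN_ON(ACPI_NFIT_GET_NODE_ID(handle) != 5);
    }
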
@@ -1301,14 +1366,14 @@ struct acpi_table_pdtt {
  * starting at array_offset.
  */
 struct acpi_pdtt_channel {
-       u16 sub_channel_id;
+       u8 subchannel_id;
+       u8 flags;
 };
 
-/* Mask and Flags for above */
+/* Flags for above */
 
-#define ACPI_PDTT_SUBCHANNEL_ID_MASK        0x00FF
-#define ACPI_PDTT_RUNTIME_TRIGGER           (1<<8)
-#define ACPI_PPTT_WAIT_COMPLETION           (1<<9)
+#define ACPI_PDTT_RUNTIME_TRIGGER           (1)
+#define ACPI_PDTT_WAIT_COMPLETION           (1<<1)
 
 /*******************************************************************************
  *
@@ -1376,6 +1441,20 @@ struct acpi_pptt_cache {
 #define ACPI_PPTT_MASK_CACHE_TYPE           (0x0C)     /* Cache type */
 #define ACPI_PPTT_MASK_WRITE_POLICY         (0x10)     /* Write policy */
 
+/* Attributes describing cache */
+#define ACPI_PPTT_CACHE_READ_ALLOCATE       (0x0)      /* Cache line is allocated on read */
+#define ACPI_PPTT_CACHE_WRITE_ALLOCATE      (0x01)     /* Cache line is allocated on write */
+#define ACPI_PPTT_CACHE_RW_ALLOCATE         (0x02)     /* Cache line is allocated on read and write */
+#define ACPI_PPTT_CACHE_RW_ALLOCATE_ALT     (0x03)     /* Alternate representation of above */
+
+#define ACPI_PPTT_CACHE_TYPE_DATA           (0x0)      /* Data cache */
+#define ACPI_PPTT_CACHE_TYPE_INSTR          (1<<2)     /* Instruction cache */
+#define ACPI_PPTT_CACHE_TYPE_UNIFIED        (2<<2)     /* Unified I & D cache */
+#define ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT    (3<<2)     /* Alternate representation of above */
+
+#define ACPI_PPTT_CACHE_POLICY_WB           (0x0)      /* Cache is write back */
+#define ACPI_PPTT_CACHE_POLICY_WT           (1<<4)     /* Cache is write through */
+
 /* 2: ID Structure */
 
 struct acpi_pptt_id {
@@ -1403,6 +1482,68 @@ struct acpi_table_sbst {
        u32 critical_level;
 };
 
+/*******************************************************************************
+ *
+ * SDEV - Secure Devices Table (ACPI 6.2)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_sdev {
+       struct acpi_table_header header;        /* Common ACPI table header */
+};
+
+struct acpi_sdev_header {
+       u8 type;
+       u8 flags;
+       u16 length;
+};
+
+/* Values for subtable type above */
+
+enum acpi_sdev_type {
+       ACPI_SDEV_TYPE_NAMESPACE_DEVICE = 0,
+       ACPI_SDEV_TYPE_PCIE_ENDPOINT_DEVICE = 1,
+       ACPI_SDEV_TYPE_RESERVED = 2     /* 2 and greater are reserved */
+};
+
+/* Values for flags above */
+
+#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS    (1)
+
+/*
+ * SDEV subtables
+ */
+
+/* 0: Namespace Device Based Secure Device Structure */
+
+struct acpi_sdev_namespace {
+       struct acpi_sdev_header header;
+       u16 device_id_offset;
+       u16 device_id_length;
+       u16 vendor_data_offset;
+       u16 vendor_data_length;
+};
+
+/* 1: PCIe Endpoint Device Based Device Structure */
+
+struct acpi_sdev_pcie {
+       struct acpi_sdev_header header;
+       u16 segment;
+       u16 start_bus;
+       u16 path_offset;
+       u16 path_length;
+       u16 vendor_data_offset;
+       u16 vendor_data_length;
+};
+
+/* 1a: PCIe Endpoint path entry */
+
+struct acpi_sdev_pcie_path {
+       u8 device;
+       u8 function;
+};
+
 /*******************************************************************************
  *
  * SLIT - System Locality Distance Information Table
index 686b6f8..0d60d5d 100644 (file)
@@ -810,6 +810,7 @@ struct acpi_iort_smmu_v3 {
        u8 pxm;
        u8 reserved1;
        u16 reserved2;
+       u32 id_mapping_index;
 };
 
 /* Values for Model field above */
@@ -1246,6 +1247,8 @@ enum acpi_spmi_interface_types {
  * TCPA - Trusted Computing Platform Alliance table
  *        Version 2
  *
+ * TCG Hardware Interface Table for TPM 1.2 Clients and Servers
+ *
  * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
  * Version 1.2, Revision 8
  * February 27, 2017
@@ -1310,6 +1313,8 @@ struct acpi_table_tcpa_server {
  * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
  *        Version 4
  *
+ * TCG Hardware Interface Table for TPM 2.0 Clients and Servers
+ *
  * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
  * Version 1.2, Revision 8
  * February 27, 2017
@@ -1329,15 +1334,23 @@ struct acpi_table_tpm2 {
 /* Values for start_method above */
 
 #define ACPI_TPM2_NOT_ALLOWED                       0
+#define ACPI_TPM2_RESERVED1                         1
 #define ACPI_TPM2_START_METHOD                      2
+#define ACPI_TPM2_RESERVED3                         3
+#define ACPI_TPM2_RESERVED4                         4
+#define ACPI_TPM2_RESERVED5                         5
 #define ACPI_TPM2_MEMORY_MAPPED                     6
 #define ACPI_TPM2_COMMAND_BUFFER                    7
 #define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD  8
+#define ACPI_TPM2_RESERVED9                         9
+#define ACPI_TPM2_RESERVED10                        10
 #define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC       11 /* V1.2 Rev 8 */
+#define ACPI_TPM2_RESERVED                          12
 
-/* Trailer appears after any start_method subtables */
+/* Optional trailer appears after any start_method subtables */
 
 struct acpi_tpm2_trailer {
+       u8 method_parameters[12];
        u32 minimum_log_length; /* Minimum length for the event log area */
        u64 log_address;        /* Address of the event log area */
 };
index 4f077ed..31f1be7 100644 (file)
@@ -468,6 +468,8 @@ typedef void *acpi_handle;  /* Actually a ptr to a NS Node */
 #define ACPI_NSEC_PER_MSEC              1000000L
 #define ACPI_NSEC_PER_SEC               1000000000L
 
+#define ACPI_TIME_AFTER(a, b)           ((s64)((b) - (a)) < 0)
+
 /* Owner IDs are used to track namespace nodes for selective deletion */
 
 typedef u8 acpi_owner_id;
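
The new ACPI_TIME_AFTER() macro is the wrap-safe comparison behind the While() loop timeout introduced above: like the kernel's time_after(), it compares two tick values through a signed difference, so the test keeps working even after the timer counter rolls over. A minimal sketch with invented names, assuming <acpi/acpi.h> provides the macro:

    #include <linux/types.h>
    #include <acpi/acpi.h>

    /*
     * True once "now" has passed "deadline": (s64)(deadline - now) turns
     * negative at that point, regardless of any unsigned wraparound.
     */
    static bool demo_loop_expired(u64 now, u64 deadline)
    {
            return ACPI_TIME_AFTER(now, deadline);
    }
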
@@ -1299,6 +1301,8 @@ typedef enum {
 #define ACPI_OSI_WIN_7                  0x0B
 #define ACPI_OSI_WIN_8                  0x0C
 #define ACPI_OSI_WIN_10                 0x0D
+#define ACPI_OSI_WIN_10_RS1             0x0E
+#define ACPI_OSI_WIN_10_RS2             0x0F
 
 /* Definitions of getopt */
 
index ea189d8..8ac4e68 100644 (file)
@@ -7,9 +7,10 @@
 #ifndef _ASM_GENERIC_MM_HOOKS_H
 #define _ASM_GENERIC_MM_HOOKS_H
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-                                struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+                               struct mm_struct *mm)
 {
+       return 0;
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)
index 757dc6f..868e685 100644 (file)
@@ -805,15 +805,23 @@ static inline int pmd_trans_huge(pmd_t pmd)
 {
        return 0;
 }
-#ifndef __HAVE_ARCH_PMD_WRITE
+#ifndef pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        BUG();
        return 0;
 }
-#endif /* __HAVE_ARCH_PMD_WRITE */
+#endif /* pmd_write */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+       BUG();
+       return 0;
+}
+#endif /* pud_write */
+
 #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
        (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
         !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
@@ -1017,6 +1025,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
 struct file;
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                        unsigned long size, pgprot_t *vma_prot);
+
+#ifndef CONFIG_X86_ESPFIX64
+static inline void init_espfix_bsp(void) { }
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #ifndef io_remap_pfn_range
index ee8b707..a564b83 100644 (file)
 #define INIT_TASK_DATA(align)                                          \
        . = ALIGN(align);                                               \
        VMLINUX_SYMBOL(__start_init_task) = .;                          \
+       VMLINUX_SYMBOL(init_thread_union) = .;                          \
+       VMLINUX_SYMBOL(init_stack) = .;                                 \
        *(.data..init_task)                                             \
+       *(.data..init_thread_info)                                      \
+       . = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE;            \
        VMLINUX_SYMBOL(__end_init_task) = .;
 
 /*
index 6abf0a3..f38227a 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/if_alg.h>
 #include <linux/scatterlist.h>
 #include <linux/types.h>
+#include <linux/atomic.h>
 #include <net/sock.h>
 
 #include <crypto/aead.h>
@@ -150,7 +151,7 @@ struct af_alg_ctx {
        struct crypto_wait wait;
 
        size_t used;
-       size_t rcvused;
+       atomic_t rcvused;
 
        bool more;
        bool merge;
@@ -215,7 +216,7 @@ static inline int af_alg_rcvbuf(struct sock *sk)
        struct af_alg_ctx *ctx = ask->private;
 
        return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
-                         ctx->rcvused, 0);
+                    atomic_read(&ctx->rcvused), 0);
 }
 
 /**
@@ -242,6 +243,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
                   unsigned int ivsize);
 ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
                        int offset, size_t size, int flags);
+void af_alg_free_resources(struct af_alg_async_req *areq);
 void af_alg_async_cb(struct crypto_async_request *_req, int err);
 unsigned int af_alg_poll(struct file *file, struct socket *sock,
                         poll_table *wait);
index f0b44c1..c2bae8d 100644 (file)
@@ -82,6 +82,14 @@ int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst);
 void ahash_free_instance(struct crypto_instance *inst);
 
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+                   unsigned int keylen);
+
+static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
+{
+       return alg->setkey != shash_no_setkey;
+}
+
 int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst);
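
The exported shash_no_setkey() stub plus the new crypto_shash_alg_has_setkey() helper let callers tell keyed shash algorithms (those with a real ->setkey) apart from unkeyed digests. A hedged sketch of the kind of check this enables; the wrapper and its error path are invented for illustration:

    #include <linux/errno.h>
    #include <crypto/hash.h>
    #include <crypto/internal/hash.h>       /* crypto_shash_alg_has_setkey() */

    /* Refuse to run a keyed hash (e.g. hmac) that was never given a key. */
    static int demo_shash_check_key(struct crypto_shash *tfm, bool key_was_set)
    {
            if (crypto_shash_alg_has_setkey(crypto_shash_alg(tfm)) && !key_was_set)
                    return -ENOKEY;
            return 0;
    }
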
index cceafa0..b67404f 100644 (file)
@@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
 
 struct mcryptd_cpu_queue {
        struct crypto_queue queue;
+       spinlock_t q_lock;
        struct work_struct work;
 };
 
index df9807a..5971577 100644 (file)
@@ -24,6 +24,7 @@
 #define __DRM_CONNECTOR_H__
 
 #include <linux/list.h>
+#include <linux/llist.h>
 #include <linux/ctype.h>
 #include <linux/hdmi.h>
 #include <drm/drm_mode_object.h>
@@ -916,6 +917,15 @@ struct drm_connector {
        uint8_t num_h_tile, num_v_tile;
        uint8_t tile_h_loc, tile_v_loc;
        uint16_t tile_h_size, tile_v_size;
+
+       /**
+        * @free_node:
+        *
+        * List used only by &drm_connector_iter to be able to clean up a
+        * connector from any context, in conjunction with
+        * &drm_mode_config.connector_free_work.
+        */
+       struct llist_node free_node;
 };
 
 #define obj_to_connector(x) container_of(x, struct drm_connector, base)
index 6f35909..efe6d5a 100644 (file)
@@ -362,7 +362,8 @@ void
 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
                                   const struct drm_display_mode *mode,
                                   enum hdmi_quantization_range rgb_quant_range,
-                                  bool rgb_quant_range_selectable);
+                                  bool rgb_quant_range_selectable,
+                                  bool is_hdmi2_sink);
 
 /**
  * drm_eld_mnl - Get ELD monitor name length in bytes.
@@ -464,6 +465,8 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
                                     struct i2c_adapter *adapter);
 struct edid *drm_edid_duplicate(const struct edid *edid);
+void drm_reset_display_info(struct drm_connector *connector);
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
 int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 
 u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
index b21e827..b0ce26d 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/idr.h>
 #include <linux/workqueue.h>
+#include <linux/llist.h>
 
 #include <drm/drm_modeset_lock.h>
 
@@ -393,7 +394,7 @@ struct drm_mode_config {
 
        /**
         * @connector_list_lock: Protects @num_connector and
-        * @connector_list.
+        * @connector_list and @connector_free_list.
         */
        spinlock_t connector_list_lock;
        /**
@@ -413,6 +414,21 @@ struct drm_mode_config {
         * &struct drm_connector_list_iter to walk this list.
         */
        struct list_head connector_list;
+       /**
+        * @connector_free_list:
+        *
+        * List of connector objects linked with &drm_connector.free_head.
+        * Protected by @connector_list_lock. Used by
+        * drm_for_each_connector_iter() and
+        * &struct drm_connector_list_iter to savely free connectors using
+        * @connector_free_work.
+        */
+       struct llist_head connector_free_list;
+       /**
+        * @connector_free_work: Work to clean up @connector_free_list.
+        */
+       struct work_struct connector_free_work;
+
        /**
         * @num_encoder:
         *
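
The @connector_free_list / @connector_free_work pair documented above implements a "queue now, free later" scheme: the final reference on a connector may be dropped from a context that cannot block, so the object is pushed onto a lock-free llist and an ordinary work item does the real cleanup. Stripped of the DRM specifics, the pattern is roughly the following sketch (all names invented):

    #include <linux/llist.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct demo_obj {
            struct llist_node free_node;
            /* ... payload ... */
    };

    static LLIST_HEAD(demo_free_list);

    static void demo_free_work_fn(struct work_struct *work)
    {
            struct demo_obj *obj, *tmp;

            /* Detach the whole list atomically, then free at leisure. */
            llist_for_each_entry_safe(obj, tmp,
                                      llist_del_all(&demo_free_list), free_node)
                    kfree(obj);
    }
    static DECLARE_WORK(demo_free_work, demo_free_work_fn);

    /* Callable from any context: llist_add() is lock-free, the work sleeps later. */
    static void demo_obj_release(struct demo_obj *obj)
    {
            llist_add(&obj->free_node, &demo_free_list);
            schedule_work(&demo_free_work);
    }
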
index 38a2b47..5938113 100644 (file)
@@ -58,12 +58,21 @@ int ttm_pool_populate(struct ttm_tt *ttm);
  */
 void ttm_pool_unpopulate(struct ttm_tt *ttm);
 
+/**
+ * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
+ */
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+
+/**
+ * Unpopulates and DMA unmaps pages as part of a
+ * ttm_dma_unpopulate() request */
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
+
 /**
  * Output the state of pools to debugfs file
  */
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
 
-
 #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 /**
  * Initialize pool allocator.
@@ -83,17 +92,6 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 
-
-/**
- * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
-
 #else
 static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
                                          unsigned max_pages)
@@ -116,16 +114,6 @@ static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
                                      struct device *dev)
 {
 }
-
-static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-       return -ENOMEM;
-}
-
-static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-}
-
 #endif
 
 #endif
index 01ee473..9da6ce2 100644 (file)
@@ -62,7 +62,7 @@ struct arch_timer_cpu {
        bool                    enabled;
 };
 
-int kvm_timer_hyp_init(void);
+int kvm_timer_hyp_init(bool);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
@@ -93,7 +93,4 @@ void kvm_timer_init_vhe(void);
 #define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer)
 #define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer)
 
-void enable_el1_phys_timer_access(void);
-void disable_el1_phys_timer_access(void);
-
 #endif
diff --git a/include/lib/libgcc.h b/include/lib/libgcc.h
deleted file mode 100644 (file)
index 32e1e0f..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * include/lib/libgcc.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.
- */
-
-#ifndef __LIB_LIBGCC_H
-#define __LIB_LIBGCC_H
-
-#include <asm/byteorder.h>
-
-typedef int word_type __attribute__ ((mode (__word__)));
-
-#ifdef __BIG_ENDIAN
-struct DWstruct {
-       int high, low;
-};
-#elif defined(__LITTLE_ENDIAN)
-struct DWstruct {
-       int low, high;
-};
-#else
-#error I feel sick.
-#endif
-
-typedef union {
-       struct DWstruct s;
-       long long ll;
-} DWunion;
-
-#endif /* __ASM_LIBGCC_H */
index dc1ebfe..b8f4c3c 100644 (file)
@@ -451,6 +451,7 @@ void __init acpi_no_s4_hw_signature(void);
 void __init acpi_old_suspend_ordering(void);
 void __init acpi_nvs_nosave(void);
 void __init acpi_nvs_nosave_s3(void);
+void __init acpi_sleep_no_blacklist(void);
 #endif /* CONFIG_PM_SLEEP */
 
 struct acpi_osc_context {
@@ -640,6 +641,12 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
        return false;
 }
 
+static inline const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+       return NULL;
+}
+
 static inline bool is_acpi_node(struct fwnode_handle *fwnode)
 {
        return false;
index 82f0c8f..23d29b3 100644 (file)
@@ -492,6 +492,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 #define bio_set_dev(bio, bdev)                         \
 do {                                           \
+       if ((bio)->bi_disk != (bdev)->bd_disk)  \
+               bio_clear_flag(bio, BIO_THROTTLED);\
        (bio)->bi_disk = (bdev)->bd_disk;       \
        (bio)->bi_partno = (bdev)->bd_partno;   \
 } while (0)
index a1e628e..9e7d8bd 100644 (file)
@@ -50,8 +50,6 @@ struct blk_issue_stat {
 struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct gendisk          *bi_disk;
-       u8                      bi_partno;
-       blk_status_t            bi_status;
        unsigned int            bi_opf;         /* bottom bits req flags,
                                                 * top bits REQ_OP. Use
                                                 * accessors.
@@ -59,8 +57,8 @@ struct bio {
        unsigned short          bi_flags;       /* status, etc and bvec pool number */
        unsigned short          bi_ioprio;
        unsigned short          bi_write_hint;
-
-       struct bvec_iter        bi_iter;
+       blk_status_t            bi_status;
+       u8                      bi_partno;
 
        /* Number of segments in this BIO after
         * physical address coalescing is performed.
@@ -74,8 +72,9 @@ struct bio {
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;
 
-       atomic_t                __bi_remaining;
+       struct bvec_iter        bi_iter;
 
+       atomic_t                __bi_remaining;
        bio_end_io_t            *bi_end_io;
 
        void                    *bi_private;
index 8089ca1..0ce8a37 100644 (file)
@@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t;
 struct request {
        struct list_head queuelist;
        union {
-               call_single_data_t csd;
+               struct __call_single_data csd;
                u64 fifo_time;
        };
 
@@ -241,14 +241,24 @@ struct request {
        struct request *next_rq;
 };
 
+static inline bool blk_op_is_scsi(unsigned int op)
+{
+       return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
+}
+
+static inline bool blk_op_is_private(unsigned int op)
+{
+       return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
+}
+
 static inline bool blk_rq_is_scsi(struct request *rq)
 {
-       return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
+       return blk_op_is_scsi(req_op(rq));
 }
 
 static inline bool blk_rq_is_private(struct request *rq)
 {
-       return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
+       return blk_op_is_private(req_op(rq));
 }
 
 static inline bool blk_rq_is_passthrough(struct request *rq)
@@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
        return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
 }
 
+static inline bool bio_is_passthrough(struct bio *bio)
+{
+       unsigned op = bio_op(bio);
+
+       return blk_op_is_scsi(op) || blk_op_is_private(op);
+}
+
 static inline unsigned short req_get_ioprio(struct request *req)
 {
        return req->ioprio;
@@ -948,7 +965,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
-extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
+extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_queue_split(struct request_queue *, struct bio **);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
index e55e425..0b25cf8 100644 (file)
@@ -43,7 +43,14 @@ struct bpf_map_ops {
 };
 
 struct bpf_map {
-       atomic_t refcnt;
+       /* 1st cacheline with read-mostly members of which some
+        * are also accessed in fast-path (e.g. ops, max_entries).
+        */
+       const struct bpf_map_ops *ops ____cacheline_aligned;
+       struct bpf_map *inner_map_meta;
+#ifdef CONFIG_SECURITY
+       void *security;
+#endif
        enum bpf_map_type map_type;
        u32 key_size;
        u32 value_size;
@@ -52,15 +59,17 @@ struct bpf_map {
        u32 pages;
        u32 id;
        int numa_node;
-       struct user_struct *user;
-       const struct bpf_map_ops *ops;
-       struct work_struct work;
+       bool unpriv_array;
+       /* 7 bytes hole */
+
+       /* 2nd cacheline with misc members to avoid false sharing
+        * particularly with refcounting.
+        */
+       struct user_struct *user ____cacheline_aligned;
+       atomic_t refcnt;
        atomic_t usercnt;
-       struct bpf_map *inner_map_meta;
+       struct work_struct work;
        char name[BPF_OBJ_NAME_LEN];
-#ifdef CONFIG_SECURITY
-       void *security;
-#endif
 };
 
 /* function argument constraints */
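
The bpf_map reshuffle above is a false-sharing fix: the read-mostly fields consulted on every lookup stay together on the first cache line, while the reference counters written on map creation, FD install and teardown start a second ____cacheline_aligned line, so refcount traffic no longer invalidates the lookup-path line on other CPUs. In isolation the technique looks like this generic sketch (not the BPF structure itself):

    #include <linux/cache.h>
    #include <linux/types.h>

    struct demo_ops;

    struct demo_map {
            /* Read-mostly, hot on every lookup: keep on one line. */
            const struct demo_ops   *ops ____cacheline_aligned;
            u32                     key_size;
            u32                     value_size;
            u32                     max_entries;

            /*
             * Frequently written: start a new cache line so refcount
             * updates do not bounce the lookup fields above.
             */
            atomic_t                refcnt ____cacheline_aligned;
            atomic_t                usercnt;
    };
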
@@ -221,6 +230,7 @@ struct bpf_prog_aux {
 struct bpf_array {
        struct bpf_map map;
        u32 elem_size;
+       u32 index_mask;
        /* 'ownership' of prog_array is claimed by the first program that
         * is going to use this map or by the first program which FD is stored
         * in the map to make sure that all callers and callees have the same
@@ -419,6 +429,8 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
                attr->numa_node : NUMA_NO_NODE;
 }
 
+struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
+
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
@@ -506,6 +518,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
 {
        return 0;
 }
+
+static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
+                               enum bpf_prog_type type)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -514,6 +532,8 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
        return bpf_prog_get_type_dev(ufd, type, false);
 }
 
+bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
+
 int bpf_prog_offload_compile(struct bpf_prog *prog);
 void bpf_prog_offload_destroy(struct bpf_prog *prog);
 
index c561b98..1632bb1 100644 (file)
  * In practice this is far bigger than any realistic pointer offset; this limit
  * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
  */
-#define BPF_MAX_VAR_OFF        (1ULL << 31)
+#define BPF_MAX_VAR_OFF        (1 << 29)
 /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
  * that converting umax_value to int cannot overflow.
  */
-#define BPF_MAX_VAR_SIZ        INT_MAX
+#define BPF_MAX_VAR_SIZ        (1 << 29)
 
 /* Liveness marks, used for registers and spilled-regs (in stack slots).
  * Read marks propagate upwards until they find a write mark; they record that
index 2272ded..631354a 100644 (file)
 /* Mark a function definition as prohibited from being cloned. */
 #define __noclone      __attribute__((__noclone__, __optimize__("no-tracer")))
 
-#ifdef RANDSTRUCT_PLUGIN
+#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
 #define __randomize_layout __attribute__((randomize_layout))
 #define __no_randomize_layout __attribute__((no_randomize_layout))
 #endif
index 188ed9f..52e611a 100644 (file)
@@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- * compiler is aware of some particular ordering.  One way to make the
- * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
  *
- * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- * data types like structs or unions. If the size of the accessed data
- * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
- * least two memcpy()s: one for the __builtin_memcpy() and then one for
- * the macro doing the copy of variable - '__u' allocated on the stack.
+ * These two macros will also work on aggregate data types like structs or
+ * unions. If the size of the accessed data type exceeds the word size of
+ * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
+ * fall back to memcpy(). There's at least two memcpy()s: one for the
+ * __builtin_memcpy() and then one for the macro doing the copy of variable
+ * - '__u' allocated on the stack.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
  * mutilate accesses that either do not require ordering or that interact
  * with an explicit memory barrier or atomic instruction that provides the
  * required ordering.
@@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
        compiletime_assert(__native_word(t),                            \
                "Need native word sized stores/loads for atomicity.")
 
-/*
- * Prevent the compiler from merging or refetching accesses.  The compiler
- * is also forbidden from reordering successive instances of ACCESS_ONCE(),
- * but only when the compiler is aware of some particular ordering.  One way
- * to make the compiler aware of ordering is to put the two invocations of
- * ACCESS_ONCE() in different C statements.
- *
- * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
- * on a union member will work as long as the size of the member matches the
- * size of the union and the size is smaller than word size.
- *
- * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
- * between process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
- *
- * If possible use READ_ONCE()/WRITE_ONCE() instead.
- */
-#define __ACCESS_ONCE(x) ({ \
-        __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
-       (volatile typeof(x) *)&(x); })
-#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
-
 #endif /* __LINUX_COMPILER_H */
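
With the ACCESS_ONCE() block deleted above, READ_ONCE()/WRITE_ONCE() are the only remaining annotations for loads and stores the compiler must not fold, tear or refetch. A minimal sketch of use case (1) from the surviving comment, a flag shared between process context and an interrupt handler (everything here is hypothetical):

    #include <linux/compiler.h>
    #include <linux/types.h>
    #include <asm/processor.h>      /* cpu_relax() */

    static bool demo_stop_requested;

    /* Called from an interrupt handler. */
    static void demo_request_stop(void)
    {
            WRITE_ONCE(demo_stop_requested, true);
    }

    /* Called from process context; the load cannot be hoisted out of the loop. */
    static void demo_wait_for_stop(void)
    {
            while (!READ_ONCE(demo_stop_requested))
                    cpu_relax();
    }

Unlike the removed ACCESS_ONCE(), the same two macros also cover aggregates wider than a machine word, falling back to memcpy() as the reworked comment explains.
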
index 0662a41..519e949 100644 (file)
@@ -10,9 +10,6 @@
  */
 
 #include <linux/wait.h>
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-#include <linux/lockdep.h>
-#endif
 
 /*
  * struct completion - structure used to maintain state for a "completion"
 struct completion {
        unsigned int done;
        wait_queue_head_t wait;
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-       struct lockdep_map_cross map;
-#endif
 };
 
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-static inline void complete_acquire(struct completion *x)
-{
-       lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
-}
-
-static inline void complete_release(struct completion *x)
-{
-       lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
-}
-
-static inline void complete_release_commit(struct completion *x)
-{
-       lock_commit_crosslock((struct lockdep_map *)&x->map);
-}
-
-#define init_completion_map(x, m)                                      \
-do {                                                                   \
-       lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,     \
-                       (m)->name, (m)->key, 0);                                \
-       __init_completion(x);                                           \
-} while (0)
-
-#define init_completion(x)                                             \
-do {                                                                   \
-       static struct lock_class_key __key;                             \
-       lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,     \
-                       "(completion)" #x,                              \
-                       &__key, 0);                                     \
-       __init_completion(x);                                           \
-} while (0)
-#else
 #define init_completion_map(x, m) __init_completion(x)
 #define init_completion(x) __init_completion(x)
 static inline void complete_acquire(struct completion *x) {}
 static inline void complete_release(struct completion *x) {}
-static inline void complete_release_commit(struct completion *x) {}
-#endif
 
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-#define COMPLETION_INITIALIZER(work) \
-       { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
-       STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
-#else
 #define COMPLETION_INITIALIZER(work) \
        { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-#endif
 
 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
        (*({ init_completion_map(&(work), &(map)); &(work); }))
index a04ef7c..7b01bc1 100644 (file)
@@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
 extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
 extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
 
+extern ssize_t cpu_show_meltdown(struct device *dev,
+                                struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v1(struct device *dev,
+                                  struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v2(struct device *dev,
+                                  struct device_attribute *attr, char *buf);
+
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
                                 const struct attribute_group **groups,
index d4292eb..de0dafb 100644 (file)
@@ -30,9 +30,6 @@
 
 struct cpufreq_policy;
 
-typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
-                           unsigned long voltage, u32 *power);
-
 #ifdef CONFIG_CPU_THERMAL
 /**
  * cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -41,43 +38,6 @@ typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
 struct thermal_cooling_device *
 cpufreq_cooling_register(struct cpufreq_policy *policy);
 
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
-                              u32 capacitance, get_static_t plat_static_func);
-
-/**
- * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
- * @np: a valid struct device_node to the cooling device device tree node.
- * @policy: cpufreq policy.
- */
-#ifdef CONFIG_THERMAL_OF
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
-                           struct cpufreq_policy *policy);
-
-struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
-                                 struct cpufreq_policy *policy,
-                                 u32 capacitance,
-                                 get_static_t plat_static_func);
-#else
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
-                           struct cpufreq_policy *policy)
-{
-       return ERR_PTR(-ENOSYS);
-}
-
-static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
-                                 struct cpufreq_policy *policy,
-                                 u32 capacitance,
-                                 get_static_t plat_static_func)
-{
-       return NULL;
-}
-#endif
-
 /**
  * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
  * @cdev: thermal cooling device pointer.
@@ -90,34 +50,27 @@ cpufreq_cooling_register(struct cpufreq_policy *policy)
 {
        return ERR_PTR(-ENOSYS);
 }
-static inline struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
-                              u32 capacitance, get_static_t plat_static_func)
-{
-       return NULL;
-}
 
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
-                           struct cpufreq_policy *policy)
+static inline
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 {
-       return ERR_PTR(-ENOSYS);
+       return;
 }
+#endif /* CONFIG_CPU_THERMAL */
 
+#if defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL)
+/**
+ * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
+ * @policy: cpufreq policy.
+ */
+struct thermal_cooling_device *
+of_cpufreq_cooling_register(struct cpufreq_policy *policy);
+#else
 static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
-                                 struct cpufreq_policy *policy,
-                                 u32 capacitance,
-                                 get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
 {
        return NULL;
 }
-
-static inline
-void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
-{
-       return;
-}
-#endif /* CONFIG_CPU_THERMAL */
+#endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */
 
 #endif /* __CPU_COOLING_H__ */
index 201ab72..1a32e55 100644 (file)
@@ -86,7 +86,7 @@ enum cpuhp_state {
        CPUHP_MM_ZSWP_POOL_PREPARE,
        CPUHP_KVM_PPC_BOOK3S_PREPARE,
        CPUHP_ZCOMP_PREPARE,
-       CPUHP_TIMERS_DEAD,
+       CPUHP_TIMERS_PREPARE,
        CPUHP_MIPS_SOC_PREPARE,
        CPUHP_BP_PREPARE_DYN,
        CPUHP_BP_PREPARE_DYN_END                = CPUHP_BP_PREPARE_DYN + 20,
index 06097ef..b511f6d 100644 (file)
@@ -42,6 +42,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
        vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
 #define VMCOREINFO_SYMBOL(name) \
        vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
+#define VMCOREINFO_SYMBOL_ARRAY(name) \
+       vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name)
 #define VMCOREINFO_SIZE(name) \
        vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
                              (unsigned long)sizeof(name))
index cd4f420..72c92c3 100644 (file)
@@ -5,12 +5,19 @@
 #include <linux/types.h>
 
 extern u16 const crc_ccitt_table[256];
+extern u16 const crc_ccitt_false_table[256];
 
 extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
+extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
 
 static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
 {
        return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
 }
 
+static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
+{
+    return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
+}
+
 #endif /* _LINUX_CRC_CCITT_H */
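
crc_ccitt_false() is the "CRC-16/CCITT-FALSE" variant: the same 0x1021 CCITT polynomial as crc_ccitt(), but processed most-significant-bit first and, by convention, seeded with 0xffff. A hedged usage sketch with an invented frame layout (payload followed by a big-endian CRC):

    #include <linux/types.h>
    #include <linux/crc-ccitt.h>

    /* Verify a frame whose last two bytes hold the CRC of the payload. */
    static bool demo_frame_crc_ok(const u8 *frame, size_t len)
    {
            u16 expected, computed;

            if (len < 2)
                    return false;

            expected = ((u16)frame[len - 2] << 8) | frame[len - 1];
            computed = crc_ccitt_false(0xffff, frame, len - 2);

            return computed == expected;
    }
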
index 099058e..6312865 100644 (file)
@@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *);
 extern void set_groups(struct cred *, struct group_info *);
 extern int groups_search(const struct group_info *, kgid_t);
 extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
 
 /*
  * The security context of a task
index f36ecc2..3b0ba54 100644 (file)
@@ -216,6 +216,8 @@ static inline void debugfs_remove(struct dentry *dentry)
 static inline void debugfs_remove_recursive(struct dentry *dentry)
 { }
 
+const struct file_operations *debugfs_real_fops(const struct file *filp);
+
 static inline int debugfs_file_get(struct dentry *dentry)
 {
        return 0;
index 4178d24..5e335b6 100644 (file)
@@ -71,7 +71,7 @@ extern void delayacct_init(void);
 extern void __delayacct_tsk_init(struct task_struct *);
 extern void __delayacct_tsk_exit(struct task_struct *);
 extern void __delayacct_blkio_start(void);
-extern void __delayacct_blkio_end(void);
+extern void __delayacct_blkio_end(struct task_struct *);
 extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
 extern __u64 __delayacct_blkio_ticks(struct task_struct *);
 extern void __delayacct_freepages_start(void);
@@ -122,10 +122,10 @@ static inline void delayacct_blkio_start(void)
                __delayacct_blkio_start();
 }
 
-static inline void delayacct_blkio_end(void)
+static inline void delayacct_blkio_end(struct task_struct *p)
 {
        if (current->delays)
-               __delayacct_blkio_end();
+               __delayacct_blkio_end(p);
        delayacct_clear_flag(DELAYACCT_PF_BLKIO);
 }
 
@@ -169,7 +169,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
 {}
 static inline void delayacct_blkio_start(void)
 {}
-static inline void delayacct_blkio_end(void)
+static inline void delayacct_blkio_end(struct task_struct *p)
 {}
 static inline int delayacct_add_tsk(struct taskstats *d,
                                        struct task_struct *tsk)
index e8f8e8f..81ed9b2 100644 (file)
@@ -704,7 +704,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
        return ret;
 }
 
-#ifdef CONFIG_HAS_DMA
 static inline int dma_get_cache_alignment(void)
 {
 #ifdef ARCH_DMA_MINALIGN
@@ -712,7 +711,6 @@ static inline int dma_get_cache_alignment(void)
 #endif
        return 1;
 }
-#endif
 
 /* flags for the coherent memory api */
 #define DMA_MEMORY_EXCLUSIVE           0x01
index d813f7b..29fdf80 100644 (file)
@@ -140,11 +140,13 @@ struct efi_boot_memmap {
 
 struct capsule_info {
        efi_capsule_header_t    header;
+       efi_capsule_header_t    *capsule;
        int                     reset_type;
        long                    index;
        size_t                  count;
        size_t                  total_size;
-       phys_addr_t             *pages;
+       struct page             **pages;
+       phys_addr_t             *phys;
        size_t                  page_bytes_remain;
 };
 
index 2995a27..511fbaa 100644 (file)
@@ -1872,7 +1872,7 @@ struct super_operations {
  */
 #define __IS_FLG(inode, flg)   ((inode)->i_sb->s_flags & (flg))
 
-static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & MS_RDONLY; }
+static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }
 #define IS_RDONLY(inode)       sb_rdonly((inode)->i_sb)
 #define IS_SYNC(inode)         (__IS_FLG(inode, SB_SYNCHRONOUS) || \
                                        ((inode)->i_flags & S_SYNC))
@@ -3088,7 +3088,8 @@ static inline int vfs_lstat(const char __user *name, struct kstat *stat)
 static inline int vfs_fstatat(int dfd, const char __user *filename,
                              struct kstat *stat, int flags)
 {
-       return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
+       return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
+                        stat, STATX_BASIC_STATS);
 }
 static inline int vfs_fstat(int fd, struct kstat *stat)
 {
@@ -3194,6 +3195,20 @@ static inline bool vma_is_dax(struct vm_area_struct *vma)
        return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
 }
 
+static inline bool vma_is_fsdax(struct vm_area_struct *vma)
+{
+       struct inode *inode;
+
+       if (!vma->vm_file)
+               return false;
+       if (!vma_is_dax(vma))
+               return false;
+       inode = file_inode(vma->vm_file);
+       if (inode->i_mode == S_IFCHR)
+               return false; /* device-dax */
+       return true;
+}
+
 static inline int iocb_flags(struct file *file)
 {
        int res = 0;
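/* Sketch of the intended caller-side check (assumed usage): refuse long-term
 * pinning of filesystem-DAX mappings, while device-DAX remains allowed. */
if (vma_is_fsdax(vma))
	return -EOPNOTSUPP;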
index f4ff47d..fe0c349 100644 (file)
@@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
 {
        if (fscache_cookie_valid(cookie) && PageFsCache(page))
                return __fscache_maybe_release_page(cookie, page, gfp);
-       return false;
+       return true;
 }
 
 /**
index 2bab819..9c3c9a3 100644 (file)
@@ -332,6 +332,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
 
 extern int ftrace_nr_registered_ops(void);
 
+struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
+
 bool is_ftrace_trampoline(unsigned long addr);
 
 /*
@@ -764,9 +766,6 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-/* for init task */
-#define INIT_FTRACE_GRAPH              .ret_stack = NULL,
-
 /*
  * Stack of return addresses for functions
  * of a thread.
@@ -844,7 +843,6 @@ static inline void unpause_graph_tracing(void)
 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
 
 #define __notrace_funcgraph
-#define INIT_FTRACE_GRAPH
 
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
@@ -923,10 +921,6 @@ extern int tracepoint_printk;
 extern void disable_trace_on_warning(void);
 extern int __disable_trace_on_warning;
 
-#ifdef CONFIG_PREEMPT
-#define INIT_TRACE_RECURSION           .trace_recursion = 0,
-#endif
-
 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos);
@@ -935,10 +929,6 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
 static inline void  disable_trace_on_warning(void) { }
 #endif /* CONFIG_TRACING */
 
-#ifndef INIT_TRACE_RECURSION
-#define INIT_TRACE_RECURSION
-#endif
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 
 unsigned long arch_syscall_addr(int nr);
index 55e6725..7258cd6 100644 (file)
@@ -66,9 +66,10 @@ struct gpio_irq_chip {
        /**
         * @lock_key:
         *
-        * Per GPIO IRQ chip lockdep class.
+        * Per GPIO IRQ chip lockdep classes.
         */
        struct lock_class_key *lock_key;
+       struct lock_class_key *request_key;
 
        /**
         * @parent_handler:
@@ -323,7 +324,8 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
 
 /* add/remove chips */
 extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
-                                     struct lock_class_key *lock_key);
+                                     struct lock_class_key *lock_key,
+                                     struct lock_class_key *request_key);
 
 /**
  * gpiochip_add_data() - register a gpio_chip
@@ -350,11 +352,13 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
  */
 #ifdef CONFIG_LOCKDEP
 #define gpiochip_add_data(chip, data) ({               \
-               static struct lock_class_key key;       \
-               gpiochip_add_data_with_key(chip, data, &key);   \
+               static struct lock_class_key lock_key;  \
+               static struct lock_class_key request_key;         \
+               gpiochip_add_data_with_key(chip, data, &lock_key, \
+                                          &request_key);         \
        })
 #else
-#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL)
+#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL)
 #endif
 
 static inline int gpiochip_add(struct gpio_chip *chip)
@@ -429,7 +433,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
                             irq_flow_handler_t handler,
                             unsigned int type,
                             bool threaded,
-                            struct lock_class_key *lock_key);
+                            struct lock_class_key *lock_key,
+                            struct lock_class_key *request_key);
 
 #ifdef CONFIG_LOCKDEP
 
@@ -445,10 +450,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
                                       irq_flow_handler_t handler,
                                       unsigned int type)
 {
-       static struct lock_class_key key;
+       static struct lock_class_key lock_key;
+       static struct lock_class_key request_key;
 
        return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-                                       handler, type, false, &key);
+                                       handler, type, false,
+                                       &lock_key, &request_key);
 }
 
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -458,10 +465,12 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
                          unsigned int type)
 {
 
-       static struct lock_class_key key;
+       static struct lock_class_key lock_key;
+       static struct lock_class_key request_key;
 
        return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-                                       handler, type, true, &key);
+                                       handler, type, true,
+                                       &lock_key, &request_key);
 }
 #else
 static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
@@ -471,7 +480,7 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
                                       unsigned int type)
 {
        return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-                                       handler, type, false, NULL);
+                                       handler, type, false, NULL, NULL);
 }
 
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -481,7 +490,7 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
                          unsigned int type)
 {
        return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-                                       handler, type, true, NULL);
+                                       handler, type, true, NULL, NULL);
 }
 #endif /* CONFIG_LOCKDEP */
 
index fbf5b31..82a2588 100644 (file)
@@ -239,14 +239,6 @@ static inline int pgd_write(pgd_t pgd)
 }
 #endif
 
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
-{
-       BUG();
-       return 0;
-}
-#endif
-
 #define HUGETLB_ANON_FILE "anon_hugepage"
 
 enum {
index f3e97c5..6c93366 100644 (file)
@@ -708,6 +708,7 @@ struct vmbus_channel {
        u8 monitor_bit;
 
        bool rescind; /* got rescind msg */
+       struct completion rescind_event;
 
        u32 ringbuffer_gpadlhandle;
 
index 7c3a365..fa14f83 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/radix-tree.h>
 #include <linux/gfp.h>
 #include <linux/percpu.h>
+#include <linux/bug.h>
 
 struct idr {
        struct radix_tree_root  idr_rt;
diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h
new file mode 100644 (file)
index 0000000..e7dc7a5
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file describes the STM32 DFSDM IIO driver API for the audio part
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef STM32_DFSDM_ADC_H
+#define STM32_DFSDM_ADC_H
+
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+                           int (*cb)(const void *data, size_t size,
+                                     void *private),
+                           void *private);
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev);
+
+#endif
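/* Hypothetical consumer sketch for the DFSDM audio path; the callback name
 * and private pointer are illustrative placeholders. */
static int example_dfsdm_cb(const void *data, size_t size, void *private)
{
	/* hand 'size' bytes of filtered samples to the audio layer */
	return 0;
}

static int example_dfsdm_start(struct iio_dev *iio_dev, void *priv)
{
	return stm32_dfsdm_get_buff_cb(iio_dev, example_dfsdm_cb, priv);
}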
index 5e347a9..9887f4f 100644 (file)
@@ -133,6 +133,17 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
                                             int (*cb)(const void *data,
                                                       void *private),
                                             void *private);
+/**
+ * iio_channel_cb_set_buffer_watermark() - set the buffer watermark.
+ * @cb_buffer:         The callback buffer on which to set the watermark.
+ * @watermark: buffer watermark in bytes.
+ *
+ * This function allows configuring the buffer watermark.
+ */
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer,
+                                       size_t watermark);
+
 /**
  * iio_channel_release_all_cb() - release and unregister the callback.
  * @cb_buffer:         The callback buffer that was allocated.
@@ -215,6 +226,32 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
  */
 int iio_read_channel_processed(struct iio_channel *chan, int *val);
 
+/**
+ * iio_write_channel_attribute() - Write values to the device attribute.
+ * @chan:      The channel being queried.
+ * @val:       Value being written.
+ * @val2:      Value being written. val2 use depends on the attribute type.
+ * @attribute: info attribute to be written.
+ *
+ * Returns an error code or 0.
+ */
+int iio_write_channel_attribute(struct iio_channel *chan, int val,
+                               int val2, enum iio_chan_info_enum attribute);
+
+/**
+ * iio_read_channel_attribute() - Read values from the device attribute.
+ * @chan:      The channel being queried.
+ * @val:       Value being read.
+ * @val2:      Value being read. val2 use depends on the attribute type.
+ * @attribute: info attribute to be read.
+ *
+ * Returns a negative error code on failure, otherwise a description of what
+ * is in val and val2, such as IIO_VAL_INT_PLUS_MICRO, which means the value
+ * is val + val2/1e6.
+ */
+int iio_read_channel_attribute(struct iio_channel *chan, int *val,
+                              int *val2, enum iio_chan_info_enum attribute);
+
 /**
  * iio_write_channel_raw() - write to a given channel
  * @chan:              The channel being queried.
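/* Usage sketch for the new consumer attribute helper (the channel and the
 * attribute chosen here are illustrative): */
static int example_read_scale(struct iio_channel *chan)
{
	int val, val2, ret;

	ret = iio_read_channel_attribute(chan, &val, &val2, IIO_CHAN_INFO_SCALE);
	if (ret < 0)
		return ret;
	/* e.g. IIO_VAL_INT_PLUS_MICRO: scale is val + val2 / 1000000 */
	return 0;
}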
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h
new file mode 100644 (file)
index 0000000..44d48bb
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Industrial I/O in kernel hardware consumer interface
+ *
+ * Copyright 2017 Analog Devices Inc.
+ *  Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef LINUX_IIO_HW_CONSUMER_H
+#define LINUX_IIO_HW_CONSUMER_H
+
+struct iio_hw_consumer;
+
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev);
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc);
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev);
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc);
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc);
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc);
+
+#endif
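/* Minimal lifecycle sketch for a driver that consumes another device's
 * channels in hardware; error handling assumes the ERR_PTR convention common
 * to devm_iio_* helpers. */
static int example_hwc_start(struct device *dev)
{
	struct iio_hw_consumer *hwc;

	hwc = devm_iio_hw_consumer_alloc(dev);
	if (IS_ERR(hwc))
		return PTR_ERR(hwc);

	return iio_hw_consumer_enable(hwc);
}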
index 20b6134..f12a61b 100644 (file)
  * Currently assumes nano seconds.
  */
 
-enum iio_chan_info_enum {
-       IIO_CHAN_INFO_RAW = 0,
-       IIO_CHAN_INFO_PROCESSED,
-       IIO_CHAN_INFO_SCALE,
-       IIO_CHAN_INFO_OFFSET,
-       IIO_CHAN_INFO_CALIBSCALE,
-       IIO_CHAN_INFO_CALIBBIAS,
-       IIO_CHAN_INFO_PEAK,
-       IIO_CHAN_INFO_PEAK_SCALE,
-       IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
-       IIO_CHAN_INFO_AVERAGE_RAW,
-       IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
-       IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
-       IIO_CHAN_INFO_SAMP_FREQ,
-       IIO_CHAN_INFO_FREQUENCY,
-       IIO_CHAN_INFO_PHASE,
-       IIO_CHAN_INFO_HARDWAREGAIN,
-       IIO_CHAN_INFO_HYSTERESIS,
-       IIO_CHAN_INFO_INT_TIME,
-       IIO_CHAN_INFO_ENABLE,
-       IIO_CHAN_INFO_CALIBHEIGHT,
-       IIO_CHAN_INFO_CALIBWEIGHT,
-       IIO_CHAN_INFO_DEBOUNCE_COUNT,
-       IIO_CHAN_INFO_DEBOUNCE_TIME,
-       IIO_CHAN_INFO_CALIBEMISSIVITY,
-       IIO_CHAN_INFO_OVERSAMPLING_RATIO,
-};
-
 enum iio_shared_by {
        IIO_SEPARATE,
        IIO_SHARED_BY_TYPE,
index 34d59bf..464458d 100644 (file)
 #define LPTIM2_OUT     "lptim2_out"
 #define LPTIM3_OUT     "lptim3_out"
 
-#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
+#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
 bool is_stm32_lptim_trigger(struct iio_trigger *trig);
 #else
 static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig)
 {
+#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
+       pr_warn_once("stm32 lptim_trigger not linked in\n");
+#endif
        return false;
 }
 #endif
index 2aa7b63..6eb3d68 100644 (file)
@@ -34,4 +34,32 @@ enum iio_available_type {
        IIO_AVAIL_RANGE,
 };
 
+enum iio_chan_info_enum {
+       IIO_CHAN_INFO_RAW = 0,
+       IIO_CHAN_INFO_PROCESSED,
+       IIO_CHAN_INFO_SCALE,
+       IIO_CHAN_INFO_OFFSET,
+       IIO_CHAN_INFO_CALIBSCALE,
+       IIO_CHAN_INFO_CALIBBIAS,
+       IIO_CHAN_INFO_PEAK,
+       IIO_CHAN_INFO_PEAK_SCALE,
+       IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+       IIO_CHAN_INFO_AVERAGE_RAW,
+       IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
+       IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
+       IIO_CHAN_INFO_SAMP_FREQ,
+       IIO_CHAN_INFO_FREQUENCY,
+       IIO_CHAN_INFO_PHASE,
+       IIO_CHAN_INFO_HARDWAREGAIN,
+       IIO_CHAN_INFO_HYSTERESIS,
+       IIO_CHAN_INFO_INT_TIME,
+       IIO_CHAN_INFO_ENABLE,
+       IIO_CHAN_INFO_CALIBHEIGHT,
+       IIO_CHAN_INFO_CALIBWEIGHT,
+       IIO_CHAN_INFO_DEBOUNCE_COUNT,
+       IIO_CHAN_INFO_DEBOUNCE_TIME,
+       IIO_CHAN_INFO_CALIBEMISSIVITY,
+       IIO_CHAN_INFO_OVERSAMPLING_RATIO,
+};
+
 #endif /* _IIO_TYPES_H_ */
index 6a53262..a454b8a 100644 (file)
 
 #include <asm/thread_info.h>
 
-#ifdef CONFIG_SMP
-# define INIT_PUSHABLE_TASKS(tsk)                                      \
-       .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
-#else
-# define INIT_PUSHABLE_TASKS(tsk)
-#endif
-
 extern struct files_struct init_files;
 extern struct fs_struct init_fs;
-
-#ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ(tsk)                                                   \
-       .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
-#else
-#define INIT_CPUSET_SEQ(tsk)
-#endif
+extern struct nsproxy init_nsproxy;
+extern struct group_info init_groups;
+extern struct cred init_cred;
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #define INIT_PREV_CPUTIME(x)   .prev_cputime = {                       \
@@ -47,67 +36,16 @@ extern struct fs_struct init_fs;
 #endif
 
 #ifdef CONFIG_POSIX_TIMERS
-#define INIT_POSIX_TIMERS(s)                                           \
-       .posix_timers = LIST_HEAD_INIT(s.posix_timers),
 #define INIT_CPU_TIMERS(s)                                             \
        .cpu_timers = {                                                 \
                LIST_HEAD_INIT(s.cpu_timers[0]),                        \
                LIST_HEAD_INIT(s.cpu_timers[1]),                        \
-               LIST_HEAD_INIT(s.cpu_timers[2]),                                                                \
-       },
-#define INIT_CPUTIMER(s)                                               \
-       .cputimer       = {                                             \
-               .cputime_atomic = INIT_CPUTIME_ATOMIC,                  \
-               .running        = false,                                \
-               .checking_timer = false,                                \
+               LIST_HEAD_INIT(s.cpu_timers[2]),                        \
        },
 #else
-#define INIT_POSIX_TIMERS(s)
 #define INIT_CPU_TIMERS(s)
-#define INIT_CPUTIMER(s)
 #endif
 
-#define INIT_SIGNALS(sig) {                                            \
-       .nr_threads     = 1,                                            \
-       .thread_head    = LIST_HEAD_INIT(init_task.thread_node),        \
-       .wait_chldexit  = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
-       .shared_pending = {                                             \
-               .list = LIST_HEAD_INIT(sig.shared_pending.list),        \
-               .signal =  {{0}}},                                      \
-       INIT_POSIX_TIMERS(sig)                                          \
-       INIT_CPU_TIMERS(sig)                                            \
-       .rlim           = INIT_RLIMITS,                                 \
-       INIT_CPUTIMER(sig)                                              \
-       INIT_PREV_CPUTIME(sig)                                          \
-       .cred_guard_mutex =                                             \
-                __MUTEX_INITIALIZER(sig.cred_guard_mutex),             \
-}
-
-extern struct nsproxy init_nsproxy;
-
-#define INIT_SIGHAND(sighand) {                                                \
-       .count          = ATOMIC_INIT(1),                               \
-       .action         = { { { .sa_handler = SIG_DFL, } }, },          \
-       .siglock        = __SPIN_LOCK_UNLOCKED(sighand.siglock),        \
-       .signalfd_wqh   = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh),  \
-}
-
-extern struct group_info init_groups;
-
-#define INIT_STRUCT_PID {                                              \
-       .count          = ATOMIC_INIT(1),                               \
-       .tasks          = {                                             \
-               { .first = NULL },                                      \
-               { .first = NULL },                                      \
-               { .first = NULL },                                      \
-       },                                                              \
-       .level          = 0,                                            \
-       .numbers        = { {                                           \
-               .nr             = 0,                                    \
-               .ns             = &init_pid_ns,                         \
-       }, }                                                            \
-}
-
 #define INIT_PID_LINK(type)                                    \
 {                                                              \
        .node = {                                               \
@@ -117,192 +55,16 @@ extern struct group_info init_groups;
        .pid = &init_struct_pid,                                \
 }
 
-#ifdef CONFIG_AUDITSYSCALL
-#define INIT_IDS \
-       .loginuid = INVALID_UID, \
-       .sessionid = (unsigned int)-1,
-#else
-#define INIT_IDS
-#endif
-
-#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_PREEMPT(tsk)                                     \
-       .rcu_read_lock_nesting = 0,                                     \
-       .rcu_read_unlock_special.s = 0,                                 \
-       .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),           \
-       .rcu_blocked_node = NULL,
-#else
-#define INIT_TASK_RCU_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_TASKS_RCU
-#define INIT_TASK_RCU_TASKS(tsk)                                       \
-       .rcu_tasks_holdout = false,                                     \
-       .rcu_tasks_holdout_list =                                       \
-               LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list),             \
-       .rcu_tasks_idle_cpu = -1,
-#else
-#define INIT_TASK_RCU_TASKS(tsk)
-#endif
-
-extern struct cred init_cred;
-
-#ifdef CONFIG_CGROUP_SCHED
-# define INIT_CGROUP_SCHED(tsk)                                                \
-       .sched_task_group = &root_task_group,
-#else
-# define INIT_CGROUP_SCHED(tsk)
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk)                                         \
-       .perf_event_mutex =                                             \
-                __MUTEX_INITIALIZER(tsk.perf_event_mutex),             \
-       .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
-#else
-# define INIT_PERF_EVENTS(tsk)
-#endif
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-# define INIT_VTIME(tsk)                                               \
-       .vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount),              \
-       .vtime.starttime = 0,                                           \
-       .vtime.state = VTIME_SYS,
-#else
-# define INIT_VTIME(tsk)
-#endif
-
 #define INIT_TASK_COMM "swapper"
 
-#ifdef CONFIG_RT_MUTEXES
-# define INIT_RT_MUTEXES(tsk)                                          \
-       .pi_waiters = RB_ROOT_CACHED,                                   \
-       .pi_top_task = NULL,
-#else
-# define INIT_RT_MUTEXES(tsk)
-#endif
-
-#ifdef CONFIG_NUMA_BALANCING
-# define INIT_NUMA_BALANCING(tsk)                                      \
-       .numa_preferred_nid = -1,                                       \
-       .numa_group = NULL,                                             \
-       .numa_faults = NULL,
-#else
-# define INIT_NUMA_BALANCING(tsk)
-#endif
-
-#ifdef CONFIG_KASAN
-# define INIT_KASAN(tsk)                                               \
-       .kasan_depth = 1,
-#else
-# define INIT_KASAN(tsk)
-#endif
-
-#ifdef CONFIG_LIVEPATCH
-# define INIT_LIVEPATCH(tsk)                                           \
-       .patch_state = KLP_UNDEFINED,
-#else
-# define INIT_LIVEPATCH(tsk)
-#endif
-
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-# define INIT_TASK_TI(tsk)                     \
-       .thread_info = INIT_THREAD_INFO(tsk),   \
-       .stack_refcount = ATOMIC_INIT(1),
-#else
-# define INIT_TASK_TI(tsk)
-#endif
-
-#ifdef CONFIG_SECURITY
-#define INIT_TASK_SECURITY .security = NULL,
-#else
-#define INIT_TASK_SECURITY
-#endif
-
-/*
- *  INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
- */
-#define INIT_TASK(tsk) \
-{                                                                      \
-       INIT_TASK_TI(tsk)                                               \
-       .state          = 0,                                            \
-       .stack          = init_stack,                                   \
-       .usage          = ATOMIC_INIT(2),                               \
-       .flags          = PF_KTHREAD,                                   \
-       .prio           = MAX_PRIO-20,                                  \
-       .static_prio    = MAX_PRIO-20,                                  \
-       .normal_prio    = MAX_PRIO-20,                                  \
-       .policy         = SCHED_NORMAL,                                 \
-       .cpus_allowed   = CPU_MASK_ALL,                                 \
-       .nr_cpus_allowed= NR_CPUS,                                      \
-       .mm             = NULL,                                         \
-       .active_mm      = &init_mm,                                     \
-       .restart_block = {                                              \
-               .fn = do_no_restart_syscall,                            \
-       },                                                              \
-       .se             = {                                             \
-               .group_node     = LIST_HEAD_INIT(tsk.se.group_node),    \
-       },                                                              \
-       .rt             = {                                             \
-               .run_list       = LIST_HEAD_INIT(tsk.rt.run_list),      \
-               .time_slice     = RR_TIMESLICE,                         \
-       },                                                              \
-       .tasks          = LIST_HEAD_INIT(tsk.tasks),                    \
-       INIT_PUSHABLE_TASKS(tsk)                                        \
-       INIT_CGROUP_SCHED(tsk)                                          \
-       .ptraced        = LIST_HEAD_INIT(tsk.ptraced),                  \
-       .ptrace_entry   = LIST_HEAD_INIT(tsk.ptrace_entry),             \
-       .real_parent    = &tsk,                                         \
-       .parent         = &tsk,                                         \
-       .children       = LIST_HEAD_INIT(tsk.children),                 \
-       .sibling        = LIST_HEAD_INIT(tsk.sibling),                  \
-       .group_leader   = &tsk,                                         \
-       RCU_POINTER_INITIALIZER(real_cred, &init_cred),                 \
-       RCU_POINTER_INITIALIZER(cred, &init_cred),                      \
-       .comm           = INIT_TASK_COMM,                               \
-       .thread         = INIT_THREAD,                                  \
-       .fs             = &init_fs,                                     \
-       .files          = &init_files,                                  \
-       .signal         = &init_signals,                                \
-       .sighand        = &init_sighand,                                \
-       .nsproxy        = &init_nsproxy,                                \
-       .pending        = {                                             \
-               .list = LIST_HEAD_INIT(tsk.pending.list),               \
-               .signal = {{0}}},                                       \
-       .blocked        = {{0}},                                        \
-       .alloc_lock     = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),         \
-       .journal_info   = NULL,                                         \
-       INIT_CPU_TIMERS(tsk)                                            \
-       .pi_lock        = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),        \
-       .timer_slack_ns = 50000, /* 50 usec default slack */            \
-       .pids = {                                                       \
-               [PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),            \
-               [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),           \
-               [PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),            \
-       },                                                              \
-       .thread_group   = LIST_HEAD_INIT(tsk.thread_group),             \
-       .thread_node    = LIST_HEAD_INIT(init_signals.thread_head),     \
-       INIT_IDS                                                        \
-       INIT_PERF_EVENTS(tsk)                                           \
-       INIT_TRACE_IRQFLAGS                                             \
-       INIT_LOCKDEP                                                    \
-       INIT_FTRACE_GRAPH                                               \
-       INIT_TRACE_RECURSION                                            \
-       INIT_TASK_RCU_PREEMPT(tsk)                                      \
-       INIT_TASK_RCU_TASKS(tsk)                                        \
-       INIT_CPUSET_SEQ(tsk)                                            \
-       INIT_RT_MUTEXES(tsk)                                            \
-       INIT_PREV_CPUTIME(tsk)                                          \
-       INIT_VTIME(tsk)                                                 \
-       INIT_NUMA_BALANCING(tsk)                                        \
-       INIT_KASAN(tsk)                                                 \
-       INIT_LIVEPATCH(tsk)                                             \
-       INIT_TASK_SECURITY                                              \
-}
-
-
 /* Attach to the init_task data structure for proper alignment */
+#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
 #define __init_task_data __attribute__((__section__(".data..init_task")))
+#else
+#define __init_task_data /**/
+#endif
 
+/* Attach to the thread_info data structure for proper alignment */
+#define __init_thread_info __attribute__((__section__(".data..init_thread_info")))
 
 #endif
diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h
new file mode 100644 (file)
index 0000000..2710d72
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (C) Intel 2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The PTI (Parallel Trace Interface) driver directs trace data routed from
+ * various parts in the system out through the Intel Penwell PTI port and
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG, standard.
+ *
+ * This header file allows other parts of the OS to use the
+ * interface to write out its contents for debugging a mobile system.
+ */
+
+#ifndef LINUX_INTEL_PTI_H_
+#define LINUX_INTEL_PTI_H_
+
+/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
+#define PTI_LASTDWORD_DTS      0x30
+
+/* basic structure used as a write address to the PTI HW */
+struct pti_masterchannel {
+       u8 master;
+       u8 channel;
+};
+
+/* the following functions are defined in misc/pti.c */
+void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
+struct pti_masterchannel *pti_request_masterchannel(u8 type,
+                                                   const char *thread_name);
+void pti_release_masterchannel(struct pti_masterchannel *mc);
+
+#endif /* LINUX_INTEL_PTI_H_ */
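/* Hypothetical write path built on the declarations above; the channel type
 * value and thread name are illustrative, see drivers/misc/pti.c for the
 * exact semantics. */
static void example_pti_write(u8 *buf, int count)
{
	struct pti_masterchannel *mc;

	mc = pti_request_masterchannel(0, "example");
	if (!mc)
		return;
	pti_writedata(mc, buf, count);
	pti_release_masterchannel(mc);
}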
index cb18c62..8415bf1 100644 (file)
@@ -273,7 +273,8 @@ struct ipv6_pinfo {
                                                 * 100: prefer care-of address
                                                 */
                                dontfrag:1,
-                               autoflowlabel:1;
+                               autoflowlabel:1,
+                               autoflowlabel_set:1;
        __u8                    min_hopcount;
        __u8                    tclass;
        __be32                  rcv_flowinfo;
index e140f69..a0231e9 100644 (file)
@@ -212,6 +212,7 @@ struct irq_data {
  *                               mask. Applies only to affinity managed irqs.
  * IRQD_SINGLE_TARGET          - IRQ allows only a single affinity target
  * IRQD_DEFAULT_TRIGGER_SET    - Expected trigger already been set
+ * IRQD_CAN_RESERVE            - Can use reservation mode
  */
 enum {
        IRQD_TRIGGER_MASK               = 0xf,
@@ -233,6 +234,7 @@ enum {
        IRQD_MANAGED_SHUTDOWN           = (1 << 23),
        IRQD_SINGLE_TARGET              = (1 << 24),
        IRQD_DEFAULT_TRIGGER_SET        = (1 << 25),
+       IRQD_CAN_RESERVE                = (1 << 26),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -377,6 +379,21 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
        return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
 }
 
+static inline void irqd_set_can_reserve(struct irq_data *d)
+{
+       __irqd_to_state(d) |= IRQD_CAN_RESERVE;
+}
+
+static inline void irqd_clr_can_reserve(struct irq_data *d)
+{
+       __irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
+}
+
+static inline bool irqd_can_reserve(struct irq_data *d)
+{
+       return __irqd_to_state(d) & IRQD_CAN_RESERVE;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
index dd41895..25b33b6 100644 (file)
@@ -230,7 +230,7 @@ irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
        data->chip = chip;
 }
 
-static inline int irq_balancing_disabled(unsigned int irq)
+static inline bool irq_balancing_disabled(unsigned int irq)
 {
        struct irq_desc *desc;
 
@@ -238,7 +238,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
        return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
 }
 
-static inline int irq_is_percpu(unsigned int irq)
+static inline bool irq_is_percpu(unsigned int irq)
 {
        struct irq_desc *desc;
 
@@ -246,7 +246,7 @@ static inline int irq_is_percpu(unsigned int irq)
        return desc->status_use_accessors & IRQ_PER_CPU;
 }
 
-static inline int irq_is_percpu_devid(unsigned int irq)
+static inline bool irq_is_percpu_devid(unsigned int irq)
 {
        struct irq_desc *desc;
 
@@ -255,12 +255,15 @@ static inline int irq_is_percpu_devid(unsigned int irq)
 }
 
 static inline void
-irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
+irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+                     struct lock_class_key *request_class)
 {
        struct irq_desc *desc = irq_to_desc(irq);
 
-       if (desc)
-               lockdep_set_class(&desc->lock, class);
+       if (desc) {
+               lockdep_set_class(&desc->lock, lock_class);
+               lockdep_set_class(&desc->request_mutex, request_class);
+       }
 }
 
 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
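/* Sketch of the updated call pattern: callers now provide a second lockdep
 * class for the per-descriptor request mutex (key names are illustrative). */
static struct lock_class_key example_irq_lock_class;
static struct lock_class_key example_irq_request_class;

static void example_setup_irq_lockdep(unsigned int irq)
{
	irq_set_lockdep_class(irq, &example_irq_lock_class,
			      &example_irq_request_class);
}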
index a34355d..48c7e86 100644 (file)
@@ -113,7 +113,7 @@ struct irq_domain_ops {
                     unsigned int nr_irqs, void *arg);
        void (*free)(struct irq_domain *d, unsigned int virq,
                     unsigned int nr_irqs);
-       int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early);
+       int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
        void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
        int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
                         unsigned long *out_hwirq, unsigned int *out_type);
index 46cb57d..9700f00 100644 (file)
 # define trace_hardirq_enter()                 \
 do {                                           \
        current->hardirq_context++;             \
-       crossrelease_hist_start(XHLOCK_HARD);   \
 } while (0)
 # define trace_hardirq_exit()                  \
 do {                                           \
        current->hardirq_context--;             \
-       crossrelease_hist_end(XHLOCK_HARD);     \
 } while (0)
 # define lockdep_softirq_enter()               \
 do {                                           \
        current->softirq_context++;             \
-       crossrelease_hist_start(XHLOCK_SOFT);   \
 } while (0)
 # define lockdep_softirq_exit()                        \
 do {                                           \
        current->softirq_context--;             \
-       crossrelease_hist_end(XHLOCK_SOFT);     \
 } while (0)
-# define INIT_TRACE_IRQFLAGS   .softirqs_enabled = 1,
 #else
 # define trace_hardirqs_on()           do { } while (0)
 # define trace_hardirqs_off()          do { } while (0)
@@ -58,7 +53,6 @@ do {                                          \
 # define trace_hardirq_exit()          do { } while (0)
 # define lockdep_softirq_enter()       do { } while (0)
 # define lockdep_softirq_exit()                do { } while (0)
-# define INIT_TRACE_IRQFLAGS
 #endif
 
 #if defined(CONFIG_IRQSOFF_TRACER) || \
index c7b368c..e0340ca 100644 (file)
@@ -160,6 +160,8 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
 extern int jump_label_text_reserved(void *start, void *end);
 extern void static_key_slow_inc(struct static_key *key);
 extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_inc_cpuslocked(struct static_key *key);
+extern void static_key_slow_dec_cpuslocked(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
 extern int static_key_count(struct static_key *key);
 extern void static_key_enable(struct static_key *key);
@@ -222,6 +224,9 @@ static inline void static_key_slow_dec(struct static_key *key)
        atomic_dec(&key->enabled);
 }
 
+#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
+#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
+
 static inline int jump_label_text_reserved(void *start, void *end)
 {
        return 0;
@@ -416,6 +421,8 @@ extern bool ____wrong_branch_error(void);
 
 #define static_branch_inc(x)           static_key_slow_inc(&(x)->key)
 #define static_branch_dec(x)           static_key_slow_dec(&(x)->key)
+#define static_branch_inc_cpuslocked(x)        static_key_slow_inc_cpuslocked(&(x)->key)
+#define static_branch_dec_cpuslocked(x)        static_key_slow_dec_cpuslocked(&(x)->key)
 
 /*
  * Normal usage; boolean enable/disable.
index 708f337..bd118a6 100644 (file)
 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
                         2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
 
-#ifndef CONFIG_64BIT
-# define KALLSYM_FMT "%08lx"
-#else
-# define KALLSYM_FMT "%016lx"
-#endif
-
 struct module;
 
 #ifdef CONFIG_KALLSYMS
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
index 2e754b7..6bdd4b9 100644 (file)
@@ -232,7 +232,7 @@ struct kvm_vcpu {
        struct mutex mutex;
        struct kvm_run *run;
 
-       int guest_fpu_loaded, guest_xcr0_loaded;
+       int guest_xcr0_loaded;
        struct swait_queue_head wq;
        struct pid __rcu *pid;
        int sigset_active;
@@ -715,6 +715,9 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
                         unsigned long len);
 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
 
+void kvm_sigset_activate(struct kvm_vcpu *vcpu);
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
+
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
diff --git a/include/linux/libgcc.h b/include/linux/libgcc.h
new file mode 100644 (file)
index 0000000..32e1e0f
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * include/linux/libgcc.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#ifndef __LIB_LIBGCC_H
+#define __LIB_LIBGCC_H
+
+#include <asm/byteorder.h>
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+#ifdef __BIG_ENDIAN
+struct DWstruct {
+       int high, low;
+};
+#elif defined(__LITTLE_ENDIAN)
+struct DWstruct {
+       int low, high;
+};
+#else
+#error I feel sick.
+#endif
+
+typedef union {
+       struct DWstruct s;
+       long long ll;
+} DWunion;
+
+#endif /* __LIB_LIBGCC_H */
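/* Illustrative use of DWunion: split a 64-bit value into word-sized halves,
 * the access pattern libgcc-style shift/division helpers rely on. */
static inline int example_high_word(long long v)
{
	const DWunion u = { .ll = v };

	return u.s.high;
}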
index a842551..795634e 100644 (file)
@@ -158,12 +158,6 @@ struct lockdep_map {
        int                             cpu;
        unsigned long                   ip;
 #endif
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-       /*
-        * Whether it's a crosslock.
-        */
-       int                             cross;
-#endif
 };
 
 static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -267,95 +261,8 @@ struct held_lock {
        unsigned int hardirqs_off:1;
        unsigned int references:12;                                     /* 32 bits */
        unsigned int pin_count;
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-       /*
-        * Generation id.
-        *
-        * A value of cross_gen_id will be stored when holding this,
-        * which is globally increased whenever each crosslock is held.
-        */
-       unsigned int gen_id;
-#endif
-};
-
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCK_TRACE_ENTRIES 5
-
-/*
- * This is for keeping locks waiting for commit so that true dependencies
- * can be added at commit step.
- */
-struct hist_lock {
-       /*
-        * Id for each entry in the ring buffer. This is used to
-        * decide whether the ring buffer was overwritten or not.
-        *
-        * For example,
-        *
-        *           |<----------- hist_lock ring buffer size ------->|
-        *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
-        * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
-        *
-        *           where 'p' represents an acquisition in process
-        *           context, 'i' represents an acquisition in irq
-        *           context.
-        *
-        * In this example, the ring buffer was overwritten by
-        * acquisitions in irq context, that should be detected on
-        * rollback or commit.
-        */
-       unsigned int hist_id;
-
-       /*
-        * Seperate stack_trace data. This will be used at commit step.
-        */
-       struct stack_trace      trace;
-       unsigned long           trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
-
-       /*
-        * Seperate hlock instance. This will be used at commit step.
-        *
-        * TODO: Use a smaller data structure containing only necessary
-        * data. However, we should make lockdep code able to handle the
-        * smaller one first.
-        */
-       struct held_lock        hlock;
 };
 
-/*
- * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
- * be called instead of lockdep_init_map().
- */
-struct cross_lock {
-       /*
-        * When more than one acquisition of crosslocks are overlapped,
-        * we have to perform commit for them based on cross_gen_id of
-        * the first acquisition, which allows us to add more true
-        * dependencies.
-        *
-        * Moreover, when no acquisition of a crosslock is in progress,
-        * we should not perform commit because the lock might not exist
-        * any more, which might cause incorrect memory access. So we
-        * have to track the number of acquisitions of a crosslock.
-        */
-       int nr_acquire;
-
-       /*
-        * Seperate hlock instance. This will be used at commit step.
-        *
-        * TODO: Use a smaller data structure containing only necessary
-        * data. However, we should make lockdep code able to handle the
-        * smaller one first.
-        */
-       struct held_lock        hlock;
-};
-
-struct lockdep_map_cross {
-       struct lockdep_map map;
-       struct cross_lock xlock;
-};
-#endif
-
 /*
  * Initialization, self-test and debugging-output methods:
  */
@@ -460,8 +367,6 @@ extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
 extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
 extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 
-# define INIT_LOCKDEP                          .lockdep_recursion = 0,
-
 #define lockdep_depth(tsk)     (debug_locks ? (tsk)->lockdep_depth : 0)
 
 #define lockdep_assert_held(l) do {                            \
@@ -519,7 +424,6 @@ static inline void lockdep_on(void)
  * #ifdef the call himself.
  */
 
-# define INIT_LOCKDEP
 # define lockdep_reset()               do { debug_locks = 1; } while (0)
 # define lockdep_free_key_range(start, size)   do { } while (0)
 # define lockdep_sys_exit()                    do { } while (0)
@@ -560,37 +464,6 @@ enum xhlock_context_t {
        XHLOCK_CTX_NR,
 };
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
-                                      const char *name,
-                                      struct lock_class_key *key,
-                                      int subclass);
-extern void lock_commit_crosslock(struct lockdep_map *lock);
-
-/*
- * What we essencially have to initialize is 'nr_acquire'. Other members
- * will be initialized in add_xlock().
- */
-#define STATIC_CROSS_LOCK_INIT() \
-       { .nr_acquire = 0,}
-
-#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
-       { .map.name = (_name), .map.key = (void *)(_key), \
-         .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
-
-/*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
- */
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
-       { .name = (_name), .key = (void *)(_key), .cross = 0, }
-
-extern void crossrelease_hist_start(enum xhlock_context_t c);
-extern void crossrelease_hist_end(enum xhlock_context_t c);
-extern void lockdep_invariant_state(bool force);
-extern void lockdep_init_task(struct task_struct *task);
-extern void lockdep_free_task(struct task_struct *task);
-#else /* !CROSSRELEASE */
 #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
 /*
  * To initialize a lockdep_map statically use this macro.
@@ -599,12 +472,9 @@ extern void lockdep_free_task(struct task_struct *task);
 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }
 
-static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
-static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
 static inline void lockdep_invariant_state(bool force) {}
 static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
-#endif /* CROSSRELEASE */
 
 #ifdef CONFIG_LOCK_STAT
 
index 78dc853..080798f 100644 (file)
@@ -645,11 +645,6 @@ struct axp20x_dev {
        const struct regmap_irq_chip    *regmap_irq_chip;
 };
 
-struct axp288_extcon_pdata {
-       /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
-       struct gpio_desc *gpio_mux_cntl;
-};
-
 /* generic helper function for reading 9-16 bit wide regs */
 static inline int axp20x_read_variable_width(struct regmap *regmap,
        unsigned int reg, unsigned int width)
index 4e887ba..c615359 100644 (file)
@@ -322,6 +322,10 @@ extern struct attribute_group cros_ec_attr_group;
 extern struct attribute_group cros_ec_lightbar_attr_group;
 extern struct attribute_group cros_ec_vbc_attr_group;
 
+/* debugfs stuff */
+int cros_ec_debugfs_init(struct cros_ec_dev *ec);
+void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
+
 /* ACPI GPE handler */
 #ifdef CONFIG_ACPI
 
index 2b16e95..a83f649 100644 (file)
@@ -2904,16 +2904,33 @@ enum usb_pd_control_mux {
        USB_PD_CTRL_MUX_AUTO = 5,
 };
 
+enum usb_pd_control_swap {
+       USB_PD_CTRL_SWAP_NONE = 0,
+       USB_PD_CTRL_SWAP_DATA = 1,
+       USB_PD_CTRL_SWAP_POWER = 2,
+       USB_PD_CTRL_SWAP_VCONN = 3,
+       USB_PD_CTRL_SWAP_COUNT
+};
+
 struct ec_params_usb_pd_control {
        uint8_t port;
        uint8_t role;
        uint8_t mux;
+       uint8_t swap;
 } __packed;
 
 #define PD_CTRL_RESP_ENABLED_COMMS      (1 << 0) /* Communication enabled */
 #define PD_CTRL_RESP_ENABLED_CONNECTED  (1 << 1) /* Device connected */
 #define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */
 
+#define PD_CTRL_RESP_ROLE_POWER         BIT(0) /* 0=SNK/1=SRC */
+#define PD_CTRL_RESP_ROLE_DATA          BIT(1) /* 0=UFP/1=DFP */
+#define PD_CTRL_RESP_ROLE_VCONN         BIT(2) /* Vconn status */
+#define PD_CTRL_RESP_ROLE_DR_POWER      BIT(3) /* Partner is dualrole power */
+#define PD_CTRL_RESP_ROLE_DR_DATA       BIT(4) /* Partner is dualrole data */
+#define PD_CTRL_RESP_ROLE_USB_COMM      BIT(5) /* Partner USB comm capable */
+#define PD_CTRL_RESP_ROLE_EXT_POWERED   BIT(6) /* Partner externally powered */
+
 struct ec_response_usb_pd_control_v1 {
        uint8_t enabled;
        uint8_t role;
index 3c8568a..75e5c8f 100644 (file)
@@ -3733,6 +3733,9 @@ enum usb_irq_events {
 #define TPS65917_REGEN3_CTRL_MODE_ACTIVE                       0x01
 #define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT                 0x00
 
+/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */
+#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK            0xC
+
 /* Registers for function RESOURCE */
 #define TPS65917_REGEN1_CTRL                                   0x2
 #define TPS65917_PLLEN_CTRL                                    0x3
diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h
new file mode 100644 (file)
index 0000000..796fb97
--- /dev/null
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Core definitions for RAVE SP MFD driver.
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#ifndef _LINUX_RAVE_SP_H_
+#define _LINUX_RAVE_SP_H_
+
+#include <linux/notifier.h>
+
+enum rave_sp_command {
+       RAVE_SP_CMD_GET_FIRMWARE_VERSION        = 0x20,
+       RAVE_SP_CMD_GET_BOOTLOADER_VERSION      = 0x21,
+       RAVE_SP_CMD_BOOT_SOURCE                 = 0x26,
+       RAVE_SP_CMD_GET_BOARD_COPPER_REV        = 0x2B,
+       RAVE_SP_CMD_GET_GPIO_STATE              = 0x2F,
+
+       RAVE_SP_CMD_STATUS                      = 0xA0,
+       RAVE_SP_CMD_SW_WDT                      = 0xA1,
+       RAVE_SP_CMD_PET_WDT                     = 0xA2,
+       RAVE_SP_CMD_RESET                       = 0xA7,
+       RAVE_SP_CMD_RESET_REASON                = 0xA8,
+
+       RAVE_SP_CMD_REQ_COPPER_REV              = 0xB6,
+       RAVE_SP_CMD_GET_I2C_DEVICE_STATUS       = 0xBA,
+       RAVE_SP_CMD_GET_SP_SILICON_REV          = 0xB9,
+       RAVE_SP_CMD_CONTROL_EVENTS              = 0xBB,
+
+       RAVE_SP_EVNT_BASE                       = 0xE0,
+};
+
+struct rave_sp;
+
+static inline unsigned long rave_sp_action_pack(u8 event, u8 value)
+{
+       return ((unsigned long)value << 8) | event;
+}
+
+static inline u8 rave_sp_action_unpack_event(unsigned long action)
+{
+       return action;
+}
+
+static inline u8 rave_sp_action_unpack_value(unsigned long action)
+{
+       return action >> 8;
+}
+
+int rave_sp_exec(struct rave_sp *sp,
+                void *__data,  size_t data_size,
+                void *reply_data, size_t reply_data_size);
+
+struct device;
+int devm_rave_sp_register_event_notifier(struct device *dev,
+                                        struct notifier_block *nb);
+
+#endif /* _LINUX_RAVE_SP_H_ */
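/* Round-trip sketch for the action helpers above (the event and value used
 * here are arbitrary examples): */
static void example_action_roundtrip(void)
{
	unsigned long action = rave_sp_action_pack(RAVE_SP_EVNT_BASE, 0x42);
	u8 event = rave_sp_action_unpack_event(action); /* RAVE_SP_EVNT_BASE */
	u8 value = rave_sp_action_unpack_value(action); /* 0x42 */

	(void)event;
	(void)value;
}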
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/mfd/rtsx_common.h
deleted file mode 100644 (file)
index 443176e..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Driver for Realtek driver-based card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- *   Wei WANG <wei_wang@realsil.com.cn>
- */
-
-#ifndef __RTSX_COMMON_H
-#define __RTSX_COMMON_H
-
-#define DRV_NAME_RTSX_PCI              "rtsx_pci"
-#define DRV_NAME_RTSX_PCI_SDMMC                "rtsx_pci_sdmmc"
-#define DRV_NAME_RTSX_PCI_MS           "rtsx_pci_ms"
-
-#define RTSX_REG_PAIR(addr, val)       (((u32)(addr) << 16) | (u8)(val))
-
-#define RTSX_SSC_DEPTH_4M              0x01
-#define RTSX_SSC_DEPTH_2M              0x02
-#define RTSX_SSC_DEPTH_1M              0x03
-#define RTSX_SSC_DEPTH_500K            0x04
-#define RTSX_SSC_DEPTH_250K            0x05
-
-#define RTSX_SD_CARD                   0
-#define RTSX_MS_CARD                   1
-
-#define CLK_TO_DIV_N                   0
-#define DIV_N_TO_CLK                   1
-
-struct platform_device;
-
-struct rtsx_slot {
-       struct platform_device  *p_dev;
-       void                    (*card_event)(struct platform_device *p_dev);
-};
-
-#endif
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
deleted file mode 100644 (file)
index a2a1318..0000000
+++ /dev/null
@@ -1,1141 +0,0 @@
-/* Driver for Realtek PCI-Express card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- *   Wei WANG <wei_wang@realsil.com.cn>
- */
-
-#ifndef __RTSX_PCI_H
-#define __RTSX_PCI_H
-
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/mfd/rtsx_common.h>
-
-#define MAX_RW_REG_CNT                 1024
-
-#define RTSX_HCBAR                     0x00
-#define RTSX_HCBCTLR                   0x04
-#define   STOP_CMD                     (0x01 << 28)
-#define   READ_REG_CMD                 0
-#define   WRITE_REG_CMD                        1
-#define   CHECK_REG_CMD                        2
-
-#define RTSX_HDBAR                     0x08
-#define   SG_INT                       0x04
-#define   SG_END                       0x02
-#define   SG_VALID                     0x01
-#define   SG_NO_OP                     0x00
-#define   SG_TRANS_DATA                        (0x02 << 4)
-#define   SG_LINK_DESC                 (0x03 << 4)
-#define RTSX_HDBCTLR                   0x0C
-#define   SDMA_MODE                    0x00
-#define   ADMA_MODE                    (0x02 << 26)
-#define   STOP_DMA                     (0x01 << 28)
-#define   TRIG_DMA                     (0x01 << 31)
-
-#define RTSX_HAIMR                     0x10
-#define   HAIMR_TRANS_START            (0x01 << 31)
-#define   HAIMR_READ                   0x00
-#define   HAIMR_WRITE                  (0x01 << 30)
-#define   HAIMR_READ_START             (HAIMR_TRANS_START | HAIMR_READ)
-#define   HAIMR_WRITE_START            (HAIMR_TRANS_START | HAIMR_WRITE)
-#define   HAIMR_TRANS_END                      (HAIMR_TRANS_START)
-
-#define RTSX_BIPR                      0x14
-#define   CMD_DONE_INT                 (1 << 31)
-#define   DATA_DONE_INT                        (1 << 30)
-#define   TRANS_OK_INT                 (1 << 29)
-#define   TRANS_FAIL_INT               (1 << 28)
-#define   XD_INT                       (1 << 27)
-#define   MS_INT                       (1 << 26)
-#define   SD_INT                       (1 << 25)
-#define   GPIO0_INT                    (1 << 24)
-#define   OC_INT                       (1 << 23)
-#define   SD_WRITE_PROTECT             (1 << 19)
-#define   XD_EXIST                     (1 << 18)
-#define   MS_EXIST                     (1 << 17)
-#define   SD_EXIST                     (1 << 16)
-#define   DELINK_INT                   GPIO0_INT
-#define   MS_OC_INT                    (1 << 23)
-#define   SD_OC_INT                    (1 << 22)
-
-#define CARD_INT               (XD_INT | MS_INT | SD_INT)
-#define NEED_COMPLETE_INT      (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
-#define RTSX_INT               (CMD_DONE_INT | NEED_COMPLETE_INT | \
-                                       CARD_INT | GPIO0_INT | OC_INT)
-#define CARD_EXIST             (XD_EXIST | MS_EXIST | SD_EXIST)
-
-#define RTSX_BIER                      0x18
-#define   CMD_DONE_INT_EN              (1 << 31)
-#define   DATA_DONE_INT_EN             (1 << 30)
-#define   TRANS_OK_INT_EN              (1 << 29)
-#define   TRANS_FAIL_INT_EN            (1 << 28)
-#define   XD_INT_EN                    (1 << 27)
-#define   MS_INT_EN                    (1 << 26)
-#define   SD_INT_EN                    (1 << 25)
-#define   GPIO0_INT_EN                 (1 << 24)
-#define   OC_INT_EN                    (1 << 23)
-#define   DELINK_INT_EN                        GPIO0_INT_EN
-#define   MS_OC_INT_EN                 (1 << 23)
-#define   SD_OC_INT_EN                 (1 << 22)
-
-
-/*
- * macros for easy use
- */
-#define rtsx_pci_writel(pcr, reg, value) \
-       iowrite32(value, (pcr)->remap_addr + reg)
-#define rtsx_pci_readl(pcr, reg) \
-       ioread32((pcr)->remap_addr + reg)
-#define rtsx_pci_writew(pcr, reg, value) \
-       iowrite16(value, (pcr)->remap_addr + reg)
-#define rtsx_pci_readw(pcr, reg) \
-       ioread16((pcr)->remap_addr + reg)
-#define rtsx_pci_writeb(pcr, reg, value) \
-       iowrite8(value, (pcr)->remap_addr + reg)
-#define rtsx_pci_readb(pcr, reg) \
-       ioread8((pcr)->remap_addr + reg)
-
-#define rtsx_pci_read_config_byte(pcr, where, val) \
-       pci_read_config_byte((pcr)->pci, where, val)
-
-#define rtsx_pci_write_config_byte(pcr, where, val) \
-       pci_write_config_byte((pcr)->pci, where, val)
-
-#define rtsx_pci_read_config_dword(pcr, where, val) \
-       pci_read_config_dword((pcr)->pci, where, val)
-
-#define rtsx_pci_write_config_dword(pcr, where, val) \
-       pci_write_config_dword((pcr)->pci, where, val)
-
-#define STATE_TRANS_NONE               0
-#define STATE_TRANS_CMD                        1
-#define STATE_TRANS_BUF                        2
-#define STATE_TRANS_SG                 3
-
-#define TRANS_NOT_READY                        0
-#define TRANS_RESULT_OK                        1
-#define TRANS_RESULT_FAIL              2
-#define TRANS_NO_DEVICE                        3
-
-#define RTSX_RESV_BUF_LEN              4096
-#define HOST_CMDS_BUF_LEN              1024
-#define HOST_SG_TBL_BUF_LEN            (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
-#define HOST_SG_TBL_ITEMS              (HOST_SG_TBL_BUF_LEN / 8)
-#define MAX_SG_ITEM_LEN                        0x80000
-#define HOST_TO_DEVICE                 0
-#define DEVICE_TO_HOST                 1
-
-#define OUTPUT_3V3                     0
-#define OUTPUT_1V8                     1
-
-#define RTSX_PHASE_MAX                 32
-#define RX_TUNING_CNT                  3
-
-#define MS_CFG                         0xFD40
-#define   SAMPLE_TIME_RISING           0x00
-#define   SAMPLE_TIME_FALLING          0x80
-#define   PUSH_TIME_DEFAULT            0x00
-#define   PUSH_TIME_ODD                        0x40
-#define   NO_EXTEND_TOGGLE             0x00
-#define   EXTEND_TOGGLE_CHK            0x20
-#define   MS_BUS_WIDTH_1               0x00
-#define   MS_BUS_WIDTH_4               0x10
-#define   MS_BUS_WIDTH_8               0x18
-#define   MS_2K_SECTOR_MODE            0x04
-#define   MS_512_SECTOR_MODE           0x00
-#define   MS_TOGGLE_TIMEOUT_EN         0x00
-#define   MS_TOGGLE_TIMEOUT_DISEN      0x01
-#define MS_NO_CHECK_INT                        0x02
-#define MS_TPC                         0xFD41
-#define MS_TRANS_CFG                   0xFD42
-#define   WAIT_INT                     0x80
-#define   NO_WAIT_INT                  0x00
-#define   NO_AUTO_READ_INT_REG         0x00
-#define   AUTO_READ_INT_REG            0x40
-#define   MS_CRC16_ERR                 0x20
-#define   MS_RDY_TIMEOUT               0x10
-#define   MS_INT_CMDNK                 0x08
-#define   MS_INT_BREQ                  0x04
-#define   MS_INT_ERR                   0x02
-#define   MS_INT_CED                   0x01
-#define MS_TRANSFER                    0xFD43
-#define   MS_TRANSFER_START            0x80
-#define   MS_TRANSFER_END              0x40
-#define   MS_TRANSFER_ERR              0x20
-#define   MS_BS_STATE                  0x10
-#define   MS_TM_READ_BYTES             0x00
-#define   MS_TM_NORMAL_READ            0x01
-#define   MS_TM_WRITE_BYTES            0x04
-#define   MS_TM_NORMAL_WRITE           0x05
-#define   MS_TM_AUTO_READ              0x08
-#define   MS_TM_AUTO_WRITE             0x0C
-#define MS_INT_REG                     0xFD44
-#define MS_BYTE_CNT                    0xFD45
-#define MS_SECTOR_CNT_L                        0xFD46
-#define MS_SECTOR_CNT_H                        0xFD47
-#define MS_DBUS_H                      0xFD48
-
-#define SD_CFG1                                0xFDA0
-#define   SD_CLK_DIVIDE_0              0x00
-#define   SD_CLK_DIVIDE_256            0xC0
-#define   SD_CLK_DIVIDE_128            0x80
-#define   SD_BUS_WIDTH_1BIT            0x00
-#define   SD_BUS_WIDTH_4BIT            0x01
-#define   SD_BUS_WIDTH_8BIT            0x02
-#define   SD_ASYNC_FIFO_NOT_RST                0x10
-#define   SD_20_MODE                   0x00
-#define   SD_DDR_MODE                  0x04
-#define   SD_30_MODE                   0x08
-#define   SD_CLK_DIVIDE_MASK           0xC0
-#define SD_CFG2                                0xFDA1
-#define   SD_CALCULATE_CRC7            0x00
-#define   SD_NO_CALCULATE_CRC7         0x80
-#define   SD_CHECK_CRC16               0x00
-#define   SD_NO_CHECK_CRC16            0x40
-#define   SD_NO_CHECK_WAIT_CRC_TO      0x20
-#define   SD_WAIT_BUSY_END             0x08
-#define   SD_NO_WAIT_BUSY_END          0x00
-#define   SD_CHECK_CRC7                        0x00
-#define   SD_NO_CHECK_CRC7             0x04
-#define   SD_RSP_LEN_0                 0x00
-#define   SD_RSP_LEN_6                 0x01
-#define   SD_RSP_LEN_17                        0x02
-#define   SD_RSP_TYPE_R0               0x04
-#define   SD_RSP_TYPE_R1               0x01
-#define   SD_RSP_TYPE_R1b              0x09
-#define   SD_RSP_TYPE_R2               0x02
-#define   SD_RSP_TYPE_R3               0x05
-#define   SD_RSP_TYPE_R4               0x05
-#define   SD_RSP_TYPE_R5               0x01
-#define   SD_RSP_TYPE_R6               0x01
-#define   SD_RSP_TYPE_R7               0x01
-#define SD_CFG3                                0xFDA2
-#define   SD_RSP_80CLK_TIMEOUT_EN      0x01
-
-#define SD_STAT1                       0xFDA3
-#define   SD_CRC7_ERR                  0x80
-#define   SD_CRC16_ERR                 0x40
-#define   SD_CRC_WRITE_ERR             0x20
-#define   SD_CRC_WRITE_ERR_MASK                0x1C
-#define   GET_CRC_TIME_OUT             0x02
-#define   SD_TUNING_COMPARE_ERR                0x01
-#define SD_STAT2                       0xFDA4
-#define   SD_RSP_80CLK_TIMEOUT         0x01
-
-#define SD_BUS_STAT                    0xFDA5
-#define   SD_CLK_TOGGLE_EN             0x80
-#define   SD_CLK_FORCE_STOP            0x40
-#define   SD_DAT3_STATUS               0x10
-#define   SD_DAT2_STATUS               0x08
-#define   SD_DAT1_STATUS               0x04
-#define   SD_DAT0_STATUS               0x02
-#define   SD_CMD_STATUS                        0x01
-#define SD_PAD_CTL                     0xFDA6
-#define   SD_IO_USING_1V8              0x80
-#define   SD_IO_USING_3V3              0x7F
-#define   TYPE_A_DRIVING               0x00
-#define   TYPE_B_DRIVING               0x01
-#define   TYPE_C_DRIVING               0x02
-#define   TYPE_D_DRIVING               0x03
-#define SD_SAMPLE_POINT_CTL            0xFDA7
-#define   DDR_FIX_RX_DAT               0x00
-#define   DDR_VAR_RX_DAT               0x80
-#define   DDR_FIX_RX_DAT_EDGE          0x00
-#define   DDR_FIX_RX_DAT_14_DELAY      0x40
-#define   DDR_FIX_RX_CMD               0x00
-#define   DDR_VAR_RX_CMD               0x20
-#define   DDR_FIX_RX_CMD_POS_EDGE      0x00
-#define   DDR_FIX_RX_CMD_14_DELAY      0x10
-#define   SD20_RX_POS_EDGE             0x00
-#define   SD20_RX_14_DELAY             0x08
-#define SD20_RX_SEL_MASK               0x08
-#define SD_PUSH_POINT_CTL              0xFDA8
-#define   DDR_FIX_TX_CMD_DAT           0x00
-#define   DDR_VAR_TX_CMD_DAT           0x80
-#define   DDR_FIX_TX_DAT_14_TSU                0x00
-#define   DDR_FIX_TX_DAT_12_TSU                0x40
-#define   DDR_FIX_TX_CMD_NEG_EDGE      0x00
-#define   DDR_FIX_TX_CMD_14_AHEAD      0x20
-#define   SD20_TX_NEG_EDGE             0x00
-#define   SD20_TX_14_AHEAD             0x10
-#define   SD20_TX_SEL_MASK             0x10
-#define   DDR_VAR_SDCLK_POL_SWAP       0x01
-#define SD_CMD0                                0xFDA9
-#define   SD_CMD_START                 0x40
-#define SD_CMD1                                0xFDAA
-#define SD_CMD2                                0xFDAB
-#define SD_CMD3                                0xFDAC
-#define SD_CMD4                                0xFDAD
-#define SD_CMD5                                0xFDAE
-#define SD_BYTE_CNT_L                  0xFDAF
-#define SD_BYTE_CNT_H                  0xFDB0
-#define SD_BLOCK_CNT_L                 0xFDB1
-#define SD_BLOCK_CNT_H                 0xFDB2
-#define SD_TRANSFER                    0xFDB3
-#define   SD_TRANSFER_START            0x80
-#define   SD_TRANSFER_END              0x40
-#define   SD_STAT_IDLE                 0x20
-#define   SD_TRANSFER_ERR              0x10
-#define   SD_TM_NORMAL_WRITE           0x00
-#define   SD_TM_AUTO_WRITE_3           0x01
-#define   SD_TM_AUTO_WRITE_4           0x02
-#define   SD_TM_AUTO_READ_3            0x05
-#define   SD_TM_AUTO_READ_4            0x06
-#define   SD_TM_CMD_RSP                        0x08
-#define   SD_TM_AUTO_WRITE_1           0x09
-#define   SD_TM_AUTO_WRITE_2           0x0A
-#define   SD_TM_NORMAL_READ            0x0C
-#define   SD_TM_AUTO_READ_1            0x0D
-#define   SD_TM_AUTO_READ_2            0x0E
-#define   SD_TM_AUTO_TUNING            0x0F
-#define SD_CMD_STATE                   0xFDB5
-#define   SD_CMD_IDLE                  0x80
-
-#define SD_DATA_STATE                  0xFDB6
-#define   SD_DATA_IDLE                 0x80
-
-#define SRCTL                          0xFC13
-
-#define DCM_DRP_CTL                    0xFC23
-#define   DCM_RESET                    0x08
-#define   DCM_LOCKED                   0x04
-#define   DCM_208M                     0x00
-#define   DCM_TX                       0x01
-#define   DCM_RX                       0x02
-#define DCM_DRP_TRIG                   0xFC24
-#define   DRP_START                    0x80
-#define   DRP_DONE                     0x40
-#define DCM_DRP_CFG                    0xFC25
-#define   DRP_WRITE                    0x80
-#define   DRP_READ                     0x00
-#define   DCM_WRITE_ADDRESS_50         0x50
-#define   DCM_WRITE_ADDRESS_51         0x51
-#define   DCM_READ_ADDRESS_00          0x00
-#define   DCM_READ_ADDRESS_51          0x51
-#define DCM_DRP_WR_DATA_L              0xFC26
-#define DCM_DRP_WR_DATA_H              0xFC27
-#define DCM_DRP_RD_DATA_L              0xFC28
-#define DCM_DRP_RD_DATA_H              0xFC29
-#define SD_VPCLK0_CTL                  0xFC2A
-#define SD_VPCLK1_CTL                  0xFC2B
-#define   PHASE_SELECT_MASK            0x1F
-#define SD_DCMPS0_CTL                  0xFC2C
-#define SD_DCMPS1_CTL                  0xFC2D
-#define SD_VPTX_CTL                    SD_VPCLK0_CTL
-#define SD_VPRX_CTL                    SD_VPCLK1_CTL
-#define   PHASE_CHANGE                 0x80
-#define   PHASE_NOT_RESET              0x40
-#define SD_DCMPS_TX_CTL                        SD_DCMPS0_CTL
-#define SD_DCMPS_RX_CTL                        SD_DCMPS1_CTL
-#define   DCMPS_CHANGE                 0x80
-#define   DCMPS_CHANGE_DONE            0x40
-#define   DCMPS_ERROR                  0x20
-#define   DCMPS_CURRENT_PHASE          0x1F
-#define CARD_CLK_SOURCE                        0xFC2E
-#define   CRC_FIX_CLK                  (0x00 << 0)
-#define   CRC_VAR_CLK0                 (0x01 << 0)
-#define   CRC_VAR_CLK1                 (0x02 << 0)
-#define   SD30_FIX_CLK                 (0x00 << 2)
-#define   SD30_VAR_CLK0                        (0x01 << 2)
-#define   SD30_VAR_CLK1                        (0x02 << 2)
-#define   SAMPLE_FIX_CLK               (0x00 << 4)
-#define   SAMPLE_VAR_CLK0              (0x01 << 4)
-#define   SAMPLE_VAR_CLK1              (0x02 << 4)
-#define CARD_PWR_CTL                   0xFD50
-#define   PMOS_STRG_MASK               0x10
-#define   PMOS_STRG_800mA              0x10
-#define   PMOS_STRG_400mA              0x00
-#define   SD_POWER_OFF                 0x03
-#define   SD_PARTIAL_POWER_ON          0x01
-#define   SD_POWER_ON                  0x00
-#define   SD_POWER_MASK                        0x03
-#define   MS_POWER_OFF                 0x0C
-#define   MS_PARTIAL_POWER_ON          0x04
-#define   MS_POWER_ON                  0x00
-#define   MS_POWER_MASK                        0x0C
-#define   BPP_POWER_OFF                        0x0F
-#define   BPP_POWER_5_PERCENT_ON       0x0E
-#define   BPP_POWER_10_PERCENT_ON      0x0C
-#define   BPP_POWER_15_PERCENT_ON      0x08
-#define   BPP_POWER_ON                 0x00
-#define   BPP_POWER_MASK               0x0F
-#define   SD_VCC_PARTIAL_POWER_ON      0x02
-#define   SD_VCC_POWER_ON              0x00
-#define CARD_CLK_SWITCH                        0xFD51
-#define RTL8411B_PACKAGE_MODE          0xFD51
-#define CARD_SHARE_MODE                        0xFD52
-#define   CARD_SHARE_MASK              0x0F
-#define   CARD_SHARE_MULTI_LUN         0x00
-#define   CARD_SHARE_NORMAL            0x00
-#define   CARD_SHARE_48_SD             0x04
-#define   CARD_SHARE_48_MS             0x08
-#define   CARD_SHARE_BAROSSA_SD                0x01
-#define   CARD_SHARE_BAROSSA_MS                0x02
-#define CARD_DRIVE_SEL                 0xFD53
-#define   MS_DRIVE_8mA                 (0x01 << 6)
-#define   MMC_DRIVE_8mA                        (0x01 << 4)
-#define   XD_DRIVE_8mA                 (0x01 << 2)
-#define   GPIO_DRIVE_8mA               0x01
-#define RTS5209_CARD_DRIVE_DEFAULT     (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
-                                       XD_DRIVE_8mA | GPIO_DRIVE_8mA)
-#define RTL8411_CARD_DRIVE_DEFAULT     (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
-                                       XD_DRIVE_8mA)
-#define RTSX_CARD_DRIVE_DEFAULT                (MS_DRIVE_8mA | GPIO_DRIVE_8mA)
-
-#define CARD_STOP                      0xFD54
-#define   SPI_STOP                     0x01
-#define   XD_STOP                      0x02
-#define   SD_STOP                      0x04
-#define   MS_STOP                      0x08
-#define   SPI_CLR_ERR                  0x10
-#define   XD_CLR_ERR                   0x20
-#define   SD_CLR_ERR                   0x40
-#define   MS_CLR_ERR                   0x80
-#define CARD_OE                                0xFD55
-#define   SD_OUTPUT_EN                 0x04
-#define   MS_OUTPUT_EN                 0x08
-#define CARD_AUTO_BLINK                        0xFD56
-#define CARD_GPIO_DIR                  0xFD57
-#define CARD_GPIO                      0xFD58
-#define CARD_DATA_SOURCE               0xFD5B
-#define   PINGPONG_BUFFER              0x01
-#define   RING_BUFFER                  0x00
-#define SD30_CLK_DRIVE_SEL             0xFD5A
-#define   DRIVER_TYPE_A                        0x05
-#define   DRIVER_TYPE_B                        0x03
-#define   DRIVER_TYPE_C                        0x02
-#define   DRIVER_TYPE_D                        0x01
-#define CARD_SELECT                    0xFD5C
-#define   SD_MOD_SEL                   2
-#define   MS_MOD_SEL                   3
-#define SD30_DRIVE_SEL                 0xFD5E
-#define   CFG_DRIVER_TYPE_A            0x02
-#define   CFG_DRIVER_TYPE_B            0x03
-#define   CFG_DRIVER_TYPE_C            0x01
-#define   CFG_DRIVER_TYPE_D            0x00
-#define SD30_CMD_DRIVE_SEL             0xFD5E
-#define SD30_DAT_DRIVE_SEL             0xFD5F
-#define CARD_CLK_EN                    0xFD69
-#define   SD_CLK_EN                    0x04
-#define   MS_CLK_EN                    0x08
-#define SDIO_CTRL                      0xFD6B
-#define CD_PAD_CTL                     0xFD73
-#define   CD_DISABLE_MASK              0x07
-#define   MS_CD_DISABLE                        0x04
-#define   SD_CD_DISABLE                        0x02
-#define   XD_CD_DISABLE                        0x01
-#define   CD_DISABLE                   0x07
-#define   CD_ENABLE                    0x00
-#define   MS_CD_EN_ONLY                        0x03
-#define   SD_CD_EN_ONLY                        0x05
-#define   XD_CD_EN_ONLY                        0x06
-#define   FORCE_CD_LOW_MASK            0x38
-#define   FORCE_CD_XD_LOW              0x08
-#define   FORCE_CD_SD_LOW              0x10
-#define   FORCE_CD_MS_LOW              0x20
-#define   CD_AUTO_DISABLE              0x40
-#define FPDCTL                         0xFC00
-#define   SSC_POWER_DOWN               0x01
-#define   SD_OC_POWER_DOWN             0x02
-#define   ALL_POWER_DOWN               0x07
-#define   OC_POWER_DOWN                        0x06
-#define PDINFO                         0xFC01
-
-#define CLK_CTL                                0xFC02
-#define   CHANGE_CLK                   0x01
-#define   CLK_LOW_FREQ                 0x01
-
-#define CLK_DIV                                0xFC03
-#define   CLK_DIV_1                    0x01
-#define   CLK_DIV_2                    0x02
-#define   CLK_DIV_4                    0x03
-#define   CLK_DIV_8                    0x04
-#define CLK_SEL                                0xFC04
-
-#define SSC_DIV_N_0                    0xFC0F
-#define SSC_DIV_N_1                    0xFC10
-#define SSC_CTL1                       0xFC11
-#define    SSC_RSTB                    0x80
-#define    SSC_8X_EN                   0x40
-#define    SSC_FIX_FRAC                        0x20
-#define    SSC_SEL_1M                  0x00
-#define    SSC_SEL_2M                  0x08
-#define    SSC_SEL_4M                  0x10
-#define    SSC_SEL_8M                  0x18
-#define SSC_CTL2                       0xFC12
-#define    SSC_DEPTH_MASK              0x07
-#define    SSC_DEPTH_DISALBE           0x00
-#define    SSC_DEPTH_4M                        0x01
-#define    SSC_DEPTH_2M                        0x02
-#define    SSC_DEPTH_1M                        0x03
-#define    SSC_DEPTH_500K              0x04
-#define    SSC_DEPTH_250K              0x05
-#define RCCTL                          0xFC14
-
-#define FPGA_PULL_CTL                  0xFC1D
-#define OLT_LED_CTL                    0xFC1E
-#define GPIO_CTL                       0xFC1F
-
-#define LDO_CTL                                0xFC1E
-#define   BPP_ASIC_1V7                 0x00
-#define   BPP_ASIC_1V8                 0x01
-#define   BPP_ASIC_1V9                 0x02
-#define   BPP_ASIC_2V0                 0x03
-#define   BPP_ASIC_2V7                 0x04
-#define   BPP_ASIC_2V8                 0x05
-#define   BPP_ASIC_3V2                 0x06
-#define   BPP_ASIC_3V3                 0x07
-#define   BPP_REG_TUNED18              0x07
-#define   BPP_TUNED18_SHIFT_8402       5
-#define   BPP_TUNED18_SHIFT_8411       4
-#define   BPP_PAD_MASK                 0x04
-#define   BPP_PAD_3V3                  0x04
-#define   BPP_PAD_1V8                  0x00
-#define   BPP_LDO_POWB                 0x03
-#define   BPP_LDO_ON                   0x00
-#define   BPP_LDO_SUSPEND              0x02
-#define   BPP_LDO_OFF                  0x03
-#define SYS_VER                                0xFC32
-
-#define CARD_PULL_CTL1                 0xFD60
-#define CARD_PULL_CTL2                 0xFD61
-#define CARD_PULL_CTL3                 0xFD62
-#define CARD_PULL_CTL4                 0xFD63
-#define CARD_PULL_CTL5                 0xFD64
-#define CARD_PULL_CTL6                 0xFD65
-
-/* PCI Express Related Registers */
-#define IRQEN0                         0xFE20
-#define IRQSTAT0                       0xFE21
-#define    DMA_DONE_INT                        0x80
-#define    SUSPEND_INT                 0x40
-#define    LINK_RDY_INT                        0x20
-#define    LINK_DOWN_INT               0x10
-#define IRQEN1                         0xFE22
-#define IRQSTAT1                       0xFE23
-#define TLPRIEN                                0xFE24
-#define TLPRISTAT                      0xFE25
-#define TLPTIEN                                0xFE26
-#define TLPTISTAT                      0xFE27
-#define DMATC0                         0xFE28
-#define DMATC1                         0xFE29
-#define DMATC2                         0xFE2A
-#define DMATC3                         0xFE2B
-#define DMACTL                         0xFE2C
-#define   DMA_RST                      0x80
-#define   DMA_BUSY                     0x04
-#define   DMA_DIR_TO_CARD              0x00
-#define   DMA_DIR_FROM_CARD            0x02
-#define   DMA_EN                       0x01
-#define   DMA_128                      (0 << 4)
-#define   DMA_256                      (1 << 4)
-#define   DMA_512                      (2 << 4)
-#define   DMA_1024                     (3 << 4)
-#define   DMA_PACK_SIZE_MASK           0x30
-#define BCTL                           0xFE2D
-#define RBBC0                          0xFE2E
-#define RBBC1                          0xFE2F
-#define RBDAT                          0xFE30
-#define RBCTL                          0xFE34
-#define CFGADDR0                       0xFE35
-#define CFGADDR1                       0xFE36
-#define CFGDATA0                       0xFE37
-#define CFGDATA1                       0xFE38
-#define CFGDATA2                       0xFE39
-#define CFGDATA3                       0xFE3A
-#define CFGRWCTL                       0xFE3B
-#define PHYRWCTL                       0xFE3C
-#define PHYDATA0                       0xFE3D
-#define PHYDATA1                       0xFE3E
-#define PHYADDR                                0xFE3F
-#define MSGRXDATA0                     0xFE40
-#define MSGRXDATA1                     0xFE41
-#define MSGRXDATA2                     0xFE42
-#define MSGRXDATA3                     0xFE43
-#define MSGTXDATA0                     0xFE44
-#define MSGTXDATA1                     0xFE45
-#define MSGTXDATA2                     0xFE46
-#define MSGTXDATA3                     0xFE47
-#define MSGTXCTL                       0xFE48
-#define LTR_CTL                                0xFE4A
-#define LTR_TX_EN_MASK         BIT(7)
-#define LTR_TX_EN_1                    BIT(7)
-#define LTR_TX_EN_0                    0
-#define LTR_LATENCY_MODE_MASK          BIT(6)
-#define LTR_LATENCY_MODE_HW            0
-#define LTR_LATENCY_MODE_SW            BIT(6)
-#define OBFF_CFG                       0xFE4C
-
-#define CDRESUMECTL                    0xFE52
-#define WAKE_SEL_CTL                   0xFE54
-#define PCLK_CTL                       0xFE55
-#define   PCLK_MODE_SEL                        0x20
-#define PME_FORCE_CTL                  0xFE56
-
-#define ASPM_FORCE_CTL                 0xFE57
-#define   FORCE_ASPM_CTL0              0x10
-#define   FORCE_ASPM_VAL_MASK          0x03
-#define   FORCE_ASPM_L1_EN             0x02
-#define   FORCE_ASPM_L0_EN             0x01
-#define   FORCE_ASPM_NO_ASPM           0x00
-#define PM_CLK_FORCE_CTL               0xFE58
-#define FUNC_FORCE_CTL                 0xFE59
-#define   FUNC_FORCE_UPME_XMT_DBG      0x02
-#define PERST_GLITCH_WIDTH             0xFE5C
-#define CHANGE_LINK_STATE              0xFE5B
-#define RESET_LOAD_REG                 0xFE5E
-#define EFUSE_CONTENT                  0xFE5F
-#define HOST_SLEEP_STATE               0xFE60
-#define   HOST_ENTER_S1                        1
-#define   HOST_ENTER_S3                        2
-
-#define SDIO_CFG                       0xFE70
-#define PM_EVENT_DEBUG                 0xFE71
-#define   PME_DEBUG_0                  0x08
-#define NFTS_TX_CTRL                   0xFE72
-
-#define PWR_GATE_CTRL                  0xFE75
-#define   PWR_GATE_EN                  0x01
-#define   LDO3318_PWR_MASK             0x06
-#define   LDO_ON                       0x00
-#define   LDO_SUSPEND                  0x04
-#define   LDO_OFF                      0x06
-#define PWD_SUSPEND_EN                 0xFE76
-#define LDO_PWR_SEL                    0xFE78
-
-#define L1SUB_CONFIG1                  0xFE8D
-#define L1SUB_CONFIG2                  0xFE8E
-#define   L1SUB_AUTO_CFG               0x02
-#define L1SUB_CONFIG3                  0xFE8F
-#define   L1OFF_MBIAS2_EN_5250         BIT(7)
-
-#define DUMMY_REG_RESET_0              0xFE90
-
-#define AUTOLOAD_CFG_BASE              0xFF00
-#define PETXCFG                                0xFF03
-#define FORCE_CLKREQ_DELINK_MASK       BIT(7)
-#define FORCE_CLKREQ_LOW       0x80
-#define FORCE_CLKREQ_HIGH      0x00
-
-#define PM_CTRL1                       0xFF44
-#define   CD_RESUME_EN_MASK            0xF0
-
-#define PM_CTRL2                       0xFF45
-#define PM_CTRL3                       0xFF46
-#define   SDIO_SEND_PME_EN             0x80
-#define   FORCE_RC_MODE_ON             0x40
-#define   FORCE_RX50_LINK_ON           0x20
-#define   D3_DELINK_MODE_EN            0x10
-#define   USE_PESRTB_CTL_DELINK                0x08
-#define   DELAY_PIN_WAKE               0x04
-#define   RESET_PIN_WAKE               0x02
-#define   PM_WAKE_EN                   0x01
-#define PM_CTRL4                       0xFF47
-
-/* Memory mapping */
-#define SRAM_BASE                      0xE600
-#define RBUF_BASE                      0xF400
-#define PPBUF_BASE1                    0xF800
-#define PPBUF_BASE2                    0xFA00
-#define IMAGE_FLAG_ADDR0               0xCE80
-#define IMAGE_FLAG_ADDR1               0xCE81
-
-#define RREF_CFG                       0xFF6C
-#define   RREF_VBGSEL_MASK             0x38
-#define   RREF_VBGSEL_1V25             0x28
-
-#define OOBS_CONFIG                    0xFF6E
-#define   OOBS_AUTOK_DIS               0x80
-#define   OOBS_VAL_MASK                        0x1F
-
-#define LDO_DV18_CFG                   0xFF70
-#define   LDO_DV18_SR_MASK             0xC0
-#define   LDO_DV18_SR_DF               0x40
-
-#define LDO_CONFIG2                    0xFF71
-#define   LDO_D3318_MASK               0x07
-#define   LDO_D3318_33V                        0x07
-#define   LDO_D3318_18V                        0x02
-
-#define LDO_VCC_CFG0                   0xFF72
-#define   LDO_VCC_LMTVTH_MASK          0x30
-#define   LDO_VCC_LMTVTH_2A            0x10
-
-#define LDO_VCC_CFG1                   0xFF73
-#define   LDO_VCC_REF_TUNE_MASK                0x30
-#define   LDO_VCC_REF_1V2              0x20
-#define   LDO_VCC_TUNE_MASK            0x07
-#define   LDO_VCC_1V8                  0x04
-#define   LDO_VCC_3V3                  0x07
-#define   LDO_VCC_LMT_EN               0x08
-
-#define LDO_VIO_CFG                    0xFF75
-#define   LDO_VIO_SR_MASK              0xC0
-#define   LDO_VIO_SR_DF                        0x40
-#define   LDO_VIO_REF_TUNE_MASK                0x30
-#define   LDO_VIO_REF_1V2              0x20
-#define   LDO_VIO_TUNE_MASK            0x07
-#define   LDO_VIO_1V7                  0x03
-#define   LDO_VIO_1V8                  0x04
-#define   LDO_VIO_3V3                  0x07
-
-#define LDO_DV12S_CFG                  0xFF76
-#define   LDO_REF12_TUNE_MASK          0x18
-#define   LDO_REF12_TUNE_DF            0x10
-#define   LDO_D12_TUNE_MASK            0x07
-#define   LDO_D12_TUNE_DF              0x04
-
-#define LDO_AV12S_CFG                  0xFF77
-#define   LDO_AV12S_TUNE_MASK          0x07
-#define   LDO_AV12S_TUNE_DF            0x04
-
-#define SD40_LDO_CTL1                  0xFE7D
-#define   SD40_VIO_TUNE_MASK           0x70
-#define   SD40_VIO_TUNE_1V7            0x30
-#define   SD_VIO_LDO_1V8               0x40
-#define   SD_VIO_LDO_3V3               0x70
-
-/* Phy register */
-#define PHY_PCR                                0x00
-#define   PHY_PCR_FORCE_CODE           0xB000
-#define   PHY_PCR_OOBS_CALI_50         0x0800
-#define   PHY_PCR_OOBS_VCM_08          0x0200
-#define   PHY_PCR_OOBS_SEN_90          0x0040
-#define   PHY_PCR_RSSI_EN              0x0002
-#define   PHY_PCR_RX10K                        0x0001
-
-#define PHY_RCR0                       0x01
-#define PHY_RCR1                       0x02
-#define   PHY_RCR1_ADP_TIME_4          0x0400
-#define   PHY_RCR1_VCO_COARSE          0x001F
-#define   PHY_RCR1_INIT_27S            0x0A1F
-#define PHY_SSCCR2                     0x02
-#define   PHY_SSCCR2_PLL_NCODE         0x0A00
-#define   PHY_SSCCR2_TIME0             0x001C
-#define   PHY_SSCCR2_TIME2_WIDTH       0x0003
-
-#define PHY_RCR2                       0x03
-#define   PHY_RCR2_EMPHASE_EN          0x8000
-#define   PHY_RCR2_NADJR               0x4000
-#define   PHY_RCR2_CDR_SR_2            0x0100
-#define   PHY_RCR2_FREQSEL_12          0x0040
-#define   PHY_RCR2_CDR_SC_12P          0x0010
-#define   PHY_RCR2_CALIB_LATE          0x0002
-#define   PHY_RCR2_INIT_27S            0xC152
-#define PHY_SSCCR3                     0x03
-#define   PHY_SSCCR3_STEP_IN           0x2740
-#define   PHY_SSCCR3_CHECK_DELAY       0x0008
-#define _PHY_ANA03                     0x03
-#define   _PHY_ANA03_TIMER_MAX         0x2700
-#define   _PHY_ANA03_OOBS_DEB_EN       0x0040
-#define   _PHY_CMU_DEBUG_EN            0x0008
-
-#define PHY_RTCR                       0x04
-#define PHY_RDR                                0x05
-#define   PHY_RDR_RXDSEL_1_9           0x4000
-#define   PHY_SSC_AUTO_PWD             0x0600
-#define PHY_TCR0                       0x06
-#define PHY_TCR1                       0x07
-#define PHY_TUNE                       0x08
-#define   PHY_TUNE_TUNEREF_1_0         0x4000
-#define   PHY_TUNE_VBGSEL_1252         0x0C00
-#define   PHY_TUNE_SDBUS_33            0x0200
-#define   PHY_TUNE_TUNED18             0x01C0
-#define   PHY_TUNE_TUNED12             0X0020
-#define   PHY_TUNE_TUNEA12             0x0004
-#define   PHY_TUNE_VOLTAGE_MASK                0xFC3F
-#define   PHY_TUNE_VOLTAGE_3V3         0x03C0
-#define   PHY_TUNE_D18_1V8             0x0100
-#define   PHY_TUNE_D18_1V7             0x0080
-#define PHY_ANA08                      0x08
-#define   PHY_ANA08_RX_EQ_DCGAIN       0x5000
-#define   PHY_ANA08_SEL_RX_EN          0x0400
-#define   PHY_ANA08_RX_EQ_VAL          0x03C0
-#define   PHY_ANA08_SCP                        0x0020
-#define   PHY_ANA08_SEL_IPI            0x0004
-
-#define PHY_IMR                                0x09
-#define PHY_BPCR                       0x0A
-#define   PHY_BPCR_IBRXSEL             0x0400
-#define   PHY_BPCR_IBTXSEL             0x0100
-#define   PHY_BPCR_IB_FILTER           0x0080
-#define   PHY_BPCR_CMIRROR_EN          0x0040
-
-#define PHY_BIST                       0x0B
-#define PHY_RAW_L                      0x0C
-#define PHY_RAW_H                      0x0D
-#define PHY_RAW_DATA                   0x0E
-#define PHY_HOST_CLK_CTRL              0x0F
-#define PHY_DMR                                0x10
-#define PHY_BACR                       0x11
-#define   PHY_BACR_BASIC_MASK          0xFFF3
-#define PHY_IER                                0x12
-#define PHY_BCSR                       0x13
-#define PHY_BPR                                0x14
-#define PHY_BPNR2                      0x15
-#define PHY_BPNR                       0x16
-#define PHY_BRNR2                      0x17
-#define PHY_BENR                       0x18
-#define PHY_REV                                0x19
-#define   PHY_REV_RESV                 0xE000
-#define   PHY_REV_RXIDLE_LATCHED       0x1000
-#define   PHY_REV_P1_EN                        0x0800
-#define   PHY_REV_RXIDLE_EN            0x0400
-#define   PHY_REV_CLKREQ_TX_EN         0x0200
-#define   PHY_REV_CLKREQ_RX_EN         0x0100
-#define   PHY_REV_CLKREQ_DT_1_0                0x0040
-#define   PHY_REV_STOP_CLKRD           0x0020
-#define   PHY_REV_RX_PWST              0x0008
-#define   PHY_REV_STOP_CLKWR           0x0004
-#define _PHY_REV0                      0x19
-#define   _PHY_REV0_FILTER_OUT         0x3800
-#define   _PHY_REV0_CDR_BYPASS_PFD     0x0100
-#define   _PHY_REV0_CDR_RX_IDLE_BYPASS 0x0002
-
-#define PHY_FLD0                       0x1A
-#define PHY_ANA1A                      0x1A
-#define   PHY_ANA1A_TXR_LOOPBACK       0x2000
-#define   PHY_ANA1A_RXT_BIST           0x0500
-#define   PHY_ANA1A_TXR_BIST           0x0040
-#define   PHY_ANA1A_REV                        0x0006
-#define   PHY_FLD0_INIT_27S            0x2546
-#define PHY_FLD1                       0x1B
-#define PHY_FLD2                       0x1C
-#define PHY_FLD3                       0x1D
-#define   PHY_FLD3_TIMER_4             0x0800
-#define   PHY_FLD3_TIMER_6             0x0020
-#define   PHY_FLD3_RXDELINK            0x0004
-#define   PHY_FLD3_INIT_27S            0x0004
-#define PHY_ANA1D                      0x1D
-#define   PHY_ANA1D_DEBUG_ADDR         0x0004
-#define _PHY_FLD0                      0x1D
-#define   _PHY_FLD0_CLK_REQ_20C                0x8000
-#define   _PHY_FLD0_RX_IDLE_EN         0x1000
-#define   _PHY_FLD0_BIT_ERR_RSTN       0x0800
-#define   _PHY_FLD0_BER_COUNT          0x01E0
-#define   _PHY_FLD0_BER_TIMER          0x001E
-#define   _PHY_FLD0_CHECK_EN           0x0001
-
-#define PHY_FLD4                       0x1E
-#define   PHY_FLD4_FLDEN_SEL           0x4000
-#define   PHY_FLD4_REQ_REF             0x2000
-#define   PHY_FLD4_RXAMP_OFF           0x1000
-#define   PHY_FLD4_REQ_ADDA            0x0800
-#define   PHY_FLD4_BER_COUNT           0x00E0
-#define   PHY_FLD4_BER_TIMER           0x000A
-#define   PHY_FLD4_BER_CHK_EN          0x0001
-#define   PHY_FLD4_INIT_27S            0x5C7F
-#define PHY_DIG1E                      0x1E
-#define   PHY_DIG1E_REV                        0x4000
-#define   PHY_DIG1E_D0_X_D1            0x1000
-#define   PHY_DIG1E_RX_ON_HOST         0x0800
-#define   PHY_DIG1E_RCLK_REF_HOST      0x0400
-#define   PHY_DIG1E_RCLK_TX_EN_KEEP    0x0040
-#define   PHY_DIG1E_RCLK_TX_TERM_KEEP  0x0020
-#define   PHY_DIG1E_RCLK_RX_EIDLE_ON   0x0010
-#define   PHY_DIG1E_TX_TERM_KEEP       0x0008
-#define   PHY_DIG1E_RX_TERM_KEEP       0x0004
-#define   PHY_DIG1E_TX_EN_KEEP         0x0002
-#define   PHY_DIG1E_RX_EN_KEEP         0x0001
-#define PHY_DUM_REG                    0x1F
-
-#define PCR_ASPM_SETTING_REG1          0x160
-#define PCR_ASPM_SETTING_REG2          0x168
-
-#define PCR_SETTING_REG1               0x724
-#define PCR_SETTING_REG2               0x814
-#define PCR_SETTING_REG3               0x747
-
-#define rtsx_pci_init_cmd(pcr)         ((pcr)->ci = 0)
-
-#define RTS5227_DEVICE_ID              0x5227
-#define RTS_MAX_TIMES_FREQ_REDUCTION   8
-
-struct rtsx_pcr;
-
-struct pcr_handle {
-       struct rtsx_pcr                 *pcr;
-};
-
-struct pcr_ops {
-       int (*write_phy)(struct rtsx_pcr *pcr, u8 addr, u16 val);
-       int (*read_phy)(struct rtsx_pcr *pcr, u8 addr, u16 *val);
-       int             (*extra_init_hw)(struct rtsx_pcr *pcr);
-       int             (*optimize_phy)(struct rtsx_pcr *pcr);
-       int             (*turn_on_led)(struct rtsx_pcr *pcr);
-       int             (*turn_off_led)(struct rtsx_pcr *pcr);
-       int             (*enable_auto_blink)(struct rtsx_pcr *pcr);
-       int             (*disable_auto_blink)(struct rtsx_pcr *pcr);
-       int             (*card_power_on)(struct rtsx_pcr *pcr, int card);
-       int             (*card_power_off)(struct rtsx_pcr *pcr, int card);
-       int             (*switch_output_voltage)(struct rtsx_pcr *pcr,
-                                               u8 voltage);
-       unsigned int    (*cd_deglitch)(struct rtsx_pcr *pcr);
-       int             (*conv_clk_and_div_n)(int clk, int dir);
-       void            (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
-       void            (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
-
-       void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
-       int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
-       int (*set_l1off_sub)(struct rtsx_pcr *pcr, u8 val);
-       void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
-       void (*full_on)(struct rtsx_pcr *pcr);
-       void (*power_saving)(struct rtsx_pcr *pcr);
-};
-
-enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
-
-#define ASPM_L1_1_EN_MASK              BIT(3)
-#define ASPM_L1_2_EN_MASK              BIT(2)
-#define PM_L1_1_EN_MASK                BIT(1)
-#define PM_L1_2_EN_MASK                BIT(0)
-
-#define ASPM_L1_1_EN                   BIT(0)
-#define ASPM_L1_2_EN                   BIT(1)
-#define PM_L1_1_EN                             BIT(2)
-#define PM_L1_2_EN                             BIT(3)
-#define LTR_L1SS_PWR_GATE_EN   BIT(4)
-#define L1_SNOOZE_TEST_EN              BIT(5)
-#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN        BIT(6)
-
-enum dev_aspm_mode {
-       DEV_ASPM_DISABLE = 0,
-       DEV_ASPM_DYNAMIC,
-       DEV_ASPM_BACKDOOR,
-       DEV_ASPM_STATIC,
-};
-
-/*
- * struct rtsx_cr_option  - card reader option
- * @dev_flags: device flags
- * @force_clkreq_0: force clock request
- * @ltr_en: enable ltr mode flag
- * @ltr_enabled: ltr mode in configure space flag
- * @ltr_active: ltr mode status
- * @ltr_active_latency: ltr mode active latency
- * @ltr_idle_latency: ltr mode idle latency
- * @ltr_l1off_latency: ltr mode l1off latency
- * @dev_aspm_mode: device aspm mode
- * @l1_snooze_delay: l1 snooze delay
- * @ltr_l1off_sspwrgate: ltr l1off sspwrgate
- * @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
- */
-struct rtsx_cr_option {
-       u32 dev_flags;
-       bool force_clkreq_0;
-       bool ltr_en;
-       bool ltr_enabled;
-       bool ltr_active;
-       u32 ltr_active_latency;
-       u32 ltr_idle_latency;
-       u32 ltr_l1off_latency;
-       enum dev_aspm_mode dev_aspm_mode;
-       u32 l1_snooze_delay;
-       u8 ltr_l1off_sspwrgate;
-       u8 ltr_l1off_snooze_sspwrgate;
-};
-
-#define rtsx_set_dev_flag(cr, flag) \
-       ((cr)->option.dev_flags |= (flag))
-#define rtsx_clear_dev_flag(cr, flag) \
-       ((cr)->option.dev_flags &= ~(flag))
-#define rtsx_check_dev_flag(cr, flag) \
-       ((cr)->option.dev_flags & (flag))
-
-struct rtsx_pcr {
-       struct pci_dev                  *pci;
-       unsigned int                    id;
-       int                             pcie_cap;
-       struct rtsx_cr_option   option;
-
-       /* pci resources */
-       unsigned long                   addr;
-       void __iomem                    *remap_addr;
-       int                             irq;
-
-       /* host reserved buffer */
-       void                            *rtsx_resv_buf;
-       dma_addr_t                      rtsx_resv_buf_addr;
-
-       void                            *host_cmds_ptr;
-       dma_addr_t                      host_cmds_addr;
-       int                             ci;
-
-       void                            *host_sg_tbl_ptr;
-       dma_addr_t                      host_sg_tbl_addr;
-       int                             sgi;
-
-       u32                             bier;
-       char                            trans_result;
-
-       unsigned int                    card_inserted;
-       unsigned int                    card_removed;
-       unsigned int                    card_exist;
-
-       struct delayed_work             carddet_work;
-       struct delayed_work             idle_work;
-
-       spinlock_t                      lock;
-       struct mutex                    pcr_mutex;
-       struct completion               *done;
-       struct completion               *finish_me;
-
-       unsigned int                    cur_clock;
-       bool                            remove_pci;
-       bool                            msi_en;
-
-#define EXTRA_CAPS_SD_SDR50            (1 << 0)
-#define EXTRA_CAPS_SD_SDR104           (1 << 1)
-#define EXTRA_CAPS_SD_DDR50            (1 << 2)
-#define EXTRA_CAPS_MMC_HSDDR           (1 << 3)
-#define EXTRA_CAPS_MMC_HS200           (1 << 4)
-#define EXTRA_CAPS_MMC_8BIT            (1 << 5)
-       u32                             extra_caps;
-
-#define IC_VER_A                       0
-#define IC_VER_B                       1
-#define IC_VER_C                       2
-#define IC_VER_D                       3
-       u8                              ic_version;
-
-       u8                              sd30_drive_sel_1v8;
-       u8                              sd30_drive_sel_3v3;
-       u8                              card_drive_sel;
-#define ASPM_L1_EN                     0x02
-       u8                              aspm_en;
-       bool                            aspm_enabled;
-
-#define PCR_MS_PMOS                    (1 << 0)
-#define PCR_REVERSE_SOCKET             (1 << 1)
-       u32                             flags;
-
-       u32                             tx_initial_phase;
-       u32                             rx_initial_phase;
-
-       const u32                       *sd_pull_ctl_enable_tbl;
-       const u32                       *sd_pull_ctl_disable_tbl;
-       const u32                       *ms_pull_ctl_enable_tbl;
-       const u32                       *ms_pull_ctl_disable_tbl;
-
-       const struct pcr_ops            *ops;
-       enum PDEV_STAT                  state;
-
-       u16                             reg_pm_ctrl3;
-
-       int                             num_slots;
-       struct rtsx_slot                *slots;
-
-       u8                              dma_error_count;
-};
-
-#define PID_524A       0x524A
-#define PID_5249               0x5249
-#define PID_5250               0x5250
-#define PID_525A       0x525A
-
-#define CHK_PCI_PID(pcr, pid)          ((pcr)->pci->device == (pid))
-#define PCI_VID(pcr)                   ((pcr)->pci->vendor)
-#define PCI_PID(pcr)                   ((pcr)->pci->device)
-#define is_version(pcr, pid, ver)                              \
-       (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver))
-#define pcr_dbg(pcr, fmt, arg...)                              \
-       dev_dbg(&(pcr)->pci->dev, fmt, ##arg)
-
-#define SDR104_PHASE(val)              ((val) & 0xFF)
-#define SDR50_PHASE(val)               (((val) >> 8) & 0xFF)
-#define DDR50_PHASE(val)               (((val) >> 16) & 0xFF)
-#define SDR104_TX_PHASE(pcr)           SDR104_PHASE((pcr)->tx_initial_phase)
-#define SDR50_TX_PHASE(pcr)            SDR50_PHASE((pcr)->tx_initial_phase)
-#define DDR50_TX_PHASE(pcr)            DDR50_PHASE((pcr)->tx_initial_phase)
-#define SDR104_RX_PHASE(pcr)           SDR104_PHASE((pcr)->rx_initial_phase)
-#define SDR50_RX_PHASE(pcr)            SDR50_PHASE((pcr)->rx_initial_phase)
-#define DDR50_RX_PHASE(pcr)            DDR50_PHASE((pcr)->rx_initial_phase)
-#define SET_CLOCK_PHASE(sdr104, sdr50, ddr50)  \
-                               (((ddr50) << 16) | ((sdr50) << 8) | (sdr104))
-
-void rtsx_pci_start_run(struct rtsx_pcr *pcr);
-int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data);
-int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data);
-int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
-int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
-void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr);
-void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
-               u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
-void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr);
-int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout);
-int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read, int timeout);
-int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read);
-void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read);
-int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int count, bool read, int timeout);
-int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
-int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
-int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card);
-int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card);
-int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
-               u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
-int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card);
-int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card);
-int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card);
-int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage);
-unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr);
-void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr);
-
-static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr)
-{
-       return (u8 *)(pcr->host_cmds_ptr);
-}
-
-static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr,
-               u8 mask, u8 append)
-{
-       int err;
-       u8 val;
-
-       err = pci_read_config_byte(pcr->pci, addr, &val);
-       if (err < 0)
-               return err;
-       return pci_write_config_byte(pcr->pci, addr, (val & mask) | append);
-}
-
-static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
-{
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg,     0xFF, val >> 24);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 1, 0xFF, val >> 16);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 2, 0xFF, val >> 8);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val);
-}
-
-static inline int rtsx_pci_update_phy(struct rtsx_pcr *pcr, u8 addr,
-       u16 mask, u16 append)
-{
-       int err;
-       u16 val;
-
-       err = rtsx_pci_read_phy_register(pcr, addr, &val);
-       if (err < 0)
-               return err;
-
-       return rtsx_pci_write_phy_register(pcr, addr, (val & mask) | append);
-}
-
-#endif
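
The rtsx_pci.h interface above is built around a small host command queue: a slot driver batches register write/check commands into the host command buffer and then issues the whole queue in one transfer. A minimal sketch of that pattern using only declarations from this header; example_enable_sd_clock, the mask choices and the 100 ms timeout are illustrative, not taken from an in-tree driver:

	#include <linux/mfd/rtsx_pci.h>	/* the (old) path of the header above */

	static int example_enable_sd_clock(struct rtsx_pcr *pcr)
	{
		rtsx_pci_init_cmd(pcr);		/* reset the command index (pcr->ci) */
		/* queue: enable the SD clock, then check that the clock is toggling */
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, SD_CLK_EN);
		rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_BUS_STAT,
				 SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
		/* issue the queued commands and wait up to 100 ms for completion */
		return rtsx_pci_send_cmd(pcr, 100);
	}
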
diff --git a/include/linux/mfd/rtsx_usb.h b/include/linux/mfd/rtsx_usb.h
deleted file mode 100644
index c446e4f..0000000
+++ /dev/null
@@ -1,628 +0,0 @@
-/* Driver for Realtek RTS5139 USB card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- *   Roger Tseng <rogerable@realtek.com>
- */
-
-#ifndef __RTSX_USB_H
-#define __RTSX_USB_H
-
-#include <linux/usb.h>
-
-/* related module names */
-#define RTSX_USB_SD_CARD       0
-#define RTSX_USB_MS_CARD       1
-
-/* endpoint numbers */
-#define EP_BULK_OUT            1
-#define EP_BULK_IN             2
-#define EP_INTR_IN             3
-
-/* USB vendor requests */
-#define RTSX_USB_REQ_REG_OP    0x00
-#define RTSX_USB_REQ_POLL      0x02
-
-/* miscellaneous parameters */
-#define MIN_DIV_N              60
-#define MAX_DIV_N              120
-
-#define MAX_PHASE              15
-#define RX_TUNING_CNT          3
-
-#define QFN24                  0
-#define LQFP48                 1
-#define CHECK_PKG(ucr, pkg)    ((ucr)->package == (pkg))
-
-/* data structures */
-struct rtsx_ucr {
-       u16                     vendor_id;
-       u16                     product_id;
-
-       int                     package;
-       u8                      ic_version;
-       bool                    is_rts5179;
-
-       unsigned int            cur_clk;
-
-       u8                      *cmd_buf;
-       unsigned int            cmd_idx;
-       u8                      *rsp_buf;
-
-       struct usb_device       *pusb_dev;
-       struct usb_interface    *pusb_intf;
-       struct usb_sg_request   current_sg;
-       unsigned char           *iobuf;
-       dma_addr_t              iobuf_dma;
-
-       struct timer_list       sg_timer;
-       struct mutex            dev_mutex;
-};
-
-/* buffer size */
-#define IOBUF_SIZE             1024
-
-/* prototypes of exported functions */
-extern int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status);
-
-extern int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data);
-extern int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask,
-               u8 data);
-
-extern int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask,
-               u8 data);
-extern int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr,
-               u8 *data);
-
-extern void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type,
-               u16 reg_addr, u8 mask, u8 data);
-extern int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout);
-extern int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout);
-extern int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe,
-                             void *buf, unsigned int len, int use_sg,
-                             unsigned int *act_len, int timeout);
-
-extern int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len);
-extern int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len);
-extern int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock,
-               u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
-extern int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card);
-
-/* card status */
-#define SD_CD          0x01
-#define MS_CD          0x02
-#define XD_CD          0x04
-#define CD_MASK                (SD_CD | MS_CD | XD_CD)
-#define SD_WP          0x08
-
-/* reader command field offset & parameters */
-#define READ_REG_CMD           0
-#define WRITE_REG_CMD          1
-#define CHECK_REG_CMD          2
-
-#define PACKET_TYPE            4
-#define CNT_H                  5
-#define CNT_L                  6
-#define STAGE_FLAG             7
-#define CMD_OFFSET             8
-#define SEQ_WRITE_DATA_OFFSET  12
-
-#define BATCH_CMD              0
-#define SEQ_READ               1
-#define SEQ_WRITE              2
-
-#define STAGE_R                        0x01
-#define STAGE_DI               0x02
-#define STAGE_DO               0x04
-#define STAGE_MS_STATUS                0x08
-#define STAGE_XD_STATUS                0x10
-#define MODE_C                 0x00
-#define MODE_CR                        (STAGE_R)
-#define MODE_CDIR              (STAGE_R | STAGE_DI)
-#define MODE_CDOR              (STAGE_R | STAGE_DO)
-
-#define EP0_OP_SHIFT           14
-#define EP0_READ_REG_CMD       2
-#define EP0_WRITE_REG_CMD      3
-
-#define rtsx_usb_cmd_hdr_tag(ucr)              \
-       do {                                    \
-               ucr->cmd_buf[0] = 'R';          \
-               ucr->cmd_buf[1] = 'T';          \
-               ucr->cmd_buf[2] = 'C';          \
-               ucr->cmd_buf[3] = 'R';          \
-       } while (0)
-
-static inline void rtsx_usb_init_cmd(struct rtsx_ucr *ucr)
-{
-       rtsx_usb_cmd_hdr_tag(ucr);
-       ucr->cmd_idx = 0;
-       ucr->cmd_buf[PACKET_TYPE] = BATCH_CMD;
-}
-
-/* internal register address */
-#define FPDCTL                         0xFC00
-#define SSC_DIV_N_0                    0xFC07
-#define SSC_CTL1                       0xFC09
-#define SSC_CTL2                       0xFC0A
-#define CFG_MODE                       0xFC0E
-#define CFG_MODE_1                     0xFC0F
-#define RCCTL                          0xFC14
-#define SOF_WDOG                       0xFC28
-#define SYS_DUMMY0                     0xFC30
-
-#define MS_BLKEND                      0xFD30
-#define MS_READ_START                  0xFD31
-#define MS_READ_COUNT                  0xFD32
-#define MS_WRITE_START                 0xFD33
-#define MS_WRITE_COUNT                 0xFD34
-#define MS_COMMAND                     0xFD35
-#define MS_OLD_BLOCK_0                 0xFD36
-#define MS_OLD_BLOCK_1                 0xFD37
-#define MS_NEW_BLOCK_0                 0xFD38
-#define MS_NEW_BLOCK_1                 0xFD39
-#define MS_LOG_BLOCK_0                 0xFD3A
-#define MS_LOG_BLOCK_1                 0xFD3B
-#define MS_BUS_WIDTH                   0xFD3C
-#define MS_PAGE_START                  0xFD3D
-#define MS_PAGE_LENGTH                 0xFD3E
-#define MS_CFG                         0xFD40
-#define MS_TPC                         0xFD41
-#define MS_TRANS_CFG                   0xFD42
-#define MS_TRANSFER                    0xFD43
-#define MS_INT_REG                     0xFD44
-#define MS_BYTE_CNT                    0xFD45
-#define MS_SECTOR_CNT_L                        0xFD46
-#define MS_SECTOR_CNT_H                        0xFD47
-#define MS_DBUS_H                      0xFD48
-
-#define CARD_DMA1_CTL                  0xFD5C
-#define CARD_PULL_CTL1                 0xFD60
-#define CARD_PULL_CTL2                 0xFD61
-#define CARD_PULL_CTL3                 0xFD62
-#define CARD_PULL_CTL4                 0xFD63
-#define CARD_PULL_CTL5                 0xFD64
-#define CARD_PULL_CTL6                 0xFD65
-#define CARD_EXIST                     0xFD6F
-#define CARD_INT_PEND                  0xFD71
-
-#define LDO_POWER_CFG                  0xFD7B
-
-#define SD_CFG1                                0xFDA0
-#define SD_CFG2                                0xFDA1
-#define SD_CFG3                                0xFDA2
-#define SD_STAT1                       0xFDA3
-#define SD_STAT2                       0xFDA4
-#define SD_BUS_STAT                    0xFDA5
-#define SD_PAD_CTL                     0xFDA6
-#define SD_SAMPLE_POINT_CTL            0xFDA7
-#define SD_PUSH_POINT_CTL              0xFDA8
-#define SD_CMD0                                0xFDA9
-#define SD_CMD1                                0xFDAA
-#define SD_CMD2                                0xFDAB
-#define SD_CMD3                                0xFDAC
-#define SD_CMD4                                0xFDAD
-#define SD_CMD5                                0xFDAE
-#define SD_BYTE_CNT_L                  0xFDAF
-#define SD_BYTE_CNT_H                  0xFDB0
-#define SD_BLOCK_CNT_L                 0xFDB1
-#define SD_BLOCK_CNT_H                 0xFDB2
-#define SD_TRANSFER                    0xFDB3
-#define SD_CMD_STATE                   0xFDB5
-#define SD_DATA_STATE                  0xFDB6
-#define SD_VPCLK0_CTL                  0xFC2A
-#define SD_VPCLK1_CTL                  0xFC2B
-#define SD_DCMPS0_CTL                  0xFC2C
-#define SD_DCMPS1_CTL                  0xFC2D
-
-#define CARD_DMA1_CTL                  0xFD5C
-
-#define HW_VERSION                     0xFC01
-
-#define SSC_CLK_FPGA_SEL               0xFC02
-#define CLK_DIV                                0xFC03
-#define SFSM_ED                                0xFC04
-
-#define CD_DEGLITCH_WIDTH              0xFC20
-#define CD_DEGLITCH_EN                 0xFC21
-#define AUTO_DELINK_EN                 0xFC23
-
-#define FPGA_PULL_CTL                  0xFC1D
-#define CARD_CLK_SOURCE                        0xFC2E
-
-#define CARD_SHARE_MODE                        0xFD51
-#define CARD_DRIVE_SEL                 0xFD52
-#define CARD_STOP                      0xFD53
-#define CARD_OE                                0xFD54
-#define CARD_AUTO_BLINK                        0xFD55
-#define CARD_GPIO                      0xFD56
-#define SD30_DRIVE_SEL                 0xFD57
-
-#define CARD_DATA_SOURCE               0xFD5D
-#define CARD_SELECT                    0xFD5E
-
-#define CARD_CLK_EN                    0xFD79
-#define CARD_PWR_CTL                   0xFD7A
-
-#define OCPCTL                         0xFD80
-#define OCPPARA1                       0xFD81
-#define OCPPARA2                       0xFD82
-#define OCPSTAT                                0xFD83
-
-#define HS_USB_STAT                    0xFE01
-#define HS_VCONTROL                    0xFE26
-#define HS_VSTAIN                      0xFE27
-#define HS_VLOADM                      0xFE28
-#define HS_VSTAOUT                     0xFE29
-
-#define MC_IRQ                         0xFF00
-#define MC_IRQEN                       0xFF01
-#define MC_FIFO_CTL                    0xFF02
-#define MC_FIFO_BC0                    0xFF03
-#define MC_FIFO_BC1                    0xFF04
-#define MC_FIFO_STAT                   0xFF05
-#define MC_FIFO_MODE                   0xFF06
-#define MC_FIFO_RD_PTR0                        0xFF07
-#define MC_FIFO_RD_PTR1                        0xFF08
-#define MC_DMA_CTL                     0xFF10
-#define MC_DMA_TC0                     0xFF11
-#define MC_DMA_TC1                     0xFF12
-#define MC_DMA_TC2                     0xFF13
-#define MC_DMA_TC3                     0xFF14
-#define MC_DMA_RST                     0xFF15
-
-#define RBUF_SIZE_MASK                 0xFBFF
-#define RBUF_BASE                      0xF000
-#define PPBUF_BASE1                    0xF800
-#define PPBUF_BASE2                    0xFA00
-
-/* internal register value macros */
-#define POWER_OFF                      0x03
-#define PARTIAL_POWER_ON               0x02
-#define POWER_ON                       0x00
-#define POWER_MASK                     0x03
-#define LDO3318_PWR_MASK               0x0C
-#define LDO_ON                         0x00
-#define LDO_SUSPEND                    0x08
-#define LDO_OFF                                0x0C
-#define DV3318_AUTO_PWR_OFF            0x10
-#define FORCE_LDO_POWERB               0x60
-
-/* LDO_POWER_CFG */
-#define TUNE_SD18_MASK                 0x1C
-#define TUNE_SD18_1V7                  0x00
-#define TUNE_SD18_1V8                  (0x01 << 2)
-#define TUNE_SD18_1V9                  (0x02 << 2)
-#define TUNE_SD18_2V0                  (0x03 << 2)
-#define TUNE_SD18_2V7                  (0x04 << 2)
-#define TUNE_SD18_2V8                  (0x05 << 2)
-#define TUNE_SD18_2V9                  (0x06 << 2)
-#define TUNE_SD18_3V3                  (0x07 << 2)
-
-/* CLK_DIV */
-#define CLK_CHANGE                     0x80
-#define CLK_DIV_1                      0x00
-#define CLK_DIV_2                      0x01
-#define CLK_DIV_4                      0x02
-#define CLK_DIV_8                      0x03
-
-#define SSC_POWER_MASK                 0x01
-#define SSC_POWER_DOWN                 0x01
-#define SSC_POWER_ON                   0x00
-
-#define FPGA_VER                       0x80
-#define HW_VER_MASK                    0x0F
-
-#define EXTEND_DMA1_ASYNC_SIGNAL       0x02
-
-/* CFG_MODE*/
-#define XTAL_FREE                      0x80
-#define CLK_MODE_MASK                  0x03
-#define CLK_MODE_12M_XTAL              0x00
-#define CLK_MODE_NON_XTAL              0x01
-#define CLK_MODE_24M_OSC               0x02
-#define CLK_MODE_48M_OSC               0x03
-
-/* CFG_MODE_1*/
-#define RTS5179                                0x02
-
-#define NYET_EN                                0x01
-#define NYET_MSAK                      0x01
-
-#define SD30_DRIVE_MASK                        0x07
-#define SD20_DRIVE_MASK                        0x03
-
-#define DISABLE_SD_CD                  0x08
-#define DISABLE_MS_CD                  0x10
-#define DISABLE_XD_CD                  0x20
-#define SD_CD_DEGLITCH_EN              0x01
-#define MS_CD_DEGLITCH_EN              0x02
-#define XD_CD_DEGLITCH_EN              0x04
-
-#define        CARD_SHARE_LQFP48               0x04
-#define        CARD_SHARE_QFN24                0x00
-#define CARD_SHARE_LQFP_SEL            0x04
-#define        CARD_SHARE_XD                   0x00
-#define        CARD_SHARE_SD                   0x01
-#define        CARD_SHARE_MS                   0x02
-#define CARD_SHARE_MASK                        0x03
-
-
-/* SD30_DRIVE_SEL */
-#define DRIVER_TYPE_A                  0x05
-#define DRIVER_TYPE_B                  0x03
-#define DRIVER_TYPE_C                  0x02
-#define DRIVER_TYPE_D                  0x01
-
-/* SD_BUS_STAT */
-#define        SD_CLK_TOGGLE_EN                0x80
-#define        SD_CLK_FORCE_STOP               0x40
-#define        SD_DAT3_STATUS                  0x10
-#define        SD_DAT2_STATUS                  0x08
-#define        SD_DAT1_STATUS                  0x04
-#define        SD_DAT0_STATUS                  0x02
-#define        SD_CMD_STATUS                   0x01
-
-/* SD_PAD_CTL */
-#define        SD_IO_USING_1V8                 0x80
-#define        SD_IO_USING_3V3                 0x7F
-#define        TYPE_A_DRIVING                  0x00
-#define        TYPE_B_DRIVING                  0x01
-#define        TYPE_C_DRIVING                  0x02
-#define        TYPE_D_DRIVING                  0x03
-
-/* CARD_CLK_EN */
-#define SD_CLK_EN                      0x04
-#define MS_CLK_EN                      0x08
-
-/* CARD_SELECT */
-#define SD_MOD_SEL                     2
-#define MS_MOD_SEL                     3
-
-/* CARD_SHARE_MODE */
-#define        CARD_SHARE_LQFP48               0x04
-#define        CARD_SHARE_QFN24                0x00
-#define CARD_SHARE_LQFP_SEL            0x04
-#define        CARD_SHARE_XD                   0x00
-#define        CARD_SHARE_SD                   0x01
-#define        CARD_SHARE_MS                   0x02
-#define CARD_SHARE_MASK                        0x03
-
-/* SSC_CTL1 */
-#define SSC_RSTB                       0x80
-#define SSC_8X_EN                      0x40
-#define SSC_FIX_FRAC                   0x20
-#define SSC_SEL_1M                     0x00
-#define SSC_SEL_2M                     0x08
-#define SSC_SEL_4M                     0x10
-#define SSC_SEL_8M                     0x18
-
-/* SSC_CTL2 */
-#define SSC_DEPTH_MASK                 0x03
-#define SSC_DEPTH_DISALBE              0x00
-#define SSC_DEPTH_2M                   0x01
-#define SSC_DEPTH_1M                   0x02
-#define SSC_DEPTH_512K                 0x03
-
-/* SD_VPCLK0_CTL */
-#define PHASE_CHANGE                   0x80
-#define PHASE_NOT_RESET                        0x40
-
-/* SD_TRANSFER */
-#define        SD_TRANSFER_START               0x80
-#define        SD_TRANSFER_END                 0x40
-#define SD_STAT_IDLE                   0x20
-#define        SD_TRANSFER_ERR                 0x10
-#define        SD_TM_NORMAL_WRITE              0x00
-#define        SD_TM_AUTO_WRITE_3              0x01
-#define        SD_TM_AUTO_WRITE_4              0x02
-#define        SD_TM_AUTO_READ_3               0x05
-#define        SD_TM_AUTO_READ_4               0x06
-#define        SD_TM_CMD_RSP                   0x08
-#define        SD_TM_AUTO_WRITE_1              0x09
-#define        SD_TM_AUTO_WRITE_2              0x0A
-#define        SD_TM_NORMAL_READ               0x0C
-#define        SD_TM_AUTO_READ_1               0x0D
-#define        SD_TM_AUTO_READ_2               0x0E
-#define        SD_TM_AUTO_TUNING               0x0F
-
-/* SD_CFG1 */
-#define SD_CLK_DIVIDE_0                        0x00
-#define        SD_CLK_DIVIDE_256               0xC0
-#define        SD_CLK_DIVIDE_128               0x80
-#define SD_CLK_DIVIDE_MASK             0xC0
-#define        SD_BUS_WIDTH_1BIT               0x00
-#define        SD_BUS_WIDTH_4BIT               0x01
-#define        SD_BUS_WIDTH_8BIT               0x02
-#define        SD_ASYNC_FIFO_RST               0x10
-#define        SD_20_MODE                      0x00
-#define        SD_DDR_MODE                     0x04
-#define        SD_30_MODE                      0x08
-
-/* SD_CFG2 */
-#define        SD_CALCULATE_CRC7               0x00
-#define        SD_NO_CALCULATE_CRC7            0x80
-#define        SD_CHECK_CRC16                  0x00
-#define        SD_NO_CHECK_CRC16               0x40
-#define SD_WAIT_CRC_TO_EN              0x20
-#define        SD_WAIT_BUSY_END                0x08
-#define        SD_NO_WAIT_BUSY_END             0x00
-#define        SD_CHECK_CRC7                   0x00
-#define        SD_NO_CHECK_CRC7                0x04
-#define        SD_RSP_LEN_0                    0x00
-#define        SD_RSP_LEN_6                    0x01
-#define        SD_RSP_LEN_17                   0x02
-#define        SD_RSP_TYPE_R0                  0x04
-#define        SD_RSP_TYPE_R1                  0x01
-#define        SD_RSP_TYPE_R1b                 0x09
-#define        SD_RSP_TYPE_R2                  0x02
-#define        SD_RSP_TYPE_R3                  0x05
-#define        SD_RSP_TYPE_R4                  0x05
-#define        SD_RSP_TYPE_R5                  0x01
-#define        SD_RSP_TYPE_R6                  0x01
-#define        SD_RSP_TYPE_R7                  0x01
-
-/* SD_STAT1 */
-#define        SD_CRC7_ERR                     0x80
-#define        SD_CRC16_ERR                    0x40
-#define        SD_CRC_WRITE_ERR                0x20
-#define        SD_CRC_WRITE_ERR_MASK           0x1C
-#define        GET_CRC_TIME_OUT                0x02
-#define        SD_TUNING_COMPARE_ERR           0x01
-
-/* SD_DATA_STATE */
-#define SD_DATA_IDLE                   0x80
-
-/* CARD_DATA_SOURCE */
-#define PINGPONG_BUFFER                        0x01
-#define RING_BUFFER                    0x00
-
-/* CARD_OE */
-#define SD_OUTPUT_EN                   0x04
-#define MS_OUTPUT_EN                   0x08
-
-/* CARD_STOP */
-#define SD_STOP                                0x04
-#define MS_STOP                                0x08
-#define SD_CLR_ERR                     0x40
-#define MS_CLR_ERR                     0x80
-
-/* CARD_CLK_SOURCE */
-#define CRC_FIX_CLK                    (0x00 << 0)
-#define CRC_VAR_CLK0                   (0x01 << 0)
-#define CRC_VAR_CLK1                   (0x02 << 0)
-#define SD30_FIX_CLK                   (0x00 << 2)
-#define SD30_VAR_CLK0                  (0x01 << 2)
-#define SD30_VAR_CLK1                  (0x02 << 2)
-#define SAMPLE_FIX_CLK                 (0x00 << 4)
-#define SAMPLE_VAR_CLK0                        (0x01 << 4)
-#define SAMPLE_VAR_CLK1                        (0x02 << 4)
-
-/* SD_SAMPLE_POINT_CTL */
-#define        DDR_FIX_RX_DAT                  0x00
-#define        DDR_VAR_RX_DAT                  0x80
-#define        DDR_FIX_RX_DAT_EDGE             0x00
-#define        DDR_FIX_RX_DAT_14_DELAY         0x40
-#define        DDR_FIX_RX_CMD                  0x00
-#define        DDR_VAR_RX_CMD                  0x20
-#define        DDR_FIX_RX_CMD_POS_EDGE         0x00
-#define        DDR_FIX_RX_CMD_14_DELAY         0x10
-#define        SD20_RX_POS_EDGE                0x00
-#define        SD20_RX_14_DELAY                0x08
-#define SD20_RX_SEL_MASK               0x08
-
-/* SD_PUSH_POINT_CTL */
-#define        DDR_FIX_TX_CMD_DAT              0x00
-#define        DDR_VAR_TX_CMD_DAT              0x80
-#define        DDR_FIX_TX_DAT_14_TSU           0x00
-#define        DDR_FIX_TX_DAT_12_TSU           0x40
-#define        DDR_FIX_TX_CMD_NEG_EDGE         0x00
-#define        DDR_FIX_TX_CMD_14_AHEAD         0x20
-#define        SD20_TX_NEG_EDGE                0x00
-#define        SD20_TX_14_AHEAD                0x10
-#define SD20_TX_SEL_MASK               0x10
-#define        DDR_VAR_SDCLK_POL_SWAP          0x01
-
-/* MS_CFG */
-#define        SAMPLE_TIME_RISING              0x00
-#define        SAMPLE_TIME_FALLING             0x80
-#define        PUSH_TIME_DEFAULT               0x00
-#define        PUSH_TIME_ODD                   0x40
-#define        NO_EXTEND_TOGGLE                0x00
-#define        EXTEND_TOGGLE_CHK               0x20
-#define        MS_BUS_WIDTH_1                  0x00
-#define        MS_BUS_WIDTH_4                  0x10
-#define        MS_BUS_WIDTH_8                  0x18
-#define        MS_2K_SECTOR_MODE               0x04
-#define        MS_512_SECTOR_MODE              0x00
-#define        MS_TOGGLE_TIMEOUT_EN            0x00
-#define        MS_TOGGLE_TIMEOUT_DISEN         0x01
-#define MS_NO_CHECK_INT                        0x02
-
-/* MS_TRANS_CFG */
-#define        WAIT_INT                        0x80
-#define        NO_WAIT_INT                     0x00
-#define        NO_AUTO_READ_INT_REG            0x00
-#define        AUTO_READ_INT_REG               0x40
-#define        MS_CRC16_ERR                    0x20
-#define        MS_RDY_TIMEOUT                  0x10
-#define        MS_INT_CMDNK                    0x08
-#define        MS_INT_BREQ                     0x04
-#define        MS_INT_ERR                      0x02
-#define        MS_INT_CED                      0x01
-
-/* MS_TRANSFER */
-#define        MS_TRANSFER_START               0x80
-#define        MS_TRANSFER_END                 0x40
-#define        MS_TRANSFER_ERR                 0x20
-#define        MS_BS_STATE                     0x10
-#define        MS_TM_READ_BYTES                0x00
-#define        MS_TM_NORMAL_READ               0x01
-#define        MS_TM_WRITE_BYTES               0x04
-#define        MS_TM_NORMAL_WRITE              0x05
-#define        MS_TM_AUTO_READ                 0x08
-#define        MS_TM_AUTO_WRITE                0x0C
-#define MS_TM_SET_CMD                  0x06
-#define MS_TM_COPY_PAGE                        0x07
-#define MS_TM_MULTI_READ               0x02
-#define MS_TM_MULTI_WRITE              0x03
-
-/* MC_FIFO_CTL */
-#define FIFO_FLUSH                     0x01
-
-/* MC_DMA_RST */
-#define DMA_RESET  0x01
-
-/* MC_DMA_CTL */
-#define DMA_TC_EQ_0                    0x80
-#define DMA_DIR_TO_CARD                        0x00
-#define DMA_DIR_FROM_CARD              0x02
-#define DMA_EN                         0x01
-#define DMA_128                                (0 << 2)
-#define DMA_256                                (1 << 2)
-#define DMA_512                                (2 << 2)
-#define DMA_1024                       (3 << 2)
-#define DMA_PACK_SIZE_MASK             0x0C
-
-/* CARD_INT_PEND */
-#define XD_INT                         0x10
-#define MS_INT                         0x08
-#define SD_INT                         0x04
-
-/* LED operations*/
-static inline int rtsx_usb_turn_on_led(struct rtsx_ucr *ucr)
-{
-       return  rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x02);
-}
-
-static inline int rtsx_usb_turn_off_led(struct rtsx_ucr *ucr)
-{
-       return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x03);
-}
-
-/* HW error clearing */
-static inline void rtsx_usb_clear_fsm_err(struct rtsx_ucr *ucr)
-{
-       rtsx_usb_ep0_write_register(ucr, SFSM_ED, 0xf8, 0xf8);
-}
-
-static inline void rtsx_usb_clear_dma_err(struct rtsx_ucr *ucr)
-{
-       rtsx_usb_ep0_write_register(ucr, MC_FIFO_CTL,
-                       FIFO_FLUSH, FIFO_FLUSH);
-       rtsx_usb_ep0_write_register(ucr, MC_DMA_RST, DMA_RESET, DMA_RESET);
-}
-#endif /* __RTS51139_H */
index 77c7cf4..605f622 100644 (file)
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * STM32 Low-Power Timer parent driver.
- *
  * Copyright (C) STMicroelectronics 2017
- *
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
  * Inspired by Benjamin Gaignard's stm32-timers driver
- *
- * License terms:  GNU General Public License (GPL), version 2
  */
 
 #ifndef _LINUX_STM32_LPTIMER_H_
index ce7346e..2aadab6 100644 (file)
@@ -1,9 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) STMicroelectronics 2016
- *
  * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms:  GNU General Public License (GPL), version 2
  */
 
 #ifndef _LINUX_STM32_GPTIMER_H_
index e1cfe91..396a103 100644 (file)
                writew((val) >> 16, (addr) + 2); \
        } while (0)
 
-#define CNF_CMD     0x04
-#define CNF_CTL_BASE   0x10
-#define CNF_INT_PIN  0x3d
-#define CNF_STOP_CLK_CTL 0x40
-#define CNF_GCLK_CTL 0x41
-#define CNF_SD_CLK_MODE 0x42
-#define CNF_PIN_STATUS 0x44
-#define CNF_PWR_CTL_1 0x48
-#define CNF_PWR_CTL_2 0x49
-#define CNF_PWR_CTL_3 0x4a
-#define CNF_CARD_DETECT_MODE 0x4c
-#define CNF_SD_SLOT 0x50
-#define CNF_EXT_GCLK_CTL_1 0xf0
-#define CNF_EXT_GCLK_CTL_2 0xf1
-#define CNF_EXT_GCLK_CTL_3 0xf9
-#define CNF_SD_LED_EN_1 0xfa
-#define CNF_SD_LED_EN_2 0xfe
-
-#define   SDCREN 0x2   /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
-
 #define sd_config_write8(base, shift, reg, val) \
        tmio_iowrite8((val), (base) + ((reg) << (shift)))
 #define sd_config_write16(base, shift, reg, val) \
index 895ec0c..a2246cf 100644 (file)
@@ -54,7 +54,7 @@ static inline struct page *new_page_nodemask(struct page *page,
        new_page = __alloc_pages_nodemask(gfp_mask, order,
                                preferred_nid, nodemask);
 
-       if (new_page && PageTransHuge(page))
+       if (new_page && PageTransHuge(new_page))
                prep_transhuge_page(new_page);
 
        return new_page;
index a886b51..a061042 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/kernel.h>
 #include <linux/completion.h>
 #include <linux/pci.h>
+#include <linux/irq.h>
 #include <linux/spinlock_types.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
@@ -556,6 +557,7 @@ struct mlx5_core_sriov {
 };
 
 struct mlx5_irq_info {
+       cpumask_var_t mask;
        char name[MLX5_MAX_IRQ_NAME];
 };
 
@@ -1048,7 +1050,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       enum mlx5_eq_type type);
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
-int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+void mlx5_stop_eqs(struct mlx5_core_dev *dev);
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1164,6 +1166,10 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+                                u64 *values,
+                                int num_counters,
+                                size_t *offsets);
 struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
 void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
 
@@ -1226,7 +1232,23 @@ enum {
 static inline const struct cpumask *
 mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
 {
-       return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
+       const struct cpumask *mask;
+       struct irq_desc *desc;
+       unsigned int irq;
+       int eqn;
+       int err;
+
+       err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
+       if (err)
+               return NULL;
+
+       desc = irq_to_desc(irq);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+       mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
+#else
+       mask = desc->irq_common_data.affinity;
+#endif
+       return mask;
 }
 
 #endif /* MLX5_DRIVER_H */
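The reworked mlx5_get_vector_affinity() above resolves a completion vector to its IRQ's effective affinity mask (and may return NULL when the vector-to-EQ lookup fails). Below is a minimal, hypothetical sketch of a caller that only wants a CPU hint for a vector; the helper name and the fallback policy are illustrative and not part of the patch:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/mlx5/driver.h>

/* Hypothetical helper: pick a CPU for a completion vector, falling back to
 * the local CPU when no affinity information is available. */
static int pick_comp_vector_cpu(struct mlx5_core_dev *mdev, int vector)
{
        const struct cpumask *mask = mlx5_get_vector_affinity(mdev, vector);

        if (!mask || cpumask_empty(mask))
                return raw_smp_processor_id();

        return cpumask_first(mask);
}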
index 38a7577..1391a82 100644 (file)
@@ -147,7 +147,7 @@ enum {
        MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
        MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
        MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
-       MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
+       MLX5_CMD_OP_SET_PP_RATE_LIMIT             = 0x780,
        MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
        MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT      = 0x782,
        MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT     = 0x783,
@@ -1027,8 +1027,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         log_max_wq_sz[0x5];
 
        u8         nic_vport_change_event[0x1];
-       u8         disable_local_lb[0x1];
-       u8         reserved_at_3e2[0x9];
+       u8         disable_local_lb_uc[0x1];
+       u8         disable_local_lb_mc[0x1];
+       u8         reserved_at_3e3[0x8];
        u8         log_max_vlan_list[0x5];
        u8         reserved_at_3f0[0x3];
        u8         log_max_current_mc_list[0x5];
@@ -7239,7 +7240,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
        u8         vxlan_udp_port[0x10];
 };
 
-struct mlx5_ifc_set_rate_limit_out_bits {
+struct mlx5_ifc_set_pp_rate_limit_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
 
@@ -7248,7 +7249,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
        u8         reserved_at_40[0x40];
 };
 
-struct mlx5_ifc_set_rate_limit_in_bits {
+struct mlx5_ifc_set_pp_rate_limit_in_bits {
        u8         opcode[0x10];
        u8         reserved_at_10[0x10];
 
@@ -7261,6 +7262,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
        u8         reserved_at_60[0x20];
 
        u8         rate_limit[0x20];
+
+       u8         reserved_at_a0[0x160];
 };
 
 struct mlx5_ifc_access_register_out_bits {
index ee07314..ea818ff 100644 (file)
@@ -377,6 +377,7 @@ enum page_entry_size {
 struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
+       int (*split)(struct vm_area_struct * area, unsigned long addr);
        int (*mremap)(struct vm_area_struct * area);
        int (*fault)(struct vm_fault *vmf);
        int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
@@ -1379,6 +1380,19 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                    unsigned int gup_flags, struct page **pages, int *locked);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                    struct page **pages, unsigned int gup_flags);
+#ifdef CONFIG_FS_DAX
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+                           unsigned int gup_flags, struct page **pages,
+                           struct vm_area_struct **vmas);
+#else
+static inline long get_user_pages_longterm(unsigned long start,
+               unsigned long nr_pages, unsigned int gup_flags,
+               struct page **pages, struct vm_area_struct **vmas)
+{
+       return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+}
+#endif /* CONFIG_FS_DAX */
+
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);
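get_user_pages_longterm() added above is the FS-DAX-aware pinning variant; when CONFIG_FS_DAX is disabled it simply falls back to get_user_pages(). A minimal sketch of a long-lived pin under assumed conditions follows; the helper name, the FOLL_WRITE choice and the locking context are illustrative only:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical example: pin nr_pages of a user buffer for long-term use.
 * Returns the number of pages pinned or a negative errno. */
static long pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
                            struct page **pages)
{
        long pinned;

        down_read(&current->mm->mmap_sem);
        pinned = get_user_pages_longterm(uaddr, nr_pages, FOLL_WRITE,
                                         pages, NULL);
        up_read(&current->mm->mmap_sem);

        return pinned;
}

/* Each successfully pinned page must later be released with put_page(). */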
 
index 49b4257..f3075d6 100644 (file)
@@ -85,7 +85,7 @@ struct netlink_ext_ack {
  * to the lack of an output buffer.)
  */
 #define NL_SET_ERR_MSG(extack, msg) do {               \
-       static const char __msg[] = (msg);              \
+       static const char __msg[] = msg;                \
        struct netlink_ext_ack *__extack = (extack);    \
                                                        \
        if (__extack)                                   \
@@ -101,7 +101,7 @@ struct netlink_ext_ack {
 } while (0)
 
 #define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do {    \
-       static const char __msg[] = (msg);              \
+       static const char __msg[] = msg;                \
        struct netlink_ext_ack *__extack = (extack);    \
                                                        \
        if (__extack) {                                 \
index 01c91d8..5bad038 100644 (file)
@@ -66,6 +66,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
        return tsk->signal->oom_mm;
 }
 
+/*
+ * Use this helper if tsk->mm != mm and the victim mm needs special
+ * handling. This is guaranteed to stay true once set.
+ */
+static inline bool mm_is_oom_victim(struct mm_struct *mm)
+{
+       return test_bit(MMF_OOM_VICTIM, &mm->flags);
+}
+
 /*
  * Checks whether a page fault on the given mm is still reliable.
  * This is no longer true if the oom reaper started to reap the
index 0403894..c170c92 100644 (file)
@@ -1674,6 +1674,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
                                                unsigned int devfn)
 { return NULL; }
+static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
+                                       unsigned int bus, unsigned int devfn)
+{ return NULL; }
 
 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
index 2c9c87d..7546822 100644 (file)
@@ -15,6 +15,7 @@
 #define _LINUX_PERF_EVENT_H
 
 #include <uapi/linux/perf_event.h>
+#include <uapi/linux/bpf_perf_event.h>
 
 /*
  * Kernel-internal data types and definitions:
@@ -787,7 +788,7 @@ struct perf_output_handle {
 };
 
 struct bpf_perf_event_data_kern {
-       struct pt_regs *regs;
+       bpf_user_pt_regs_t *regs;
        struct perf_sample_data *data;
        struct perf_event *event;
 };
@@ -1177,6 +1178,9 @@ extern void perf_bp_event(struct perf_event *event, void *data);
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
 # define perf_instruction_pointer(regs)        instruction_pointer(regs)
 #endif
+#ifndef perf_arch_bpf_user_pt_regs
+# define perf_arch_bpf_user_pt_regs(regs) regs
+#endif
 
 static inline bool has_branch_stack(struct perf_event *event)
 {
index 65d3911..e723b78 100644 (file)
@@ -556,9 +556,10 @@ struct pm_subsys_data {
  * These flags can be set by device drivers at the probe time.  They need not be
  * cleared by the drivers as the driver core will take care of that.
  *
- * NEVER_SKIP: Do not skip system suspend/resume callbacks for the device.
+ * NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device.
  * SMART_PREPARE: Check the return value of the driver's ->prepare callback.
  * SMART_SUSPEND: No need to resume the device from runtime suspend.
+ * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible.
  *
  * Setting SMART_PREPARE instructs bus types and PM domains which may want
  * system suspend/resume callbacks to be skipped for the device to return 0 from
@@ -572,10 +573,14 @@ struct pm_subsys_data {
  * necessary from the driver's perspective.  It also may cause them to skip
  * invocations of the ->suspend_late and ->suspend_noirq callbacks provided by
  * the driver if they decide to leave the device in runtime suspend.
+ *
+ * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the
+ * driver prefers the device to be left in suspend after system resume.
  */
-#define DPM_FLAG_NEVER_SKIP    BIT(0)
-#define DPM_FLAG_SMART_PREPARE BIT(1)
-#define DPM_FLAG_SMART_SUSPEND BIT(2)
+#define DPM_FLAG_NEVER_SKIP            BIT(0)
+#define DPM_FLAG_SMART_PREPARE         BIT(1)
+#define DPM_FLAG_SMART_SUSPEND         BIT(2)
+#define DPM_FLAG_LEAVE_SUSPENDED       BIT(3)
 
 struct dev_pm_info {
        pm_message_t            power_state;
@@ -597,6 +602,8 @@ struct dev_pm_info {
        bool                    wakeup_path:1;
        bool                    syscore:1;
        bool                    no_pm_callbacks:1;      /* Owned by the PM core */
+       unsigned int            must_resume:1;  /* Owned by the PM core */
+       unsigned int            may_skip_resume:1;      /* Set by subsystems */
 #else
        unsigned int            should_wakeup:1;
 #endif
@@ -765,6 +772,8 @@ extern int pm_generic_poweroff_late(struct device *dev);
 extern int pm_generic_poweroff(struct device *dev);
 extern void pm_generic_complete(struct device *dev);
 
+extern void dev_pm_skip_next_resume_phases(struct device *dev);
+extern bool dev_pm_may_skip_resume(struct device *dev);
 extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
 
 #else /* !CONFIG_PM_SLEEP */
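The flag documentation above describes how DPM_FLAG_SMART_SUSPEND and the new DPM_FLAG_LEAVE_SUSPENDED are meant to be used by drivers. A minimal sketch of a driver opting in at probe time follows; it assumes the dev_pm_set_driver_flags() helper from the same PM-core rework is available, and the driver/probe names are hypothetical:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hypothetical probe: reuse runtime suspend for system sleep and let the
 * core leave the device suspended across system resume when possible. */
static int foo_probe(struct platform_device *pdev)
{
        dev_pm_set_driver_flags(&pdev->dev,
                                DPM_FLAG_SMART_SUSPEND |
                                DPM_FLAG_LEAVE_SUSPENDED);

        return 0;
}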
index 4c2cba7..4238dde 100644 (file)
@@ -88,6 +88,11 @@ static inline bool device_may_wakeup(struct device *dev)
        return dev->power.can_wakeup && !!dev->power.wakeup;
 }
 
+static inline void device_set_wakeup_path(struct device *dev)
+{
+       dev->power.wakeup_path = true;
+}
+
 /* drivers/base/power/wakeup.c */
 extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name);
 extern struct wakeup_source *wakeup_source_create(const char *name);
@@ -174,6 +179,8 @@ static inline bool device_may_wakeup(struct device *dev)
        return dev->power.can_wakeup && dev->power.should_wakeup;
 }
 
+static inline void device_set_wakeup_path(struct device *dev) {}
+
 static inline void __pm_stay_awake(struct wakeup_source *ws) {}
 
 static inline void pm_stay_awake(struct device *dev) {}
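device_set_wakeup_path() introduced above records that a device sits on a wakeup path (dev->power.wakeup_path), with a no-op stub when CONFIG_PM_SLEEP is off. A minimal sketch of a middle-layer ->suspend callback using it together with device_may_wakeup(); the bus and callback names are hypothetical:

#include <linux/device.h>
#include <linux/pm_wakeup.h>

/* Hypothetical bus ->suspend callback: if the device may wake the system,
 * mark it as part of a wakeup path so its ancestors stay functional. */
static int foo_bus_suspend(struct device *dev)
{
        if (device_may_wakeup(dev))
                device_set_wakeup_path(dev);

        return 0;
}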
index b3ea01a..0174883 100644 (file)
@@ -1,43 +1,11 @@
-/*
- *  Copyright (C) Intel 2011
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * The PTI (Parallel Trace Interface) driver directs trace data routed from
- * various parts in the system out through the Intel Penwell PTI port and
- * out of the mobile device for analysis with a debugging tool
- * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
- * compact JTAG, standard.
- *
- * This header file will allow other parts of the OS to use the
- * interface to write out it's contents for debugging a mobile system.
- */
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _INCLUDE_PTI_H
+#define _INCLUDE_PTI_H
 
-#ifndef PTI_H_
-#define PTI_H_
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#include <asm/pti.h>
+#else
+static inline void pti_init(void) { }
+#endif
 
-/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
-#define PTI_LASTDWORD_DTS      0x30
-
-/* basic structure used as a write address to the PTI HW */
-struct pti_masterchannel {
-       u8 master;
-       u8 channel;
-};
-
-/* the following functions are defined in misc/pti.c */
-void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
-struct pti_masterchannel *pti_request_masterchannel(u8 type,
-                                                   const char *thread_name);
-void pti_release_masterchannel(struct pti_masterchannel *mc);
-
-#endif /*PTI_H_*/
+#endif
index 37b4bb2..d72b2e7 100644 (file)
@@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
 
 /* Note: callers invoking this in a loop must use a compiler barrier,
  * for example cpu_relax(). Callers must hold producer_lock.
+ * Callers are responsible for making sure that the pointer being queued
+ * points to valid data.
  */
 static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
 {
        if (unlikely(!r->size) || r->queue[r->producer])
                return -ENOSPC;
 
+       /* Make sure the pointer we are storing points to valid data. */
+       /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
+       smp_wmb();
+
        r->queue[r->producer++] = ptr;
        if (unlikely(r->producer >= r->size))
                r->producer = 0;
@@ -168,6 +174,15 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
  * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
  * If ring is never resized, and if the pointer is merely
  * tested, there's no need to take the lock - see e.g.  __ptr_ring_empty.
+ * However, if called outside the lock, and if some other CPU
+ * consumes ring entries at the same time, the value returned
+ * is not guaranteed to be correct.
+ * In this case - to avoid incorrectly detecting the ring
+ * as empty - the CPU consuming the ring entries is responsible
+ * for either consuming all ring entries until the ring is empty,
+ * or synchronizing with some other CPU and causing it to
+ * execute __ptr_ring_peek and/or consume the ring entries
+ * after the synchronization point.
  */
 static inline void *__ptr_ring_peek(struct ptr_ring *r)
 {
@@ -176,10 +191,7 @@ static inline void *__ptr_ring_peek(struct ptr_ring *r)
        return NULL;
 }
 
-/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). Callers must take consumer_lock
- * if the ring is ever resized - see e.g. ptr_ring_empty.
- */
+/* See __ptr_ring_peek above for locking rules. */
 static inline bool __ptr_ring_empty(struct ptr_ring *r)
 {
        return !__ptr_ring_peek(r);
@@ -275,6 +287,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
        if (ptr)
                __ptr_ring_discard_one(r);
 
+       /* Make sure anyone accessing data through the pointer is up to date. */
+       /* Pairs with smp_wmb in __ptr_ring_produce. */
+       smp_read_barrier_depends();
        return ptr;
 }
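The smp_wmb() added to __ptr_ring_produce() pairs with the smp_read_barrier_depends() added to __ptr_ring_consume() above, so a consumer never observes a published pointer before the data it points to. Below is a minimal sketch of the intended producer/consumer pattern built on the locked ptr_ring_produce()/ptr_ring_consume() wrappers; the item structure and ring size are illustrative assumptions:

#include <linux/kernel.h>
#include <linux/ptr_ring.h>
#include <linux/slab.h>

struct item {
        int payload;
};

static struct ptr_ring ring;

static int ring_setup(void)
{
        /* 128-slot ring; ptr_ring_init() allocates zeroed slots. */
        return ptr_ring_init(&ring, 128, GFP_KERNEL);
}

static int producer_push(int value)
{
        struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);

        if (!it)
                return -ENOMEM;
        it->payload = value;

        /* Takes producer_lock; the smp_wmb() runs before the pointer is
         * published, so consumers see an initialized item. */
        if (ptr_ring_produce(&ring, it)) {
                kfree(it);              /* ring full */
                return -ENOSPC;
        }

        return 0;
}

static void consumer_drain(void)
{
        struct item *it;

        /* Takes consumer_lock; the dependency barrier orders the payload
         * read after the pointer read. */
        while ((it = ptr_ring_consume(&ring))) {
                pr_info("got %d\n", it->payload);
                kfree(it);
        }
}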
 
index d574361..fcbeed4 100644 (file)
@@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
                            struct rb_root *root);
 extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
                                struct rb_root *root);
+extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+                                  struct rb_root_cached *root);
 
 static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
                                struct rb_node **rb_link)
index a328e81..e4b257f 100644 (file)
@@ -100,44 +100,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
                first->pprev = &n->next;
 }
 
-/**
- * hlist_nulls_add_tail_rcu
- * @n: the element to add to the hash list.
- * @h: the list to add to.
- *
- * Description:
- * Adds the specified element to the end of the specified hlist_nulls,
- * while permitting racing traversals.  NOTE: tail insertion requires
- * list traversal.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
- * or hlist_nulls_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs.  Regardless of the type of CPU, the
- * list-traversal primitive must be guarded by rcu_read_lock().
- */
-static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
-                                       struct hlist_nulls_head *h)
-{
-       struct hlist_nulls_node *i, *last = NULL;
-
-       for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
-            i = hlist_nulls_next_rcu(i))
-               last = i;
-
-       if (last) {
-               n->next = last->next;
-               n->pprev = &last->next;
-               rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
-       } else {
-               hlist_nulls_add_head_rcu(n, h);
-       }
-}
-
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:      the type * to use as a loop cursor.
diff --git a/include/linux/rtsx_common.h b/include/linux/rtsx_common.h
new file mode 100644 (file)
index 0000000..443176e
--- /dev/null
@@ -0,0 +1,50 @@
+/* Driver for Realtek driver-based card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#ifndef __RTSX_COMMON_H
+#define __RTSX_COMMON_H
+
+#define DRV_NAME_RTSX_PCI              "rtsx_pci"
+#define DRV_NAME_RTSX_PCI_SDMMC                "rtsx_pci_sdmmc"
+#define DRV_NAME_RTSX_PCI_MS           "rtsx_pci_ms"
+
+#define RTSX_REG_PAIR(addr, val)       (((u32)(addr) << 16) | (u8)(val))
+
+#define RTSX_SSC_DEPTH_4M              0x01
+#define RTSX_SSC_DEPTH_2M              0x02
+#define RTSX_SSC_DEPTH_1M              0x03
+#define RTSX_SSC_DEPTH_500K            0x04
+#define RTSX_SSC_DEPTH_250K            0x05
+
+#define RTSX_SD_CARD                   0
+#define RTSX_MS_CARD                   1
+
+#define CLK_TO_DIV_N                   0
+#define DIV_N_TO_CLK                   1
+
+struct platform_device;
+
+struct rtsx_slot {
+       struct platform_device  *p_dev;
+       void                    (*card_event)(struct platform_device *p_dev);
+};
+
+#endif
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
new file mode 100644 (file)
index 0000000..478acf6
--- /dev/null
@@ -0,0 +1,1367 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#ifndef __RTSX_PCI_H
+#define __RTSX_PCI_H
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/rtsx_common.h>
+
+#define MAX_RW_REG_CNT                 1024
+
+#define RTSX_HCBAR                     0x00
+#define RTSX_HCBCTLR                   0x04
+#define   STOP_CMD                     (0x01 << 28)
+#define   READ_REG_CMD                 0
+#define   WRITE_REG_CMD                        1
+#define   CHECK_REG_CMD                        2
+
+#define RTSX_HDBAR                     0x08
+#define   SG_INT                       0x04
+#define   SG_END                       0x02
+#define   SG_VALID                     0x01
+#define   SG_NO_OP                     0x00
+#define   SG_TRANS_DATA                        (0x02 << 4)
+#define   SG_LINK_DESC                 (0x03 << 4)
+#define RTSX_HDBCTLR                   0x0C
+#define   SDMA_MODE                    0x00
+#define   ADMA_MODE                    (0x02 << 26)
+#define   STOP_DMA                     (0x01 << 28)
+#define   TRIG_DMA                     (0x01 << 31)
+
+#define RTSX_HAIMR                     0x10
+#define   HAIMR_TRANS_START            (0x01 << 31)
+#define   HAIMR_READ                   0x00
+#define   HAIMR_WRITE                  (0x01 << 30)
+#define   HAIMR_READ_START             (HAIMR_TRANS_START | HAIMR_READ)
+#define   HAIMR_WRITE_START            (HAIMR_TRANS_START | HAIMR_WRITE)
+#define   HAIMR_TRANS_END                      (HAIMR_TRANS_START)
+
+#define RTSX_BIPR                      0x14
+#define   CMD_DONE_INT                 (1 << 31)
+#define   DATA_DONE_INT                        (1 << 30)
+#define   TRANS_OK_INT                 (1 << 29)
+#define   TRANS_FAIL_INT               (1 << 28)
+#define   XD_INT                       (1 << 27)
+#define   MS_INT                       (1 << 26)
+#define   SD_INT                       (1 << 25)
+#define   GPIO0_INT                    (1 << 24)
+#define   OC_INT                       (1 << 23)
+#define   SD_WRITE_PROTECT             (1 << 19)
+#define   XD_EXIST                     (1 << 18)
+#define   MS_EXIST                     (1 << 17)
+#define   SD_EXIST                     (1 << 16)
+#define   DELINK_INT                   GPIO0_INT
+#define   MS_OC_INT                    (1 << 23)
+#define   SD_OC_INT                    (1 << 22)
+
+#define CARD_INT               (XD_INT | MS_INT | SD_INT)
+#define NEED_COMPLETE_INT      (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
+#define RTSX_INT               (CMD_DONE_INT | NEED_COMPLETE_INT | \
+                                       CARD_INT | GPIO0_INT | OC_INT)
+#define CARD_EXIST             (XD_EXIST | MS_EXIST | SD_EXIST)
+
+#define RTSX_BIER                      0x18
+#define   CMD_DONE_INT_EN              (1 << 31)
+#define   DATA_DONE_INT_EN             (1 << 30)
+#define   TRANS_OK_INT_EN              (1 << 29)
+#define   TRANS_FAIL_INT_EN            (1 << 28)
+#define   XD_INT_EN                    (1 << 27)
+#define   MS_INT_EN                    (1 << 26)
+#define   SD_INT_EN                    (1 << 25)
+#define   GPIO0_INT_EN                 (1 << 24)
+#define   OC_INT_EN                    (1 << 23)
+#define   DELINK_INT_EN                        GPIO0_INT_EN
+#define   MS_OC_INT_EN                 (1 << 23)
+#define   SD_OC_INT_EN                 (1 << 22)
+
+
+/*
+ * macros for easy use
+ */
+#define rtsx_pci_writel(pcr, reg, value) \
+       iowrite32(value, (pcr)->remap_addr + reg)
+#define rtsx_pci_readl(pcr, reg) \
+       ioread32((pcr)->remap_addr + reg)
+#define rtsx_pci_writew(pcr, reg, value) \
+       iowrite16(value, (pcr)->remap_addr + reg)
+#define rtsx_pci_readw(pcr, reg) \
+       ioread16((pcr)->remap_addr + reg)
+#define rtsx_pci_writeb(pcr, reg, value) \
+       iowrite8(value, (pcr)->remap_addr + reg)
+#define rtsx_pci_readb(pcr, reg) \
+       ioread8((pcr)->remap_addr + reg)
+
+#define rtsx_pci_read_config_byte(pcr, where, val) \
+       pci_read_config_byte((pcr)->pci, where, val)
+
+#define rtsx_pci_write_config_byte(pcr, where, val) \
+       pci_write_config_byte((pcr)->pci, where, val)
+
+#define rtsx_pci_read_config_dword(pcr, where, val) \
+       pci_read_config_dword((pcr)->pci, where, val)
+
+#define rtsx_pci_write_config_dword(pcr, where, val) \
+       pci_write_config_dword((pcr)->pci, where, val)
+
+#define STATE_TRANS_NONE               0
+#define STATE_TRANS_CMD                        1
+#define STATE_TRANS_BUF                        2
+#define STATE_TRANS_SG                 3
+
+#define TRANS_NOT_READY                        0
+#define TRANS_RESULT_OK                        1
+#define TRANS_RESULT_FAIL              2
+#define TRANS_NO_DEVICE                        3
+
+#define RTSX_RESV_BUF_LEN              4096
+#define HOST_CMDS_BUF_LEN              1024
+#define HOST_SG_TBL_BUF_LEN            (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
+#define HOST_SG_TBL_ITEMS              (HOST_SG_TBL_BUF_LEN / 8)
+#define MAX_SG_ITEM_LEN                        0x80000
+#define HOST_TO_DEVICE                 0
+#define DEVICE_TO_HOST                 1
+
+#define OUTPUT_3V3                     0
+#define OUTPUT_1V8                     1
+
+#define RTSX_PHASE_MAX                 32
+#define RX_TUNING_CNT                  3
+
+#define MS_CFG                         0xFD40
+#define   SAMPLE_TIME_RISING           0x00
+#define   SAMPLE_TIME_FALLING          0x80
+#define   PUSH_TIME_DEFAULT            0x00
+#define   PUSH_TIME_ODD                        0x40
+#define   NO_EXTEND_TOGGLE             0x00
+#define   EXTEND_TOGGLE_CHK            0x20
+#define   MS_BUS_WIDTH_1               0x00
+#define   MS_BUS_WIDTH_4               0x10
+#define   MS_BUS_WIDTH_8               0x18
+#define   MS_2K_SECTOR_MODE            0x04
+#define   MS_512_SECTOR_MODE           0x00
+#define   MS_TOGGLE_TIMEOUT_EN         0x00
+#define   MS_TOGGLE_TIMEOUT_DISEN      0x01
+#define MS_NO_CHECK_INT                        0x02
+#define MS_TPC                         0xFD41
+#define MS_TRANS_CFG                   0xFD42
+#define   WAIT_INT                     0x80
+#define   NO_WAIT_INT                  0x00
+#define   NO_AUTO_READ_INT_REG         0x00
+#define   AUTO_READ_INT_REG            0x40
+#define   MS_CRC16_ERR                 0x20
+#define   MS_RDY_TIMEOUT               0x10
+#define   MS_INT_CMDNK                 0x08
+#define   MS_INT_BREQ                  0x04
+#define   MS_INT_ERR                   0x02
+#define   MS_INT_CED                   0x01
+#define MS_TRANSFER                    0xFD43
+#define   MS_TRANSFER_START            0x80
+#define   MS_TRANSFER_END              0x40
+#define   MS_TRANSFER_ERR              0x20
+#define   MS_BS_STATE                  0x10
+#define   MS_TM_READ_BYTES             0x00
+#define   MS_TM_NORMAL_READ            0x01
+#define   MS_TM_WRITE_BYTES            0x04
+#define   MS_TM_NORMAL_WRITE           0x05
+#define   MS_TM_AUTO_READ              0x08
+#define   MS_TM_AUTO_WRITE             0x0C
+#define MS_INT_REG                     0xFD44
+#define MS_BYTE_CNT                    0xFD45
+#define MS_SECTOR_CNT_L                        0xFD46
+#define MS_SECTOR_CNT_H                        0xFD47
+#define MS_DBUS_H                      0xFD48
+
+#define SD_CFG1                                0xFDA0
+#define   SD_CLK_DIVIDE_0              0x00
+#define   SD_CLK_DIVIDE_256            0xC0
+#define   SD_CLK_DIVIDE_128            0x80
+#define   SD_BUS_WIDTH_1BIT            0x00
+#define   SD_BUS_WIDTH_4BIT            0x01
+#define   SD_BUS_WIDTH_8BIT            0x02
+#define   SD_ASYNC_FIFO_NOT_RST                0x10
+#define   SD_20_MODE                   0x00
+#define   SD_DDR_MODE                  0x04
+#define   SD_30_MODE                   0x08
+#define   SD_CLK_DIVIDE_MASK           0xC0
+#define   SD_MODE_SELECT_MASK          0x0C
+#define SD_CFG2                                0xFDA1
+#define   SD_CALCULATE_CRC7            0x00
+#define   SD_NO_CALCULATE_CRC7         0x80
+#define   SD_CHECK_CRC16               0x00
+#define   SD_NO_CHECK_CRC16            0x40
+#define   SD_NO_CHECK_WAIT_CRC_TO      0x20
+#define   SD_WAIT_BUSY_END             0x08
+#define   SD_NO_WAIT_BUSY_END          0x00
+#define   SD_CHECK_CRC7                        0x00
+#define   SD_NO_CHECK_CRC7             0x04
+#define   SD_RSP_LEN_0                 0x00
+#define   SD_RSP_LEN_6                 0x01
+#define   SD_RSP_LEN_17                        0x02
+#define   SD_RSP_TYPE_R0               0x04
+#define   SD_RSP_TYPE_R1               0x01
+#define   SD_RSP_TYPE_R1b              0x09
+#define   SD_RSP_TYPE_R2               0x02
+#define   SD_RSP_TYPE_R3               0x05
+#define   SD_RSP_TYPE_R4               0x05
+#define   SD_RSP_TYPE_R5               0x01
+#define   SD_RSP_TYPE_R6               0x01
+#define   SD_RSP_TYPE_R7               0x01
+#define SD_CFG3                                0xFDA2
+#define   SD30_CLK_END_EN              0x10
+#define   SD_RSP_80CLK_TIMEOUT_EN      0x01
+
+#define SD_STAT1                       0xFDA3
+#define   SD_CRC7_ERR                  0x80
+#define   SD_CRC16_ERR                 0x40
+#define   SD_CRC_WRITE_ERR             0x20
+#define   SD_CRC_WRITE_ERR_MASK                0x1C
+#define   GET_CRC_TIME_OUT             0x02
+#define   SD_TUNING_COMPARE_ERR                0x01
+#define SD_STAT2                       0xFDA4
+#define   SD_RSP_80CLK_TIMEOUT         0x01
+
+#define SD_BUS_STAT                    0xFDA5
+#define   SD_CLK_TOGGLE_EN             0x80
+#define   SD_CLK_FORCE_STOP            0x40
+#define   SD_DAT3_STATUS               0x10
+#define   SD_DAT2_STATUS               0x08
+#define   SD_DAT1_STATUS               0x04
+#define   SD_DAT0_STATUS               0x02
+#define   SD_CMD_STATUS                        0x01
+#define SD_PAD_CTL                     0xFDA6
+#define   SD_IO_USING_1V8              0x80
+#define   SD_IO_USING_3V3              0x7F
+#define   TYPE_A_DRIVING               0x00
+#define   TYPE_B_DRIVING               0x01
+#define   TYPE_C_DRIVING               0x02
+#define   TYPE_D_DRIVING               0x03
+#define SD_SAMPLE_POINT_CTL            0xFDA7
+#define   DDR_FIX_RX_DAT               0x00
+#define   DDR_VAR_RX_DAT               0x80
+#define   DDR_FIX_RX_DAT_EDGE          0x00
+#define   DDR_FIX_RX_DAT_14_DELAY      0x40
+#define   DDR_FIX_RX_CMD               0x00
+#define   DDR_VAR_RX_CMD               0x20
+#define   DDR_FIX_RX_CMD_POS_EDGE      0x00
+#define   DDR_FIX_RX_CMD_14_DELAY      0x10
+#define   SD20_RX_POS_EDGE             0x00
+#define   SD20_RX_14_DELAY             0x08
+#define SD20_RX_SEL_MASK               0x08
+#define SD_PUSH_POINT_CTL              0xFDA8
+#define   DDR_FIX_TX_CMD_DAT           0x00
+#define   DDR_VAR_TX_CMD_DAT           0x80
+#define   DDR_FIX_TX_DAT_14_TSU                0x00
+#define   DDR_FIX_TX_DAT_12_TSU                0x40
+#define   DDR_FIX_TX_CMD_NEG_EDGE      0x00
+#define   DDR_FIX_TX_CMD_14_AHEAD      0x20
+#define   SD20_TX_NEG_EDGE             0x00
+#define   SD20_TX_14_AHEAD             0x10
+#define   SD20_TX_SEL_MASK             0x10
+#define   DDR_VAR_SDCLK_POL_SWAP       0x01
+#define SD_CMD0                                0xFDA9
+#define   SD_CMD_START                 0x40
+#define SD_CMD1                                0xFDAA
+#define SD_CMD2                                0xFDAB
+#define SD_CMD3                                0xFDAC
+#define SD_CMD4                                0xFDAD
+#define SD_CMD5                                0xFDAE
+#define SD_BYTE_CNT_L                  0xFDAF
+#define SD_BYTE_CNT_H                  0xFDB0
+#define SD_BLOCK_CNT_L                 0xFDB1
+#define SD_BLOCK_CNT_H                 0xFDB2
+#define SD_TRANSFER                    0xFDB3
+#define   SD_TRANSFER_START            0x80
+#define   SD_TRANSFER_END              0x40
+#define   SD_STAT_IDLE                 0x20
+#define   SD_TRANSFER_ERR              0x10
+#define   SD_TM_NORMAL_WRITE           0x00
+#define   SD_TM_AUTO_WRITE_3           0x01
+#define   SD_TM_AUTO_WRITE_4           0x02
+#define   SD_TM_AUTO_READ_3            0x05
+#define   SD_TM_AUTO_READ_4            0x06
+#define   SD_TM_CMD_RSP                        0x08
+#define   SD_TM_AUTO_WRITE_1           0x09
+#define   SD_TM_AUTO_WRITE_2           0x0A
+#define   SD_TM_NORMAL_READ            0x0C
+#define   SD_TM_AUTO_READ_1            0x0D
+#define   SD_TM_AUTO_READ_2            0x0E
+#define   SD_TM_AUTO_TUNING            0x0F
+#define SD_CMD_STATE                   0xFDB5
+#define   SD_CMD_IDLE                  0x80
+
+#define SD_DATA_STATE                  0xFDB6
+#define   SD_DATA_IDLE                 0x80
+#define REG_SD_STOP_SDCLK_CFG          0xFDB8
+#define   SD30_CLK_STOP_CFG_EN         0x04
+#define   SD30_CLK_STOP_CFG1           0x02
+#define   SD30_CLK_STOP_CFG0           0x01
+#define REG_PRE_RW_MODE                        0xFD70
+#define EN_INFINITE_MODE               0x01
+
+#define SRCTL                          0xFC13
+
+#define DCM_DRP_CTL                    0xFC23
+#define   DCM_RESET                    0x08
+#define   DCM_LOCKED                   0x04
+#define   DCM_208M                     0x00
+#define   DCM_TX                       0x01
+#define   DCM_RX                       0x02
+#define DCM_DRP_TRIG                   0xFC24
+#define   DRP_START                    0x80
+#define   DRP_DONE                     0x40
+#define DCM_DRP_CFG                    0xFC25
+#define   DRP_WRITE                    0x80
+#define   DRP_READ                     0x00
+#define   DCM_WRITE_ADDRESS_50         0x50
+#define   DCM_WRITE_ADDRESS_51         0x51
+#define   DCM_READ_ADDRESS_00          0x00
+#define   DCM_READ_ADDRESS_51          0x51
+#define DCM_DRP_WR_DATA_L              0xFC26
+#define DCM_DRP_WR_DATA_H              0xFC27
+#define DCM_DRP_RD_DATA_L              0xFC28
+#define DCM_DRP_RD_DATA_H              0xFC29
+#define SD_VPCLK0_CTL                  0xFC2A
+#define SD_VPCLK1_CTL                  0xFC2B
+#define   PHASE_SELECT_MASK            0x1F
+#define SD_DCMPS0_CTL                  0xFC2C
+#define SD_DCMPS1_CTL                  0xFC2D
+#define SD_VPTX_CTL                    SD_VPCLK0_CTL
+#define SD_VPRX_CTL                    SD_VPCLK1_CTL
+#define   PHASE_CHANGE                 0x80
+#define   PHASE_NOT_RESET              0x40
+#define SD_DCMPS_TX_CTL                        SD_DCMPS0_CTL
+#define SD_DCMPS_RX_CTL                        SD_DCMPS1_CTL
+#define   DCMPS_CHANGE                 0x80
+#define   DCMPS_CHANGE_DONE            0x40
+#define   DCMPS_ERROR                  0x20
+#define   DCMPS_CURRENT_PHASE          0x1F
+#define CARD_CLK_SOURCE                        0xFC2E
+#define   CRC_FIX_CLK                  (0x00 << 0)
+#define   CRC_VAR_CLK0                 (0x01 << 0)
+#define   CRC_VAR_CLK1                 (0x02 << 0)
+#define   SD30_FIX_CLK                 (0x00 << 2)
+#define   SD30_VAR_CLK0                        (0x01 << 2)
+#define   SD30_VAR_CLK1                        (0x02 << 2)
+#define   SAMPLE_FIX_CLK               (0x00 << 4)
+#define   SAMPLE_VAR_CLK0              (0x01 << 4)
+#define   SAMPLE_VAR_CLK1              (0x02 << 4)
+#define CARD_PWR_CTL                   0xFD50
+#define   PMOS_STRG_MASK               0x10
+#define   PMOS_STRG_800mA              0x10
+#define   PMOS_STRG_400mA              0x00
+#define   SD_POWER_OFF                 0x03
+#define   SD_PARTIAL_POWER_ON          0x01
+#define   SD_POWER_ON                  0x00
+#define   SD_POWER_MASK                        0x03
+#define   MS_POWER_OFF                 0x0C
+#define   MS_PARTIAL_POWER_ON          0x04
+#define   MS_POWER_ON                  0x00
+#define   MS_POWER_MASK                        0x0C
+#define   BPP_POWER_OFF                        0x0F
+#define   BPP_POWER_5_PERCENT_ON       0x0E
+#define   BPP_POWER_10_PERCENT_ON      0x0C
+#define   BPP_POWER_15_PERCENT_ON      0x08
+#define   BPP_POWER_ON                 0x00
+#define   BPP_POWER_MASK               0x0F
+#define   SD_VCC_PARTIAL_POWER_ON      0x02
+#define   SD_VCC_POWER_ON              0x00
+#define CARD_CLK_SWITCH                        0xFD51
+#define RTL8411B_PACKAGE_MODE          0xFD51
+#define CARD_SHARE_MODE                        0xFD52
+#define   CARD_SHARE_MASK              0x0F
+#define   CARD_SHARE_MULTI_LUN         0x00
+#define   CARD_SHARE_NORMAL            0x00
+#define   CARD_SHARE_48_SD             0x04
+#define   CARD_SHARE_48_MS             0x08
+#define   CARD_SHARE_BAROSSA_SD                0x01
+#define   CARD_SHARE_BAROSSA_MS                0x02
+#define CARD_DRIVE_SEL                 0xFD53
+#define   MS_DRIVE_8mA                 (0x01 << 6)
+#define   MMC_DRIVE_8mA                        (0x01 << 4)
+#define   XD_DRIVE_8mA                 (0x01 << 2)
+#define   GPIO_DRIVE_8mA               0x01
+#define RTS5209_CARD_DRIVE_DEFAULT     (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+                                       XD_DRIVE_8mA | GPIO_DRIVE_8mA)
+#define RTL8411_CARD_DRIVE_DEFAULT     (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+                                       XD_DRIVE_8mA)
+#define RTSX_CARD_DRIVE_DEFAULT                (MS_DRIVE_8mA | GPIO_DRIVE_8mA)
+
+#define CARD_STOP                      0xFD54
+#define   SPI_STOP                     0x01
+#define   XD_STOP                      0x02
+#define   SD_STOP                      0x04
+#define   MS_STOP                      0x08
+#define   SPI_CLR_ERR                  0x10
+#define   XD_CLR_ERR                   0x20
+#define   SD_CLR_ERR                   0x40
+#define   MS_CLR_ERR                   0x80
+#define CARD_OE                                0xFD55
+#define   SD_OUTPUT_EN                 0x04
+#define   MS_OUTPUT_EN                 0x08
+#define CARD_AUTO_BLINK                        0xFD56
+#define CARD_GPIO_DIR                  0xFD57
+#define CARD_GPIO                      0xFD58
+#define CARD_DATA_SOURCE               0xFD5B
+#define   PINGPONG_BUFFER              0x01
+#define   RING_BUFFER                  0x00
+#define SD30_CLK_DRIVE_SEL             0xFD5A
+#define   DRIVER_TYPE_A                        0x05
+#define   DRIVER_TYPE_B                        0x03
+#define   DRIVER_TYPE_C                        0x02
+#define   DRIVER_TYPE_D                        0x01
+#define CARD_SELECT                    0xFD5C
+#define   SD_MOD_SEL                   2
+#define   MS_MOD_SEL                   3
+#define SD30_DRIVE_SEL                 0xFD5E
+#define   CFG_DRIVER_TYPE_A            0x02
+#define   CFG_DRIVER_TYPE_B            0x03
+#define   CFG_DRIVER_TYPE_C            0x01
+#define   CFG_DRIVER_TYPE_D            0x00
+#define SD30_CMD_DRIVE_SEL             0xFD5E
+#define SD30_DAT_DRIVE_SEL             0xFD5F
+#define CARD_CLK_EN                    0xFD69
+#define   SD_CLK_EN                    0x04
+#define   MS_CLK_EN                    0x08
+#define   SD40_CLK_EN          0x10
+#define SDIO_CTRL                      0xFD6B
+#define CD_PAD_CTL                     0xFD73
+#define   CD_DISABLE_MASK              0x07
+#define   MS_CD_DISABLE                        0x04
+#define   SD_CD_DISABLE                        0x02
+#define   XD_CD_DISABLE                        0x01
+#define   CD_DISABLE                   0x07
+#define   CD_ENABLE                    0x00
+#define   MS_CD_EN_ONLY                        0x03
+#define   SD_CD_EN_ONLY                        0x05
+#define   XD_CD_EN_ONLY                        0x06
+#define   FORCE_CD_LOW_MASK            0x38
+#define   FORCE_CD_XD_LOW              0x08
+#define   FORCE_CD_SD_LOW              0x10
+#define   FORCE_CD_MS_LOW              0x20
+#define   CD_AUTO_DISABLE              0x40
+#define FPDCTL                         0xFC00
+#define   SSC_POWER_DOWN               0x01
+#define   SD_OC_POWER_DOWN             0x02
+#define   ALL_POWER_DOWN               0x03
+#define   OC_POWER_DOWN                        0x02
+#define PDINFO                         0xFC01
+
+#define CLK_CTL                                0xFC02
+#define   CHANGE_CLK                   0x01
+#define   CLK_LOW_FREQ                 0x01
+
+#define CLK_DIV                                0xFC03
+#define   CLK_DIV_1                    0x01
+#define   CLK_DIV_2                    0x02
+#define   CLK_DIV_4                    0x03
+#define   CLK_DIV_8                    0x04
+#define CLK_SEL                                0xFC04
+
+#define SSC_DIV_N_0                    0xFC0F
+#define SSC_DIV_N_1                    0xFC10
+#define SSC_CTL1                       0xFC11
+#define    SSC_RSTB                    0x80
+#define    SSC_8X_EN                   0x40
+#define    SSC_FIX_FRAC                        0x20
+#define    SSC_SEL_1M                  0x00
+#define    SSC_SEL_2M                  0x08
+#define    SSC_SEL_4M                  0x10
+#define    SSC_SEL_8M                  0x18
+#define SSC_CTL2                       0xFC12
+#define    SSC_DEPTH_MASK              0x07
+#define    SSC_DEPTH_DISALBE           0x00
+#define    SSC_DEPTH_4M                        0x01
+#define    SSC_DEPTH_2M                        0x02
+#define    SSC_DEPTH_1M                        0x03
+#define    SSC_DEPTH_500K              0x04
+#define    SSC_DEPTH_250K              0x05
+#define RCCTL                          0xFC14
+
+#define FPGA_PULL_CTL                  0xFC1D
+#define OLT_LED_CTL                    0xFC1E
+#define   LED_SHINE_MASK               0x08
+#define   LED_SHINE_EN                 0x08
+#define   LED_SHINE_DISABLE            0x00
+#define GPIO_CTL                       0xFC1F
+
+#define LDO_CTL                                0xFC1E
+#define   BPP_ASIC_1V7                 0x00
+#define   BPP_ASIC_1V8                 0x01
+#define   BPP_ASIC_1V9                 0x02
+#define   BPP_ASIC_2V0                 0x03
+#define   BPP_ASIC_2V7                 0x04
+#define   BPP_ASIC_2V8                 0x05
+#define   BPP_ASIC_3V2                 0x06
+#define   BPP_ASIC_3V3                 0x07
+#define   BPP_REG_TUNED18              0x07
+#define   BPP_TUNED18_SHIFT_8402       5
+#define   BPP_TUNED18_SHIFT_8411       4
+#define   BPP_PAD_MASK                 0x04
+#define   BPP_PAD_3V3                  0x04
+#define   BPP_PAD_1V8                  0x00
+#define   BPP_LDO_POWB                 0x03
+#define   BPP_LDO_ON                   0x00
+#define   BPP_LDO_SUSPEND              0x02
+#define   BPP_LDO_OFF                  0x03
+#define EFUSE_CTL                      0xFC30
+#define EFUSE_ADD                      0xFC31
+#define SYS_VER                                0xFC32
+#define EFUSE_DATAL                    0xFC34
+#define EFUSE_DATAH                    0xFC35
+
+#define CARD_PULL_CTL1                 0xFD60
+#define CARD_PULL_CTL2                 0xFD61
+#define CARD_PULL_CTL3                 0xFD62
+#define CARD_PULL_CTL4                 0xFD63
+#define CARD_PULL_CTL5                 0xFD64
+#define CARD_PULL_CTL6                 0xFD65
+
+/* PCI Express Related Registers */
+#define IRQEN0                         0xFE20
+#define IRQSTAT0                       0xFE21
+#define    DMA_DONE_INT                        0x80
+#define    SUSPEND_INT                 0x40
+#define    LINK_RDY_INT                        0x20
+#define    LINK_DOWN_INT               0x10
+#define IRQEN1                         0xFE22
+#define IRQSTAT1                       0xFE23
+#define TLPRIEN                                0xFE24
+#define TLPRISTAT                      0xFE25
+#define TLPTIEN                                0xFE26
+#define TLPTISTAT                      0xFE27
+#define DMATC0                         0xFE28
+#define DMATC1                         0xFE29
+#define DMATC2                         0xFE2A
+#define DMATC3                         0xFE2B
+#define DMACTL                         0xFE2C
+#define   DMA_RST                      0x80
+#define   DMA_BUSY                     0x04
+#define   DMA_DIR_TO_CARD              0x00
+#define   DMA_DIR_FROM_CARD            0x02
+#define   DMA_EN                       0x01
+#define   DMA_128                      (0 << 4)
+#define   DMA_256                      (1 << 4)
+#define   DMA_512                      (2 << 4)
+#define   DMA_1024                     (3 << 4)
+#define   DMA_PACK_SIZE_MASK           0x30
+#define BCTL                           0xFE2D
+#define RBBC0                          0xFE2E
+#define RBBC1                          0xFE2F
+#define RBDAT                          0xFE30
+#define RBCTL                          0xFE34
+#define   U_AUTO_DMA_EN_MASK           0x20
+#define   U_AUTO_DMA_DISABLE           0x00
+#define   RB_FLUSH                     0x80
+#define CFGADDR0                       0xFE35
+#define CFGADDR1                       0xFE36
+#define CFGDATA0                       0xFE37
+#define CFGDATA1                       0xFE38
+#define CFGDATA2                       0xFE39
+#define CFGDATA3                       0xFE3A
+#define CFGRWCTL                       0xFE3B
+#define PHYRWCTL                       0xFE3C
+#define PHYDATA0                       0xFE3D
+#define PHYDATA1                       0xFE3E
+#define PHYADDR                                0xFE3F
+#define MSGRXDATA0                     0xFE40
+#define MSGRXDATA1                     0xFE41
+#define MSGRXDATA2                     0xFE42
+#define MSGRXDATA3                     0xFE43
+#define MSGTXDATA0                     0xFE44
+#define MSGTXDATA1                     0xFE45
+#define MSGTXDATA2                     0xFE46
+#define MSGTXDATA3                     0xFE47
+#define MSGTXCTL                       0xFE48
+#define LTR_CTL                                0xFE4A
+#define LTR_TX_EN_MASK         BIT(7)
+#define LTR_TX_EN_1                    BIT(7)
+#define LTR_TX_EN_0                    0
+#define LTR_LATENCY_MODE_MASK          BIT(6)
+#define LTR_LATENCY_MODE_HW            0
+#define LTR_LATENCY_MODE_SW            BIT(6)
+#define OBFF_CFG                       0xFE4C
+#define   OBFF_EN_MASK                 0x03
+#define   OBFF_DISABLE                 0x00
+
+#define CDRESUMECTL                    0xFE52
+#define WAKE_SEL_CTL                   0xFE54
+#define PCLK_CTL                       0xFE55
+#define   PCLK_MODE_SEL                        0x20
+#define PME_FORCE_CTL                  0xFE56
+
+#define ASPM_FORCE_CTL                 0xFE57
+#define   FORCE_ASPM_CTL0              0x10
+#define   FORCE_ASPM_VAL_MASK          0x03
+#define   FORCE_ASPM_L1_EN             0x02
+#define   FORCE_ASPM_L0_EN             0x01
+#define   FORCE_ASPM_NO_ASPM           0x00
+#define PM_CLK_FORCE_CTL               0xFE58
+#define   CLK_PM_EN                    0x01
+#define FUNC_FORCE_CTL                 0xFE59
+#define   FUNC_FORCE_UPME_XMT_DBG      0x02
+#define PERST_GLITCH_WIDTH             0xFE5C
+#define CHANGE_LINK_STATE              0xFE5B
+#define RESET_LOAD_REG                 0xFE5E
+#define EFUSE_CONTENT                  0xFE5F
+#define HOST_SLEEP_STATE               0xFE60
+#define   HOST_ENTER_S1                        1
+#define   HOST_ENTER_S3                        2
+
+#define SDIO_CFG                       0xFE70
+#define PM_EVENT_DEBUG                 0xFE71
+#define   PME_DEBUG_0                  0x08
+#define NFTS_TX_CTRL                   0xFE72
+
+#define PWR_GATE_CTRL                  0xFE75
+#define   PWR_GATE_EN                  0x01
+#define   LDO3318_PWR_MASK             0x06
+#define   LDO_ON                       0x00
+#define   LDO_SUSPEND                  0x04
+#define   LDO_OFF                      0x06
+#define PWD_SUSPEND_EN                 0xFE76
+#define LDO_PWR_SEL                    0xFE78
+
+#define L1SUB_CONFIG1                  0xFE8D
+#define   AUX_CLK_ACTIVE_SEL_MASK      0x01
+#define   MAC_CKSW_DONE                        0x00
+#define L1SUB_CONFIG2                  0xFE8E
+#define   L1SUB_AUTO_CFG               0x02
+#define L1SUB_CONFIG3                  0xFE8F
+#define   L1OFF_MBIAS2_EN_5250         BIT(7)
+
+#define DUMMY_REG_RESET_0              0xFE90
+#define   IC_VERSION_MASK              0x0F
+
+#define REG_VREF                       0xFE97
+#define   PWD_SUSPND_EN                        0x10
+#define RTS5260_DMA_RST_CTL_0          0xFEBF
+#define   RTS5260_DMA_RST              0x80
+#define   RTS5260_ADMA3_RST            0x40
+#define AUTOLOAD_CFG_BASE              0xFF00
+#define RELINK_TIME_MASK               0x01
+#define PETXCFG                                0xFF03
+#define FORCE_CLKREQ_DELINK_MASK       BIT(7)
+#define FORCE_CLKREQ_LOW       0x80
+#define FORCE_CLKREQ_HIGH      0x00
+
+#define PM_CTRL1                       0xFF44
+#define   CD_RESUME_EN_MASK            0xF0
+
+#define PM_CTRL2                       0xFF45
+#define PM_CTRL3                       0xFF46
+#define   SDIO_SEND_PME_EN             0x80
+#define   FORCE_RC_MODE_ON             0x40
+#define   FORCE_RX50_LINK_ON           0x20
+#define   D3_DELINK_MODE_EN            0x10
+#define   USE_PESRTB_CTL_DELINK                0x08
+#define   DELAY_PIN_WAKE               0x04
+#define   RESET_PIN_WAKE               0x02
+#define   PM_WAKE_EN                   0x01
+#define PM_CTRL4                       0xFF47
+
+/* Memory mapping */
+#define SRAM_BASE                      0xE600
+#define RBUF_BASE                      0xF400
+#define PPBUF_BASE1                    0xF800
+#define PPBUF_BASE2                    0xFA00
+#define IMAGE_FLAG_ADDR0               0xCE80
+#define IMAGE_FLAG_ADDR1               0xCE81
+
+#define RREF_CFG                       0xFF6C
+#define   RREF_VBGSEL_MASK             0x38
+#define   RREF_VBGSEL_1V25             0x28
+
+#define OOBS_CONFIG                    0xFF6E
+#define   OOBS_AUTOK_DIS               0x80
+#define   OOBS_VAL_MASK                        0x1F
+
+#define LDO_DV18_CFG                   0xFF70
+#define   LDO_DV18_SR_MASK             0xC0
+#define   LDO_DV18_SR_DF               0x40
+#define   DV331812_MASK                        0x70
+#define   DV331812_33                  0x70
+#define   DV331812_17                  0x30
+
+#define LDO_CONFIG2                    0xFF71
+#define   LDO_D3318_MASK               0x07
+#define   LDO_D3318_33V                        0x07
+#define   LDO_D3318_18V                        0x02
+#define   DV331812_VDD1                        0x04
+#define   DV331812_POWERON             0x08
+#define   DV331812_POWEROFF            0x00
+
+#define LDO_VCC_CFG0                   0xFF72
+#define   LDO_VCC_LMTVTH_MASK          0x30
+#define   LDO_VCC_LMTVTH_2A            0x10
+/* RTS5260 */
+#define   RTS5260_DVCC_TUNE_MASK       0x70
+#define   RTS5260_DVCC_33              0x70
+
+#define LDO_VCC_CFG1                   0xFF73
+#define   LDO_VCC_REF_TUNE_MASK                0x30
+#define   LDO_VCC_REF_1V2              0x20
+#define   LDO_VCC_TUNE_MASK            0x07
+#define   LDO_VCC_1V8                  0x04
+#define   LDO_VCC_3V3                  0x07
+#define   LDO_VCC_LMT_EN               0x08
+/* RTS5260 */
+#define          LDO_POW_SDVDD1_MASK           0x08
+#define          LDO_POW_SDVDD1_ON             0x08
+#define          LDO_POW_SDVDD1_OFF            0x00
+
+#define LDO_VIO_CFG                    0xFF75
+#define   LDO_VIO_SR_MASK              0xC0
+#define   LDO_VIO_SR_DF                        0x40
+#define   LDO_VIO_REF_TUNE_MASK                0x30
+#define   LDO_VIO_REF_1V2              0x20
+#define   LDO_VIO_TUNE_MASK            0x07
+#define   LDO_VIO_1V7                  0x03
+#define   LDO_VIO_1V8                  0x04
+#define   LDO_VIO_3V3                  0x07
+
+#define LDO_DV12S_CFG                  0xFF76
+#define   LDO_REF12_TUNE_MASK          0x18
+#define   LDO_REF12_TUNE_DF            0x10
+#define   LDO_D12_TUNE_MASK            0x07
+#define   LDO_D12_TUNE_DF              0x04
+
+#define LDO_AV12S_CFG                  0xFF77
+#define   LDO_AV12S_TUNE_MASK          0x07
+#define   LDO_AV12S_TUNE_DF            0x04
+
+#define SD40_LDO_CTL1                  0xFE7D
+#define   SD40_VIO_TUNE_MASK           0x70
+#define   SD40_VIO_TUNE_1V7            0x30
+#define   SD_VIO_LDO_1V8               0x40
+#define   SD_VIO_LDO_3V3               0x70
+
+#define RTS5260_AUTOLOAD_CFG4          0xFF7F
+#define   RTS5260_MIMO_DISABLE         0x8A
+
+#define RTS5260_REG_GPIO_CTL0          0xFC1A
+#define   RTS5260_REG_GPIO_MASK                0x01
+#define   RTS5260_REG_GPIO_ON          0x01
+#define   RTS5260_REG_GPIO_OFF         0x00
+
+#define PWR_GLOBAL_CTRL                        0xF200
+#define PCIE_L1_2_EN                   0x0C
+#define PCIE_L1_1_EN                   0x0A
+#define PCIE_L1_0_EN                   0x09
+#define PWR_FE_CTL                     0xF201
+#define PCIE_L1_2_PD_FE_EN             0x0C
+#define PCIE_L1_1_PD_FE_EN             0x0A
+#define PCIE_L1_0_PD_FE_EN             0x09
+#define CFG_PCIE_APHY_OFF_0            0xF204
+#define CFG_PCIE_APHY_OFF_0_DEFAULT    0xBF
+#define CFG_PCIE_APHY_OFF_1            0xF205
+#define CFG_PCIE_APHY_OFF_1_DEFAULT    0xFF
+#define CFG_PCIE_APHY_OFF_2            0xF206
+#define CFG_PCIE_APHY_OFF_2_DEFAULT    0x01
+#define CFG_PCIE_APHY_OFF_3            0xF207
+#define CFG_PCIE_APHY_OFF_3_DEFAULT    0x00
+#define CFG_L1_0_PCIE_MAC_RET_VALUE    0xF20C
+#define CFG_L1_0_PCIE_DPHY_RET_VALUE   0xF20E
+#define CFG_L1_0_SYS_RET_VALUE         0xF210
+#define CFG_L1_0_CRC_MISC_RET_VALUE    0xF212
+#define CFG_L1_0_CRC_SD30_RET_VALUE    0xF214
+#define CFG_L1_0_CRC_SD40_RET_VALUE    0xF216
+#define CFG_LP_FPWM_VALUE              0xF219
+#define CFG_LP_FPWM_VALUE_DEFAULT      0x18
+#define PWC_CDR                                0xF253
+#define PWC_CDR_DEFAULT                        0x03
+#define CFG_L1_0_RET_VALUE_DEFAULT     0x1B
+#define CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT    0x0C
+
+/* OCPCTL */
+#define SD_DETECT_EN                   0x08
+#define SD_OCP_INT_EN                  0x04
+#define SD_OCP_INT_CLR                 0x02
+#define SD_OC_CLR                      0x01
+
+#define SDVIO_DETECT_EN                        (1 << 7)
+#define SDVIO_OCP_INT_EN               (1 << 6)
+#define SDVIO_OCP_INT_CLR              (1 << 5)
+#define SDVIO_OC_CLR                   (1 << 4)
+
+/* OCPSTAT */
+#define SD_OCP_DETECT                  0x08
+#define SD_OC_NOW                      0x04
+#define SD_OC_EVER                     0x02
+
+#define SDVIO_OC_NOW                   (1 << 6)
+#define SDVIO_OC_EVER                  (1 << 5)
+
+#define REG_OCPCTL                     0xFD6A
+#define REG_OCPSTAT                    0xFD6E
+#define REG_OCPGLITCH                  0xFD6C
+#define REG_OCPPARA1                   0xFD6B
+#define REG_OCPPARA2                   0xFD6D
+
+/* rts5260 DV3318 OCP-related registers */
+#define REG_DV3318_OCPCTL              0xFD89
+#define DV3318_OCP_TIME_MASK   0xF0
+#define DV3318_DETECT_EN               0x08
+#define DV3318_OCP_INT_EN              0x04
+#define DV3318_OCP_INT_CLR             0x02
+#define DV3318_OCP_CLR                 0x01
+
+#define REG_DV3318_OCPSTAT             0xFD8A
+#define DV3318_OCP_GlITCH_TIME_MASK    0xF0
+#define DV3318_OCP_DETECT              0x08
+#define DV3318_OCP_NOW                 0x04
+#define DV3318_OCP_EVER                        0x02
+
+#define SD_OCP_GLITCH_MASK             0x0F
+
+/* OCPPARA1 */
+#define SDVIO_OCP_TIME_60              0x00
+#define SDVIO_OCP_TIME_100             0x10
+#define SDVIO_OCP_TIME_200             0x20
+#define SDVIO_OCP_TIME_400             0x30
+#define SDVIO_OCP_TIME_600             0x40
+#define SDVIO_OCP_TIME_800             0x50
+#define SDVIO_OCP_TIME_1100            0x60
+#define SDVIO_OCP_TIME_MASK            0x70
+
+#define SD_OCP_TIME_60                 0x00
+#define SD_OCP_TIME_100                        0x01
+#define SD_OCP_TIME_200                        0x02
+#define SD_OCP_TIME_400                        0x03
+#define SD_OCP_TIME_600                        0x04
+#define SD_OCP_TIME_800                        0x05
+#define SD_OCP_TIME_1100               0x06
+#define SD_OCP_TIME_MASK               0x07
+
+/* OCPPARA2 */
+#define SDVIO_OCP_THD_190              0x00
+#define SDVIO_OCP_THD_250              0x10
+#define SDVIO_OCP_THD_320              0x20
+#define SDVIO_OCP_THD_380              0x30
+#define SDVIO_OCP_THD_440              0x40
+#define SDVIO_OCP_THD_500              0x50
+#define SDVIO_OCP_THD_570              0x60
+#define SDVIO_OCP_THD_630              0x70
+#define SDVIO_OCP_THD_MASK             0x70
+
+#define SD_OCP_THD_450                 0x00
+#define SD_OCP_THD_550                 0x01
+#define SD_OCP_THD_650                 0x02
+#define SD_OCP_THD_750                 0x03
+#define SD_OCP_THD_850                 0x04
+#define SD_OCP_THD_950                 0x05
+#define SD_OCP_THD_1050                        0x06
+#define SD_OCP_THD_1150                        0x07
+#define SD_OCP_THD_MASK                        0x07
+
+#define SDVIO_OCP_GLITCH_MASK          0xF0
+#define SDVIO_OCP_GLITCH_NONE          0x00
+#define SDVIO_OCP_GLITCH_50U           0x10
+#define SDVIO_OCP_GLITCH_100U          0x20
+#define SDVIO_OCP_GLITCH_200U          0x30
+#define SDVIO_OCP_GLITCH_600U          0x40
+#define SDVIO_OCP_GLITCH_800U          0x50
+#define SDVIO_OCP_GLITCH_1M            0x60
+#define SDVIO_OCP_GLITCH_2M            0x70
+#define SDVIO_OCP_GLITCH_3M            0x80
+#define SDVIO_OCP_GLITCH_4M            0x90
+#define SDVIO_OCP_GLIVCH_5M            0xA0
+#define SDVIO_OCP_GLITCH_6M            0xB0
+#define SDVIO_OCP_GLITCH_7M            0xC0
+#define SDVIO_OCP_GLITCH_8M            0xD0
+#define SDVIO_OCP_GLITCH_9M            0xE0
+#define SDVIO_OCP_GLITCH_10M           0xF0
+
+#define SD_OCP_GLITCH_MASK             0x0F
+#define SD_OCP_GLITCH_NONE             0x00
+#define SD_OCP_GLITCH_50U              0x01
+#define SD_OCP_GLITCH_100U             0x02
+#define SD_OCP_GLITCH_200U             0x03
+#define SD_OCP_GLITCH_600U             0x04
+#define SD_OCP_GLITCH_800U             0x05
+#define SD_OCP_GLITCH_1M               0x06
+#define SD_OCP_GLITCH_2M               0x07
+#define SD_OCP_GLITCH_3M               0x08
+#define SD_OCP_GLITCH_4M               0x09
+#define SD_OCP_GLIVCH_5M               0x0A
+#define SD_OCP_GLITCH_6M               0x0B
+#define SD_OCP_GLITCH_7M               0x0C
+#define SD_OCP_GLITCH_8M               0x0D
+#define SD_OCP_GLITCH_9M               0x0E
+#define SD_OCP_GLITCH_10M              0x0F
+
+/* Phy register */
+#define PHY_PCR                                0x00
+#define   PHY_PCR_FORCE_CODE           0xB000
+#define   PHY_PCR_OOBS_CALI_50         0x0800
+#define   PHY_PCR_OOBS_VCM_08          0x0200
+#define   PHY_PCR_OOBS_SEN_90          0x0040
+#define   PHY_PCR_RSSI_EN              0x0002
+#define   PHY_PCR_RX10K                        0x0001
+
+#define PHY_RCR0                       0x01
+#define PHY_RCR1                       0x02
+#define   PHY_RCR1_ADP_TIME_4          0x0400
+#define   PHY_RCR1_VCO_COARSE          0x001F
+#define   PHY_RCR1_INIT_27S            0x0A1F
+#define PHY_SSCCR2                     0x02
+#define   PHY_SSCCR2_PLL_NCODE         0x0A00
+#define   PHY_SSCCR2_TIME0             0x001C
+#define   PHY_SSCCR2_TIME2_WIDTH       0x0003
+
+#define PHY_RCR2                       0x03
+#define   PHY_RCR2_EMPHASE_EN          0x8000
+#define   PHY_RCR2_NADJR               0x4000
+#define   PHY_RCR2_CDR_SR_2            0x0100
+#define   PHY_RCR2_FREQSEL_12          0x0040
+#define   PHY_RCR2_CDR_SC_12P          0x0010
+#define   PHY_RCR2_CALIB_LATE          0x0002
+#define   PHY_RCR2_INIT_27S            0xC152
+#define PHY_SSCCR3                     0x03
+#define   PHY_SSCCR3_STEP_IN           0x2740
+#define   PHY_SSCCR3_CHECK_DELAY       0x0008
+#define _PHY_ANA03                     0x03
+#define   _PHY_ANA03_TIMER_MAX         0x2700
+#define   _PHY_ANA03_OOBS_DEB_EN       0x0040
+#define   _PHY_CMU_DEBUG_EN            0x0008
+
+#define PHY_RTCR                       0x04
+#define PHY_RDR                                0x05
+#define   PHY_RDR_RXDSEL_1_9           0x4000
+#define   PHY_SSC_AUTO_PWD             0x0600
+#define PHY_TCR0                       0x06
+#define PHY_TCR1                       0x07
+#define PHY_TUNE                       0x08
+#define   PHY_TUNE_TUNEREF_1_0         0x4000
+#define   PHY_TUNE_VBGSEL_1252         0x0C00
+#define   PHY_TUNE_SDBUS_33            0x0200
+#define   PHY_TUNE_TUNED18             0x01C0
+#define   PHY_TUNE_TUNED12             0X0020
+#define   PHY_TUNE_TUNEA12             0x0004
+#define   PHY_TUNE_VOLTAGE_MASK                0xFC3F
+#define   PHY_TUNE_VOLTAGE_3V3         0x03C0
+#define   PHY_TUNE_D18_1V8             0x0100
+#define   PHY_TUNE_D18_1V7             0x0080
+#define PHY_ANA08                      0x08
+#define   PHY_ANA08_RX_EQ_DCGAIN       0x5000
+#define   PHY_ANA08_SEL_RX_EN          0x0400
+#define   PHY_ANA08_RX_EQ_VAL          0x03C0
+#define   PHY_ANA08_SCP                        0x0020
+#define   PHY_ANA08_SEL_IPI            0x0004
+
+#define PHY_IMR                                0x09
+#define PHY_BPCR                       0x0A
+#define   PHY_BPCR_IBRXSEL             0x0400
+#define   PHY_BPCR_IBTXSEL             0x0100
+#define   PHY_BPCR_IB_FILTER           0x0080
+#define   PHY_BPCR_CMIRROR_EN          0x0040
+
+#define PHY_BIST                       0x0B
+#define PHY_RAW_L                      0x0C
+#define PHY_RAW_H                      0x0D
+#define PHY_RAW_DATA                   0x0E
+#define PHY_HOST_CLK_CTRL              0x0F
+#define PHY_DMR                                0x10
+#define PHY_BACR                       0x11
+#define   PHY_BACR_BASIC_MASK          0xFFF3
+#define PHY_IER                                0x12
+#define PHY_BCSR                       0x13
+#define PHY_BPR                                0x14
+#define PHY_BPNR2                      0x15
+#define PHY_BPNR                       0x16
+#define PHY_BRNR2                      0x17
+#define PHY_BENR                       0x18
+#define PHY_REV                                0x19
+#define   PHY_REV_RESV                 0xE000
+#define   PHY_REV_RXIDLE_LATCHED       0x1000
+#define   PHY_REV_P1_EN                        0x0800
+#define   PHY_REV_RXIDLE_EN            0x0400
+#define   PHY_REV_CLKREQ_TX_EN         0x0200
+#define   PHY_REV_CLKREQ_RX_EN         0x0100
+#define   PHY_REV_CLKREQ_DT_1_0                0x0040
+#define   PHY_REV_STOP_CLKRD           0x0020
+#define   PHY_REV_RX_PWST              0x0008
+#define   PHY_REV_STOP_CLKWR           0x0004
+#define _PHY_REV0                      0x19
+#define   _PHY_REV0_FILTER_OUT         0x3800
+#define   _PHY_REV0_CDR_BYPASS_PFD     0x0100
+#define   _PHY_REV0_CDR_RX_IDLE_BYPASS 0x0002
+
+#define PHY_FLD0                       0x1A
+#define PHY_ANA1A                      0x1A
+#define   PHY_ANA1A_TXR_LOOPBACK       0x2000
+#define   PHY_ANA1A_RXT_BIST           0x0500
+#define   PHY_ANA1A_TXR_BIST           0x0040
+#define   PHY_ANA1A_REV                        0x0006
+#define   PHY_FLD0_INIT_27S            0x2546
+#define PHY_FLD1                       0x1B
+#define PHY_FLD2                       0x1C
+#define PHY_FLD3                       0x1D
+#define   PHY_FLD3_TIMER_4             0x0800
+#define   PHY_FLD3_TIMER_6             0x0020
+#define   PHY_FLD3_RXDELINK            0x0004
+#define   PHY_FLD3_INIT_27S            0x0004
+#define PHY_ANA1D                      0x1D
+#define   PHY_ANA1D_DEBUG_ADDR         0x0004
+#define _PHY_FLD0                      0x1D
+#define   _PHY_FLD0_CLK_REQ_20C                0x8000
+#define   _PHY_FLD0_RX_IDLE_EN         0x1000
+#define   _PHY_FLD0_BIT_ERR_RSTN       0x0800
+#define   _PHY_FLD0_BER_COUNT          0x01E0
+#define   _PHY_FLD0_BER_TIMER          0x001E
+#define   _PHY_FLD0_CHECK_EN           0x0001
+
+#define PHY_FLD4                       0x1E
+#define   PHY_FLD4_FLDEN_SEL           0x4000
+#define   PHY_FLD4_REQ_REF             0x2000
+#define   PHY_FLD4_RXAMP_OFF           0x1000
+#define   PHY_FLD4_REQ_ADDA            0x0800
+#define   PHY_FLD4_BER_COUNT           0x00E0
+#define   PHY_FLD4_BER_TIMER           0x000A
+#define   PHY_FLD4_BER_CHK_EN          0x0001
+#define   PHY_FLD4_INIT_27S            0x5C7F
+#define PHY_DIG1E                      0x1E
+#define   PHY_DIG1E_REV                        0x4000
+#define   PHY_DIG1E_D0_X_D1            0x1000
+#define   PHY_DIG1E_RX_ON_HOST         0x0800
+#define   PHY_DIG1E_RCLK_REF_HOST      0x0400
+#define   PHY_DIG1E_RCLK_TX_EN_KEEP    0x0040
+#define   PHY_DIG1E_RCLK_TX_TERM_KEEP  0x0020
+#define   PHY_DIG1E_RCLK_RX_EIDLE_ON   0x0010
+#define   PHY_DIG1E_TX_TERM_KEEP       0x0008
+#define   PHY_DIG1E_RX_TERM_KEEP       0x0004
+#define   PHY_DIG1E_TX_EN_KEEP         0x0002
+#define   PHY_DIG1E_RX_EN_KEEP         0x0001
+#define PHY_DUM_REG                    0x1F
+
+#define PCR_ASPM_SETTING_REG1          0x160
+#define PCR_ASPM_SETTING_REG2          0x168
+#define PCR_ASPM_SETTING_5260          0x178
+
+#define PCR_SETTING_REG1               0x724
+#define PCR_SETTING_REG2               0x814
+#define PCR_SETTING_REG3               0x747
+
+#define rtsx_pci_init_cmd(pcr)         ((pcr)->ci = 0)
+
+#define RTS5227_DEVICE_ID              0x5227
+#define RTS_MAX_TIMES_FREQ_REDUCTION   8
+
+struct rtsx_pcr;
+
+struct pcr_handle {
+       struct rtsx_pcr                 *pcr;
+};
+
+struct pcr_ops {
+       int (*write_phy)(struct rtsx_pcr *pcr, u8 addr, u16 val);
+       int (*read_phy)(struct rtsx_pcr *pcr, u8 addr, u16 *val);
+       int             (*extra_init_hw)(struct rtsx_pcr *pcr);
+       int             (*optimize_phy)(struct rtsx_pcr *pcr);
+       int             (*turn_on_led)(struct rtsx_pcr *pcr);
+       int             (*turn_off_led)(struct rtsx_pcr *pcr);
+       int             (*enable_auto_blink)(struct rtsx_pcr *pcr);
+       int             (*disable_auto_blink)(struct rtsx_pcr *pcr);
+       int             (*card_power_on)(struct rtsx_pcr *pcr, int card);
+       int             (*card_power_off)(struct rtsx_pcr *pcr, int card);
+       int             (*switch_output_voltage)(struct rtsx_pcr *pcr,
+                                               u8 voltage);
+       unsigned int    (*cd_deglitch)(struct rtsx_pcr *pcr);
+       int             (*conv_clk_and_div_n)(int clk, int dir);
+       void            (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
+       void            (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+       void            (*stop_cmd)(struct rtsx_pcr *pcr);
+
+       void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
+       int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
+       int (*set_l1off_sub)(struct rtsx_pcr *pcr, u8 val);
+       void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
+       void (*full_on)(struct rtsx_pcr *pcr);
+       void (*power_saving)(struct rtsx_pcr *pcr);
+       void (*enable_ocp)(struct rtsx_pcr *pcr);
+       void (*disable_ocp)(struct rtsx_pcr *pcr);
+       void (*init_ocp)(struct rtsx_pcr *pcr);
+       void (*process_ocp)(struct rtsx_pcr *pcr);
+       int (*get_ocpstat)(struct rtsx_pcr *pcr, u8 *val);
+       void (*clear_ocpstat)(struct rtsx_pcr *pcr);
+};
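Editor's note: struct pcr_ops above is the per-chip callback table that each RTSX PCI variant driver fills in. A minimal, hypothetical sketch of such a table (the chip name and register tweak are purely illustrative, not taken from this patch):

static int rts5xxx_extra_init_hw(struct rtsx_pcr *pcr)
{
	/* Illustrative: program the 3.3 V SD30 driving strength that
	 * fetch_vendor_settings() stashed in the pcr. */
	return rtsx_pci_write_register(pcr, SD30_DRIVE_SEL, 0x07,
				       pcr->sd30_drive_sel_3v3);
}

static const struct pcr_ops rts5xxx_pcr_ops = {
	.extra_init_hw = rts5xxx_extra_init_hw,
	/* hooks left NULL are simply skipped by the core */
};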
+
+enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+
+#define ASPM_L1_1_EN_MASK              BIT(3)
+#define ASPM_L1_2_EN_MASK              BIT(2)
+#define PM_L1_1_EN_MASK                BIT(1)
+#define PM_L1_2_EN_MASK                BIT(0)
+
+#define ASPM_L1_1_EN                   BIT(0)
+#define ASPM_L1_2_EN                   BIT(1)
+#define PM_L1_1_EN                             BIT(2)
+#define PM_L1_2_EN                             BIT(3)
+#define LTR_L1SS_PWR_GATE_EN   BIT(4)
+#define L1_SNOOZE_TEST_EN              BIT(5)
+#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN        BIT(6)
+
+enum dev_aspm_mode {
+       DEV_ASPM_DYNAMIC,
+       DEV_ASPM_BACKDOOR,
+       DEV_ASPM_STATIC,
+       DEV_ASPM_DISABLE,
+};
+
+/*
+ * struct rtsx_cr_option  - card reader option
+ * @dev_flags: device flags
+ * @force_clkreq_0: force clock request
+ * @ltr_en: enable ltr mode flag
+ * @ltr_enabled: ltr mode enabled in config space flag
+ * @ltr_active: ltr mode status
+ * @ltr_active_latency: ltr mode active latency
+ * @ltr_idle_latency: ltr mode idle latency
+ * @ltr_l1off_latency: ltr mode l1off latency
+ * @dev_aspm_mode: device aspm mode
+ * @l1_snooze_delay: l1 snooze delay
+ * @ltr_l1off_sspwrgate: ltr l1off sspwrgate
+ * @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
+ * @ocp_en: enable ocp flag
+ * @sd_400mA_ocp_thd: 400mA ocp thd
+ * @sd_800mA_ocp_thd: 800mA ocp thd
+ */
+struct rtsx_cr_option {
+       u32 dev_flags;
+       bool force_clkreq_0;
+       bool ltr_en;
+       bool ltr_enabled;
+       bool ltr_active;
+       u32 ltr_active_latency;
+       u32 ltr_idle_latency;
+       u32 ltr_l1off_latency;
+       enum dev_aspm_mode dev_aspm_mode;
+       u32 l1_snooze_delay;
+       u8 ltr_l1off_sspwrgate;
+       u8 ltr_l1off_snooze_sspwrgate;
+       bool ocp_en;
+       u8 sd_400mA_ocp_thd;
+       u8 sd_800mA_ocp_thd;
+};
+
+/*
+ * struct rtsx_hw_param  - card reader hardware param
+ * @interrupt_en: indicates which interrupts are enabled
+ * @ocp_glitch: ocp glitch time
+ */
+struct rtsx_hw_param {
+       u32 interrupt_en;
+       u8 ocp_glitch;
+};
+
+#define rtsx_set_dev_flag(cr, flag) \
+       ((cr)->option.dev_flags |= (flag))
+#define rtsx_clear_dev_flag(cr, flag) \
+       ((cr)->option.dev_flags &= ~(flag))
+#define rtsx_check_dev_flag(cr, flag) \
+       ((cr)->option.dev_flags & (flag))
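Editor's note: the three helpers above are plain bit operations on option.dev_flags. A hedged usage sketch from inside a hypothetical chip-init hook (the flag choice is illustrative only):

	/* advertise L1SS power gating support on this chip */
	rtsx_set_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN);

	/* later code can key decisions off the same bit */
	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_clear_dev_flag(pcr, L1_SNOOZE_TEST_EN);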
+
+struct rtsx_pcr {
+       struct pci_dev                  *pci;
+       unsigned int                    id;
+       int                             pcie_cap;
+       struct rtsx_cr_option   option;
+       struct rtsx_hw_param hw_param;
+
+       /* pci resources */
+       unsigned long                   addr;
+       void __iomem                    *remap_addr;
+       int                             irq;
+
+       /* host reserved buffer */
+       void                            *rtsx_resv_buf;
+       dma_addr_t                      rtsx_resv_buf_addr;
+
+       void                            *host_cmds_ptr;
+       dma_addr_t                      host_cmds_addr;
+       int                             ci;
+
+       void                            *host_sg_tbl_ptr;
+       dma_addr_t                      host_sg_tbl_addr;
+       int                             sgi;
+
+       u32                             bier;
+       char                            trans_result;
+
+       unsigned int                    card_inserted;
+       unsigned int                    card_removed;
+       unsigned int                    card_exist;
+
+       struct delayed_work             carddet_work;
+       struct delayed_work             idle_work;
+
+       spinlock_t                      lock;
+       struct mutex                    pcr_mutex;
+       struct completion               *done;
+       struct completion               *finish_me;
+
+       unsigned int                    cur_clock;
+       bool                            remove_pci;
+       bool                            msi_en;
+
+#define EXTRA_CAPS_SD_SDR50            (1 << 0)
+#define EXTRA_CAPS_SD_SDR104           (1 << 1)
+#define EXTRA_CAPS_SD_DDR50            (1 << 2)
+#define EXTRA_CAPS_MMC_HSDDR           (1 << 3)
+#define EXTRA_CAPS_MMC_HS200           (1 << 4)
+#define EXTRA_CAPS_MMC_8BIT            (1 << 5)
+       u32                             extra_caps;
+
+#define IC_VER_A                       0
+#define IC_VER_B                       1
+#define IC_VER_C                       2
+#define IC_VER_D                       3
+       u8                              ic_version;
+
+       u8                              sd30_drive_sel_1v8;
+       u8                              sd30_drive_sel_3v3;
+       u8                              card_drive_sel;
+#define ASPM_L1_EN                     0x02
+       u8                              aspm_en;
+       bool                            aspm_enabled;
+
+#define PCR_MS_PMOS                    (1 << 0)
+#define PCR_REVERSE_SOCKET             (1 << 1)
+       u32                             flags;
+
+       u32                             tx_initial_phase;
+       u32                             rx_initial_phase;
+
+       const u32                       *sd_pull_ctl_enable_tbl;
+       const u32                       *sd_pull_ctl_disable_tbl;
+       const u32                       *ms_pull_ctl_enable_tbl;
+       const u32                       *ms_pull_ctl_disable_tbl;
+
+       const struct pcr_ops            *ops;
+       enum PDEV_STAT                  state;
+
+       u16                             reg_pm_ctrl3;
+
+       int                             num_slots;
+       struct rtsx_slot                *slots;
+
+       u8                              dma_error_count;
+       u8                      ocp_stat;
+       u8                      ocp_stat2;
+};
+
+#define PID_524A       0x524A
+#define PID_5249       0x5249
+#define PID_5250       0x5250
+#define PID_525A       0x525A
+#define PID_5260       0x5260
+
+#define CHK_PCI_PID(pcr, pid)          ((pcr)->pci->device == (pid))
+#define PCI_VID(pcr)                   ((pcr)->pci->vendor)
+#define PCI_PID(pcr)                   ((pcr)->pci->device)
+#define is_version(pcr, pid, ver)                              \
+       (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver))
+#define pcr_dbg(pcr, fmt, arg...)                              \
+       dev_dbg(&(pcr)->pci->dev, fmt, ##arg)
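Editor's note: CHK_PCI_PID()/is_version() gate code on the PCI device ID and the IC stepping, and pcr_dbg() wraps dev_dbg() for the card-reader PCI device. A hypothetical quirk guard (device, stepping and register tweak chosen only for illustration):

	if (is_version(pcr, PID_524A, IC_VER_A)) {
		pcr_dbg(pcr, "applying A-step quirk\n");
		/* e.g. turn off automatic L1 substate configuration */
		rtsx_pci_write_register(pcr, L1SUB_CONFIG2, L1SUB_AUTO_CFG, 0);
	}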
+
+#define SDR104_PHASE(val)              ((val) & 0xFF)
+#define SDR50_PHASE(val)               (((val) >> 8) & 0xFF)
+#define DDR50_PHASE(val)               (((val) >> 16) & 0xFF)
+#define SDR104_TX_PHASE(pcr)           SDR104_PHASE((pcr)->tx_initial_phase)
+#define SDR50_TX_PHASE(pcr)            SDR50_PHASE((pcr)->tx_initial_phase)
+#define DDR50_TX_PHASE(pcr)            DDR50_PHASE((pcr)->tx_initial_phase)
+#define SDR104_RX_PHASE(pcr)           SDR104_PHASE((pcr)->rx_initial_phase)
+#define SDR50_RX_PHASE(pcr)            SDR50_PHASE((pcr)->rx_initial_phase)
+#define DDR50_RX_PHASE(pcr)            DDR50_PHASE((pcr)->rx_initial_phase)
+#define SET_CLOCK_PHASE(sdr104, sdr50, ddr50)  \
+                               (((ddr50) << 16) | ((sdr50) << 8) | (sdr104))
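Editor's note: tx_initial_phase/rx_initial_phase pack one sample-phase byte per speed mode into a u32; SET_CLOCK_PHASE() builds that value and the *_PHASE() accessors above extract the individual bytes again. A short sketch (the phase numbers are illustrative):

	pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11);
	pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);

	/* SDR104_TX_PHASE(pcr) now yields 27, DDR50_RX_PHASE(pcr) yields 5 */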
+
+void rtsx_pci_start_run(struct rtsx_pcr *pcr);
+int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data);
+int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data);
+int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
+int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
+void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr);
+void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
+               u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
+void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr);
+int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout);
+int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+               int num_sg, bool read, int timeout);
+int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+               int num_sg, bool read);
+void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+               int num_sg, bool read);
+int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+               int count, bool read, int timeout);
+int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
+int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
+int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+               u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage);
+unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr);
+void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr);
+
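Editor's note: the exported helpers above form a small batched register interface: rtsx_pci_init_cmd() resets the command index, rtsx_pci_add_cmd() queues read/write/check operations into the host command buffer, and rtsx_pci_send_cmd() issues the batch and waits for completion. A hedged caller sketch (the registers touched and the assumed millisecond timeout are illustrative):

	int err;

	rtsx_pci_init_cmd(pcr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, SD_CLK_EN);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;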
+static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr)
+{
+       return (u8 *)(pcr->host_cmds_ptr);
+}
+
+static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr,
+               u8 mask, u8 append)
+{
+       int err;
+       u8 val;
+
+       err = pci_read_config_byte(pcr->pci, addr, &val);
+       if (err < 0)
+               return err;
+       return pci_write_config_byte(pcr->pci, addr, (val & mask) | append);
+}
+
+static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
+{
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg,     0xFF, val >> 24);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 1, 0xFF, val >> 16);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 2, 0xFF, val >> 8);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val);
+}
+
+static inline int rtsx_pci_update_phy(struct rtsx_pcr *pcr, u8 addr,
+       u16 mask, u16 append)
+{
+       int err;
+       u16 val;
+
+       err = rtsx_pci_read_phy_register(pcr, addr, &val);
+       if (err < 0)
+               return err;
+
+       return rtsx_pci_write_phy_register(pcr, addr, (val & mask) | append);
+}
+
+#endif
diff --git a/include/linux/rtsx_usb.h b/include/linux/rtsx_usb.h
new file mode 100644 (file)
index 0000000..c446e4f
--- /dev/null
@@ -0,0 +1,628 @@
+/* Driver for Realtek RTS5139 USB card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ *   Roger Tseng <rogerable@realtek.com>
+ */
+
+#ifndef __RTSX_USB_H
+#define __RTSX_USB_H
+
+#include <linux/usb.h>
+
+/* related module names */
+#define RTSX_USB_SD_CARD       0
+#define RTSX_USB_MS_CARD       1
+
+/* endpoint numbers */
+#define EP_BULK_OUT            1
+#define EP_BULK_IN             2
+#define EP_INTR_IN             3
+
+/* USB vendor requests */
+#define RTSX_USB_REQ_REG_OP    0x00
+#define RTSX_USB_REQ_POLL      0x02
+
+/* miscellaneous parameters */
+#define MIN_DIV_N              60
+#define MAX_DIV_N              120
+
+#define MAX_PHASE              15
+#define RX_TUNING_CNT          3
+
+#define QFN24                  0
+#define LQFP48                 1
+#define CHECK_PKG(ucr, pkg)    ((ucr)->package == (pkg))
+
+/* data structures */
+struct rtsx_ucr {
+       u16                     vendor_id;
+       u16                     product_id;
+
+       int                     package;
+       u8                      ic_version;
+       bool                    is_rts5179;
+
+       unsigned int            cur_clk;
+
+       u8                      *cmd_buf;
+       unsigned int            cmd_idx;
+       u8                      *rsp_buf;
+
+       struct usb_device       *pusb_dev;
+       struct usb_interface    *pusb_intf;
+       struct usb_sg_request   current_sg;
+       unsigned char           *iobuf;
+       dma_addr_t              iobuf_dma;
+
+       struct timer_list       sg_timer;
+       struct mutex            dev_mutex;
+};
+
+/* buffer size */
+#define IOBUF_SIZE             1024
+
+/* prototypes of exported functions */
+extern int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status);
+
+extern int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data);
+extern int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask,
+               u8 data);
+
+extern int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask,
+               u8 data);
+extern int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr,
+               u8 *data);
+
+extern void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type,
+               u16 reg_addr, u8 mask, u8 data);
+extern int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout);
+extern int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout);
+extern int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe,
+                             void *buf, unsigned int len, int use_sg,
+                             unsigned int *act_len, int timeout);
+
+extern int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len);
+extern int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len);
+extern int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock,
+               u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+extern int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card);
+
+/* card status */
+#define SD_CD          0x01
+#define MS_CD          0x02
+#define XD_CD          0x04
+#define CD_MASK                (SD_CD | MS_CD | XD_CD)
+#define SD_WP          0x08
+
+/* reader command field offset & parameters */
+#define READ_REG_CMD           0
+#define WRITE_REG_CMD          1
+#define CHECK_REG_CMD          2
+
+#define PACKET_TYPE            4
+#define CNT_H                  5
+#define CNT_L                  6
+#define STAGE_FLAG             7
+#define CMD_OFFSET             8
+#define SEQ_WRITE_DATA_OFFSET  12
+
+#define BATCH_CMD              0
+#define SEQ_READ               1
+#define SEQ_WRITE              2
+
+#define STAGE_R                        0x01
+#define STAGE_DI               0x02
+#define STAGE_DO               0x04
+#define STAGE_MS_STATUS                0x08
+#define STAGE_XD_STATUS                0x10
+#define MODE_C                 0x00
+#define MODE_CR                        (STAGE_R)
+#define MODE_CDIR              (STAGE_R | STAGE_DI)
+#define MODE_CDOR              (STAGE_R | STAGE_DO)
+
+#define EP0_OP_SHIFT           14
+#define EP0_READ_REG_CMD       2
+#define EP0_WRITE_REG_CMD      3
+
+#define rtsx_usb_cmd_hdr_tag(ucr)              \
+       do {                                    \
+               ucr->cmd_buf[0] = 'R';          \
+               ucr->cmd_buf[1] = 'T';          \
+               ucr->cmd_buf[2] = 'C';          \
+               ucr->cmd_buf[3] = 'R';          \
+       } while (0)
+
+static inline void rtsx_usb_init_cmd(struct rtsx_ucr *ucr)
+{
+       rtsx_usb_cmd_hdr_tag(ucr);
+       ucr->cmd_idx = 0;
+       ucr->cmd_buf[PACKET_TYPE] = BATCH_CMD;
+}
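Editor's note: the USB side mirrors the PCI batch interface: rtsx_usb_init_cmd() writes the "RTCR" header and resets the command index, rtsx_usb_add_cmd() appends operations, rtsx_usb_send_cmd() submits them with a transfer mode (MODE_C, MODE_CR, ...), and rtsx_usb_get_rsp() fetches the bytes returned for read commands into rsp_buf. A hedged sketch (register choice and timeouts are illustrative):

	u8 val;
	int err;

	rtsx_usb_init_cmd(ucr);
	rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_EXIST, 0x00, 0x00);
	err = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
	if (err)
		return err;
	err = rtsx_usb_get_rsp(ucr, 1, 100);
	if (err)
		return err;
	val = ucr->rsp_buf[0];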
+
+/* internal register address */
+#define FPDCTL                         0xFC00
+#define SSC_DIV_N_0                    0xFC07
+#define SSC_CTL1                       0xFC09
+#define SSC_CTL2                       0xFC0A
+#define CFG_MODE                       0xFC0E
+#define CFG_MODE_1                     0xFC0F
+#define RCCTL                          0xFC14
+#define SOF_WDOG                       0xFC28
+#define SYS_DUMMY0                     0xFC30
+
+#define MS_BLKEND                      0xFD30
+#define MS_READ_START                  0xFD31
+#define MS_READ_COUNT                  0xFD32
+#define MS_WRITE_START                 0xFD33
+#define MS_WRITE_COUNT                 0xFD34
+#define MS_COMMAND                     0xFD35
+#define MS_OLD_BLOCK_0                 0xFD36
+#define MS_OLD_BLOCK_1                 0xFD37
+#define MS_NEW_BLOCK_0                 0xFD38
+#define MS_NEW_BLOCK_1                 0xFD39
+#define MS_LOG_BLOCK_0                 0xFD3A
+#define MS_LOG_BLOCK_1                 0xFD3B
+#define MS_BUS_WIDTH                   0xFD3C
+#define MS_PAGE_START                  0xFD3D
+#define MS_PAGE_LENGTH                 0xFD3E
+#define MS_CFG                         0xFD40
+#define MS_TPC                         0xFD41
+#define MS_TRANS_CFG                   0xFD42
+#define MS_TRANSFER                    0xFD43
+#define MS_INT_REG                     0xFD44
+#define MS_BYTE_CNT                    0xFD45
+#define MS_SECTOR_CNT_L                        0xFD46
+#define MS_SECTOR_CNT_H                        0xFD47
+#define MS_DBUS_H                      0xFD48
+
+#define CARD_DMA1_CTL                  0xFD5C
+#define CARD_PULL_CTL1                 0xFD60
+#define CARD_PULL_CTL2                 0xFD61
+#define CARD_PULL_CTL3                 0xFD62
+#define CARD_PULL_CTL4                 0xFD63
+#define CARD_PULL_CTL5                 0xFD64
+#define CARD_PULL_CTL6                 0xFD65
+#define CARD_EXIST                     0xFD6F
+#define CARD_INT_PEND                  0xFD71
+
+#define LDO_POWER_CFG                  0xFD7B
+
+#define SD_CFG1                                0xFDA0
+#define SD_CFG2                                0xFDA1
+#define SD_CFG3                                0xFDA2
+#define SD_STAT1                       0xFDA3
+#define SD_STAT2                       0xFDA4
+#define SD_BUS_STAT                    0xFDA5
+#define SD_PAD_CTL                     0xFDA6
+#define SD_SAMPLE_POINT_CTL            0xFDA7
+#define SD_PUSH_POINT_CTL              0xFDA8
+#define SD_CMD0                                0xFDA9
+#define SD_CMD1                                0xFDAA
+#define SD_CMD2                                0xFDAB
+#define SD_CMD3                                0xFDAC
+#define SD_CMD4                                0xFDAD
+#define SD_CMD5                                0xFDAE
+#define SD_BYTE_CNT_L                  0xFDAF
+#define SD_BYTE_CNT_H                  0xFDB0
+#define SD_BLOCK_CNT_L                 0xFDB1
+#define SD_BLOCK_CNT_H                 0xFDB2
+#define SD_TRANSFER                    0xFDB3
+#define SD_CMD_STATE                   0xFDB5
+#define SD_DATA_STATE                  0xFDB6
+#define SD_VPCLK0_CTL                  0xFC2A
+#define SD_VPCLK1_CTL                  0xFC2B
+#define SD_DCMPS0_CTL                  0xFC2C
+#define SD_DCMPS1_CTL                  0xFC2D
+
+#define CARD_DMA1_CTL                  0xFD5C
+
+#define HW_VERSION                     0xFC01
+
+#define SSC_CLK_FPGA_SEL               0xFC02
+#define CLK_DIV                                0xFC03
+#define SFSM_ED                                0xFC04
+
+#define CD_DEGLITCH_WIDTH              0xFC20
+#define CD_DEGLITCH_EN                 0xFC21
+#define AUTO_DELINK_EN                 0xFC23
+
+#define FPGA_PULL_CTL                  0xFC1D
+#define CARD_CLK_SOURCE                        0xFC2E
+
+#define CARD_SHARE_MODE                        0xFD51
+#define CARD_DRIVE_SEL                 0xFD52
+#define CARD_STOP                      0xFD53
+#define CARD_OE                                0xFD54
+#define CARD_AUTO_BLINK                        0xFD55
+#define CARD_GPIO                      0xFD56
+#define SD30_DRIVE_SEL                 0xFD57
+
+#define CARD_DATA_SOURCE               0xFD5D
+#define CARD_SELECT                    0xFD5E
+
+#define CARD_CLK_EN                    0xFD79
+#define CARD_PWR_CTL                   0xFD7A
+
+#define OCPCTL                         0xFD80
+#define OCPPARA1                       0xFD81
+#define OCPPARA2                       0xFD82
+#define OCPSTAT                                0xFD83
+
+#define HS_USB_STAT                    0xFE01
+#define HS_VCONTROL                    0xFE26
+#define HS_VSTAIN                      0xFE27
+#define HS_VLOADM                      0xFE28
+#define HS_VSTAOUT                     0xFE29
+
+#define MC_IRQ                         0xFF00
+#define MC_IRQEN                       0xFF01
+#define MC_FIFO_CTL                    0xFF02
+#define MC_FIFO_BC0                    0xFF03
+#define MC_FIFO_BC1                    0xFF04
+#define MC_FIFO_STAT                   0xFF05
+#define MC_FIFO_MODE                   0xFF06
+#define MC_FIFO_RD_PTR0                        0xFF07
+#define MC_FIFO_RD_PTR1                        0xFF08
+#define MC_DMA_CTL                     0xFF10
+#define MC_DMA_TC0                     0xFF11
+#define MC_DMA_TC1                     0xFF12
+#define MC_DMA_TC2                     0xFF13
+#define MC_DMA_TC3                     0xFF14
+#define MC_DMA_RST                     0xFF15
+
+#define RBUF_SIZE_MASK                 0xFBFF
+#define RBUF_BASE                      0xF000
+#define PPBUF_BASE1                    0xF800
+#define PPBUF_BASE2                    0xFA00
+
+/* internal register value macros */
+#define POWER_OFF                      0x03
+#define PARTIAL_POWER_ON               0x02
+#define POWER_ON                       0x00
+#define POWER_MASK                     0x03
+#define LDO3318_PWR_MASK               0x0C
+#define LDO_ON                         0x00
+#define LDO_SUSPEND                    0x08
+#define LDO_OFF                                0x0C
+#define DV3318_AUTO_PWR_OFF            0x10
+#define FORCE_LDO_POWERB               0x60
+
+/* LDO_POWER_CFG */
+#define TUNE_SD18_MASK                 0x1C
+#define TUNE_SD18_1V7                  0x00
+#define TUNE_SD18_1V8                  (0x01 << 2)
+#define TUNE_SD18_1V9                  (0x02 << 2)
+#define TUNE_SD18_2V0                  (0x03 << 2)
+#define TUNE_SD18_2V7                  (0x04 << 2)
+#define TUNE_SD18_2V8                  (0x05 << 2)
+#define TUNE_SD18_2V9                  (0x06 << 2)
+#define TUNE_SD18_3V3                  (0x07 << 2)
+
+/* CLK_DIV */
+#define CLK_CHANGE                     0x80
+#define CLK_DIV_1                      0x00
+#define CLK_DIV_2                      0x01
+#define CLK_DIV_4                      0x02
+#define CLK_DIV_8                      0x03
+
+#define SSC_POWER_MASK                 0x01
+#define SSC_POWER_DOWN                 0x01
+#define SSC_POWER_ON                   0x00
+
+#define FPGA_VER                       0x80
+#define HW_VER_MASK                    0x0F
+
+#define EXTEND_DMA1_ASYNC_SIGNAL       0x02
+
+/* CFG_MODE */
+#define XTAL_FREE                      0x80
+#define CLK_MODE_MASK                  0x03
+#define CLK_MODE_12M_XTAL              0x00
+#define CLK_MODE_NON_XTAL              0x01
+#define CLK_MODE_24M_OSC               0x02
+#define CLK_MODE_48M_OSC               0x03
+
+/* CFG_MODE_1 */
+#define RTS5179                                0x02
+
+#define NYET_EN                                0x01
+#define NYET_MSAK                      0x01
+
+#define SD30_DRIVE_MASK                        0x07
+#define SD20_DRIVE_MASK                        0x03
+
+#define DISABLE_SD_CD                  0x08
+#define DISABLE_MS_CD                  0x10
+#define DISABLE_XD_CD                  0x20
+#define SD_CD_DEGLITCH_EN              0x01
+#define MS_CD_DEGLITCH_EN              0x02
+#define XD_CD_DEGLITCH_EN              0x04
+
+#define        CARD_SHARE_LQFP48               0x04
+#define        CARD_SHARE_QFN24                0x00
+#define CARD_SHARE_LQFP_SEL            0x04
+#define        CARD_SHARE_XD                   0x00
+#define        CARD_SHARE_SD                   0x01
+#define        CARD_SHARE_MS                   0x02
+#define CARD_SHARE_MASK                        0x03
+
+
+/* SD30_DRIVE_SEL */
+#define DRIVER_TYPE_A                  0x05
+#define DRIVER_TYPE_B                  0x03
+#define DRIVER_TYPE_C                  0x02
+#define DRIVER_TYPE_D                  0x01
+
+/* SD_BUS_STAT */
+#define        SD_CLK_TOGGLE_EN                0x80
+#define        SD_CLK_FORCE_STOP               0x40
+#define        SD_DAT3_STATUS                  0x10
+#define        SD_DAT2_STATUS                  0x08
+#define        SD_DAT1_STATUS                  0x04
+#define        SD_DAT0_STATUS                  0x02
+#define        SD_CMD_STATUS                   0x01
+
+/* SD_PAD_CTL */
+#define        SD_IO_USING_1V8                 0x80
+#define        SD_IO_USING_3V3                 0x7F
+#define        TYPE_A_DRIVING                  0x00
+#define        TYPE_B_DRIVING                  0x01
+#define        TYPE_C_DRIVING                  0x02
+#define        TYPE_D_DRIVING                  0x03
+
+/* CARD_CLK_EN */
+#define SD_CLK_EN                      0x04
+#define MS_CLK_EN                      0x08
+
+/* CARD_SELECT */
+#define SD_MOD_SEL                     2
+#define MS_MOD_SEL                     3
+
+/* CARD_SHARE_MODE */
+#define        CARD_SHARE_LQFP48               0x04
+#define        CARD_SHARE_QFN24                0x00
+#define CARD_SHARE_LQFP_SEL            0x04
+#define        CARD_SHARE_XD                   0x00
+#define        CARD_SHARE_SD                   0x01
+#define        CARD_SHARE_MS                   0x02
+#define CARD_SHARE_MASK                        0x03
+
+/* SSC_CTL1 */
+#define SSC_RSTB                       0x80
+#define SSC_8X_EN                      0x40
+#define SSC_FIX_FRAC                   0x20
+#define SSC_SEL_1M                     0x00
+#define SSC_SEL_2M                     0x08
+#define SSC_SEL_4M                     0x10
+#define SSC_SEL_8M                     0x18
+
+/* SSC_CTL2 */
+#define SSC_DEPTH_MASK                 0x03
+#define SSC_DEPTH_DISALBE              0x00
+#define SSC_DEPTH_2M                   0x01
+#define SSC_DEPTH_1M                   0x02
+#define SSC_DEPTH_512K                 0x03
+
+/* SD_VPCLK0_CTL */
+#define PHASE_CHANGE                   0x80
+#define PHASE_NOT_RESET                        0x40
+
+/* SD_TRANSFER */
+#define        SD_TRANSFER_START               0x80
+#define        SD_TRANSFER_END                 0x40
+#define SD_STAT_IDLE                   0x20
+#define        SD_TRANSFER_ERR                 0x10
+#define        SD_TM_NORMAL_WRITE              0x00
+#define        SD_TM_AUTO_WRITE_3              0x01
+#define        SD_TM_AUTO_WRITE_4              0x02
+#define        SD_TM_AUTO_READ_3               0x05
+#define        SD_TM_AUTO_READ_4               0x06
+#define        SD_TM_CMD_RSP                   0x08
+#define        SD_TM_AUTO_WRITE_1              0x09
+#define        SD_TM_AUTO_WRITE_2              0x0A
+#define        SD_TM_NORMAL_READ               0x0C
+#define        SD_TM_AUTO_READ_1               0x0D
+#define        SD_TM_AUTO_READ_2               0x0E
+#define        SD_TM_AUTO_TUNING               0x0F
+
+/* SD_CFG1 */
+#define SD_CLK_DIVIDE_0                        0x00
+#define        SD_CLK_DIVIDE_256               0xC0
+#define        SD_CLK_DIVIDE_128               0x80
+#define SD_CLK_DIVIDE_MASK             0xC0
+#define        SD_BUS_WIDTH_1BIT               0x00
+#define        SD_BUS_WIDTH_4BIT               0x01
+#define        SD_BUS_WIDTH_8BIT               0x02
+#define        SD_ASYNC_FIFO_RST               0x10
+#define        SD_20_MODE                      0x00
+#define        SD_DDR_MODE                     0x04
+#define        SD_30_MODE                      0x08
+
+/* SD_CFG2 */
+#define        SD_CALCULATE_CRC7               0x00
+#define        SD_NO_CALCULATE_CRC7            0x80
+#define        SD_CHECK_CRC16                  0x00
+#define        SD_NO_CHECK_CRC16               0x40
+#define SD_WAIT_CRC_TO_EN              0x20
+#define        SD_WAIT_BUSY_END                0x08
+#define        SD_NO_WAIT_BUSY_END             0x00
+#define        SD_CHECK_CRC7                   0x00
+#define        SD_NO_CHECK_CRC7                0x04
+#define        SD_RSP_LEN_0                    0x00
+#define        SD_RSP_LEN_6                    0x01
+#define        SD_RSP_LEN_17                   0x02
+#define        SD_RSP_TYPE_R0                  0x04
+#define        SD_RSP_TYPE_R1                  0x01
+#define        SD_RSP_TYPE_R1b                 0x09
+#define        SD_RSP_TYPE_R2                  0x02
+#define        SD_RSP_TYPE_R3                  0x05
+#define        SD_RSP_TYPE_R4                  0x05
+#define        SD_RSP_TYPE_R5                  0x01
+#define        SD_RSP_TYPE_R6                  0x01
+#define        SD_RSP_TYPE_R7                  0x01
+
+/* SD_STAT1 */
+#define        SD_CRC7_ERR                     0x80
+#define        SD_CRC16_ERR                    0x40
+#define        SD_CRC_WRITE_ERR                0x20
+#define        SD_CRC_WRITE_ERR_MASK           0x1C
+#define        GET_CRC_TIME_OUT                0x02
+#define        SD_TUNING_COMPARE_ERR           0x01
+
+/* SD_DATA_STATE */
+#define SD_DATA_IDLE                   0x80
+
+/* CARD_DATA_SOURCE */
+#define PINGPONG_BUFFER                        0x01
+#define RING_BUFFER                    0x00
+
+/* CARD_OE */
+#define SD_OUTPUT_EN                   0x04
+#define MS_OUTPUT_EN                   0x08
+
+/* CARD_STOP */
+#define SD_STOP                                0x04
+#define MS_STOP                                0x08
+#define SD_CLR_ERR                     0x40
+#define MS_CLR_ERR                     0x80
+
+/* CARD_CLK_SOURCE */
+#define CRC_FIX_CLK                    (0x00 << 0)
+#define CRC_VAR_CLK0                   (0x01 << 0)
+#define CRC_VAR_CLK1                   (0x02 << 0)
+#define SD30_FIX_CLK                   (0x00 << 2)
+#define SD30_VAR_CLK0                  (0x01 << 2)
+#define SD30_VAR_CLK1                  (0x02 << 2)
+#define SAMPLE_FIX_CLK                 (0x00 << 4)
+#define SAMPLE_VAR_CLK0                        (0x01 << 4)
+#define SAMPLE_VAR_CLK1                        (0x02 << 4)
+
+/* SD_SAMPLE_POINT_CTL */
+#define        DDR_FIX_RX_DAT                  0x00
+#define        DDR_VAR_RX_DAT                  0x80
+#define        DDR_FIX_RX_DAT_EDGE             0x00
+#define        DDR_FIX_RX_DAT_14_DELAY         0x40
+#define        DDR_FIX_RX_CMD                  0x00
+#define        DDR_VAR_RX_CMD                  0x20
+#define        DDR_FIX_RX_CMD_POS_EDGE         0x00
+#define        DDR_FIX_RX_CMD_14_DELAY         0x10
+#define        SD20_RX_POS_EDGE                0x00
+#define        SD20_RX_14_DELAY                0x08
+#define SD20_RX_SEL_MASK               0x08
+
+/* SD_PUSH_POINT_CTL */
+#define        DDR_FIX_TX_CMD_DAT              0x00
+#define        DDR_VAR_TX_CMD_DAT              0x80
+#define        DDR_FIX_TX_DAT_14_TSU           0x00
+#define        DDR_FIX_TX_DAT_12_TSU           0x40
+#define        DDR_FIX_TX_CMD_NEG_EDGE         0x00
+#define        DDR_FIX_TX_CMD_14_AHEAD         0x20
+#define        SD20_TX_NEG_EDGE                0x00
+#define        SD20_TX_14_AHEAD                0x10
+#define SD20_TX_SEL_MASK               0x10
+#define        DDR_VAR_SDCLK_POL_SWAP          0x01
+
+/* MS_CFG */
+#define        SAMPLE_TIME_RISING              0x00
+#define        SAMPLE_TIME_FALLING             0x80
+#define        PUSH_TIME_DEFAULT               0x00
+#define        PUSH_TIME_ODD                   0x40
+#define        NO_EXTEND_TOGGLE                0x00
+#define        EXTEND_TOGGLE_CHK               0x20
+#define        MS_BUS_WIDTH_1                  0x00
+#define        MS_BUS_WIDTH_4                  0x10
+#define        MS_BUS_WIDTH_8                  0x18
+#define        MS_2K_SECTOR_MODE               0x04
+#define        MS_512_SECTOR_MODE              0x00
+#define        MS_TOGGLE_TIMEOUT_EN            0x00
+#define        MS_TOGGLE_TIMEOUT_DISEN         0x01
+#define MS_NO_CHECK_INT                        0x02
+
+/* MS_TRANS_CFG */
+#define        WAIT_INT                        0x80
+#define        NO_WAIT_INT                     0x00
+#define        NO_AUTO_READ_INT_REG            0x00
+#define        AUTO_READ_INT_REG               0x40
+#define        MS_CRC16_ERR                    0x20
+#define        MS_RDY_TIMEOUT                  0x10
+#define        MS_INT_CMDNK                    0x08
+#define        MS_INT_BREQ                     0x04
+#define        MS_INT_ERR                      0x02
+#define        MS_INT_CED                      0x01
+
+/* MS_TRANSFER */
+#define        MS_TRANSFER_START               0x80
+#define        MS_TRANSFER_END                 0x40
+#define        MS_TRANSFER_ERR                 0x20
+#define        MS_BS_STATE                     0x10
+#define        MS_TM_READ_BYTES                0x00
+#define        MS_TM_NORMAL_READ               0x01
+#define        MS_TM_WRITE_BYTES               0x04
+#define        MS_TM_NORMAL_WRITE              0x05
+#define        MS_TM_AUTO_READ                 0x08
+#define        MS_TM_AUTO_WRITE                0x0C
+#define MS_TM_SET_CMD                  0x06
+#define MS_TM_COPY_PAGE                        0x07
+#define MS_TM_MULTI_READ               0x02
+#define MS_TM_MULTI_WRITE              0x03
+
+/* MC_FIFO_CTL */
+#define FIFO_FLUSH                     0x01
+
+/* MC_DMA_RST */
+#define DMA_RESET  0x01
+
+/* MC_DMA_CTL */
+#define DMA_TC_EQ_0                    0x80
+#define DMA_DIR_TO_CARD                        0x00
+#define DMA_DIR_FROM_CARD              0x02
+#define DMA_EN                         0x01
+#define DMA_128                                (0 << 2)
+#define DMA_256                                (1 << 2)
+#define DMA_512                                (2 << 2)
+#define DMA_1024                       (3 << 2)
+#define DMA_PACK_SIZE_MASK             0x0C
+
+/* CARD_INT_PEND */
+#define XD_INT                         0x10
+#define MS_INT                         0x08
+#define SD_INT                         0x04
+
+/* LED operations */
+static inline int rtsx_usb_turn_on_led(struct rtsx_ucr *ucr)
+{
+       return  rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x02);
+}
+
+static inline int rtsx_usb_turn_off_led(struct rtsx_ucr *ucr)
+{
+       return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x03);
+}
+
+/* HW error clearing */
+static inline void rtsx_usb_clear_fsm_err(struct rtsx_ucr *ucr)
+{
+       rtsx_usb_ep0_write_register(ucr, SFSM_ED, 0xf8, 0xf8);
+}
+
+static inline void rtsx_usb_clear_dma_err(struct rtsx_ucr *ucr)
+{
+       rtsx_usb_ep0_write_register(ucr, MC_FIFO_CTL,
+                       FIFO_FLUSH, FIFO_FLUSH);
+       rtsx_usb_ep0_write_register(ucr, MC_DMA_RST, DMA_RESET, DMA_RESET);
+}
+#endif /* __RTS51139_H */
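
A minimal sketch of how a card driver might use the error-clearing helpers above after a failed transfer; the function name and error handling are illustrative only, assuming the header above is included:

	static void example_sd_transfer_failed(struct rtsx_ucr *ucr, int err)
	{
		/* a timeout usually means the card state machine is stuck */
		if (err == -ETIMEDOUT)
			rtsx_usb_clear_fsm_err(ucr);

		/* flush the FIFO and reset the DMA engine before any retry */
		rtsx_usb_clear_dma_err(ucr);

		rtsx_usb_turn_off_led(ucr);
	}
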
index cc0072e..857a72c 100644 (file)
@@ -10,9 +10,6 @@
  */
 typedef struct {
        arch_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-       unsigned int break_lock;
-#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
        unsigned int magic, owner_cpu;
        void *owner;
index 21991d6..68a504f 100644 (file)
@@ -849,17 +849,6 @@ struct task_struct {
        struct held_lock                held_locks[MAX_LOCK_DEPTH];
 #endif
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCKS_NR 64UL
-       struct hist_lock *xhlocks; /* Crossrelease history locks */
-       unsigned int xhlock_idx;
-       /* For restoring at history boundaries */
-       unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
-       unsigned int hist_id;
-       /* For overwrite check at each context exit */
-       unsigned int hist_id_save[XHLOCK_CTX_NR];
-#endif
-
 #ifdef CONFIG_UBSAN
        unsigned int                    in_ubsan;
 #endif
@@ -1457,12 +1446,21 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
 void yield(void);
 
 union thread_union {
+#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+       struct task_struct task;
+#endif
 #ifndef CONFIG_THREAD_INFO_IN_TASK
        struct thread_info thread_info;
 #endif
        unsigned long stack[THREAD_SIZE/sizeof(long)];
 };
 
+#ifndef CONFIG_THREAD_INFO_IN_TASK
+extern struct thread_info init_thread_info;
+#endif
+
+extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
+
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 static inline struct thread_info *task_thread_info(struct task_struct *task)
 {
@@ -1503,7 +1501,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
        __set_task_comm(tsk, from, false);
 }
 
-extern char *get_task_comm(char *to, struct task_struct *tsk);
+extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
+#define get_task_comm(buf, tsk) ({                     \
+       BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);     \
+       __get_task_comm(buf, sizeof(buf), tsk);         \
+})
 
 #ifdef CONFIG_SMP
 void scheduler_ipi(void);
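
With `__get_task_comm()` taking an explicit length, the `get_task_comm()` wrapper now rejects, at build time, any destination that is not a real `TASK_COMM_LEN`-sized array. A minimal sketch of a conforming caller; the function name is illustrative and assumes <linux/sched.h> is included:

	static void example_log_comm(struct task_struct *tsk)
	{
		char comm[TASK_COMM_LEN];	/* must be an array, not a pointer:
						 * sizeof(comm) is checked by BUILD_BUG_ON() */

		get_task_comm(comm, tsk);
		pr_info("task %d comm %s\n", task_pid_nr(tsk), comm);
	}
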
index 9c88473..ec912d0 100644 (file)
@@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_UNSTABLE           22      /* mm is unstable for copy_from_user */
 #define MMF_HUGE_ZERO_PAGE     23      /* mm has ever used the global huge zero page */
 #define MMF_DISABLE_THP                24      /* disable THP for all VMAs */
+#define MMF_OOM_VICTIM         25      /* mm is the oom victim */
 #define MMF_DISABLE_THP_MASK   (1 << MMF_DISABLE_THP)
 
 #define MMF_INIT_MASK          (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
index e69402d..d4bb46a 100644 (file)
@@ -184,7 +184,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
        struct serdev_device *serdev = ctrl->serdev;
 
        if (!serdev || !serdev->ops->receive_buf)
-               return -EINVAL;
+               return 0;
 
        return serdev->ops->receive_buf(serdev, data, count);
 }
@@ -193,6 +193,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
 
 int serdev_device_open(struct serdev_device *);
 void serdev_device_close(struct serdev_device *);
+int devm_serdev_device_open(struct device *, struct serdev_device *);
 unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
 void serdev_device_set_flow_control(struct serdev_device *, bool);
 int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
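
A minimal probe-time sketch of the new `devm_serdev_device_open()`, which ties the open to the device's lifetime so no explicit close is needed on the error or remove paths; the driver function name and baud rate are illustrative, assuming <linux/serdev.h> is included:

	static int example_serdev_probe(struct serdev_device *serdev)
	{
		int ret;

		/* closed automatically through devres when the driver detaches */
		ret = devm_serdev_device_open(&serdev->dev, serdev);
		if (ret)
			return ret;

		serdev_device_set_baudrate(serdev, 115200);
		serdev_device_set_flow_control(serdev, false);
		return 0;
	}
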
index ff3642d..94081e9 100644 (file)
@@ -17,7 +17,6 @@ struct sh_eth_plat_data {
        unsigned char mac_addr[ETH_ALEN];
        unsigned no_ether_link:1;
        unsigned ether_link_active_low:1;
-       unsigned needs_init:1;
 };
 
 #endif
index bc486ef..a38c80e 100644 (file)
@@ -1406,8 +1406,7 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
 }
 
 /*
- * If users == 1, we are the only owner and are can avoid redundant
- * atomic change.
+ * If users == 1, we are the only owner and can avoid redundant atomic changes.
  */
 
 /**
index 3c6d393..ec85b7a 100644 (file)
@@ -12,11 +12,9 @@ struct device;
 extern int register_sound_special(const struct file_operations *fops, int unit);
 extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev);
 extern int register_sound_mixer(const struct file_operations *fops, int dev);
-extern int register_sound_midi(const struct file_operations *fops, int dev);
 extern int register_sound_dsp(const struct file_operations *fops, int dev);
 
 extern void unregister_sound_special(int unit);
 extern void unregister_sound_mixer(int unit);
-extern void unregister_sound_midi(int unit);
 extern void unregister_sound_dsp(int unit);
 #endif /* _LINUX_SOUND_H */
index 7b2170b..bc6bb32 100644 (file)
@@ -126,7 +126,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
  *     for that name.  This appears in the sysfs "modalias" attribute
  *     for driver coldplugging, and in uevents used for hotplugging
  * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
- *     when not using a GPIO line)
+ *     not using a GPIO line)
  *
  * @statistics: statistics for the spi_device
  *
index a391861..3bf2735 100644 (file)
@@ -107,16 +107,11 @@ do {                                                              \
 
 #define raw_spin_is_locked(lock)       arch_spin_is_locked(&(lock)->raw_lock)
 
-#ifdef CONFIG_GENERIC_LOCKBREAK
-#define raw_spin_is_contended(lock) ((lock)->break_lock)
-#else
-
 #ifdef arch_spin_is_contended
 #define raw_spin_is_contended(lock)    arch_spin_is_contended(&(lock)->raw_lock)
 #else
 #define raw_spin_is_contended(lock)    (((void)(lock), 0))
 #endif /*arch_spin_is_contended*/
-#endif
 
 /*
  * This barrier must provide two things:
index 73548eb..24b4e6f 100644 (file)
@@ -19,9 +19,6 @@
 
 typedef struct raw_spinlock {
        arch_spinlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-       unsigned int break_lock;
-#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
        unsigned int magic, owner_cpu;
        void *owner;
index 410ecf1..cfd83eb 100644 (file)
@@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
 {
        __kernel_size_t ret;
        size_t p_size = __builtin_object_size(p, 0);
-       if (p_size == (size_t)-1)
+
+       /* Work around gcc excess stack consumption issue */
+       if (p_size == (size_t)-1 ||
+           (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
                return __builtin_strlen(p);
        ret = strnlen(p, p_size);
        if (p_size <= ret)
index 270bad0..40d2822 100644 (file)
@@ -213,7 +213,7 @@ extern void __init cache_initialize(void);
 extern int cache_register_net(struct cache_detail *cd, struct net *net);
 extern void cache_unregister_net(struct cache_detail *cd, struct net *net);
 
-extern struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net);
+extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net);
 extern void cache_destroy_net(struct cache_detail *cd, struct net *net);
 
 extern void sunrpc_init_cache_detail(struct cache_detail *cd);
index d60b0f5..cc22a24 100644 (file)
@@ -443,32 +443,8 @@ extern bool pm_save_wakeup_count(unsigned int count);
 extern void pm_wakep_autosleep_enabled(bool set);
 extern void pm_print_active_wakeup_sources(void);
 
-static inline void lock_system_sleep(void)
-{
-       current->flags |= PF_FREEZER_SKIP;
-       mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
-       /*
-        * Don't use freezer_count() because we don't want the call to
-        * try_to_freeze() here.
-        *
-        * Reason:
-        * Fundamentally, we just don't need it, because freezing condition
-        * doesn't come into effect until we release the pm_mutex lock,
-        * since the freezer always works with pm_mutex held.
-        *
-        * More importantly, in the case of hibernation,
-        * unlock_system_sleep() gets called in snapshot_read() and
-        * snapshot_write() when the freezing condition is still in effect.
-        * Which means, if we use try_to_freeze() here, it would make them
-        * enter the refrigerator, thus causing hibernation to lockup.
-        */
-       current->flags &= ~PF_FREEZER_SKIP;
-       mutex_unlock(&pm_mutex);
-}
+extern void lock_system_sleep(void);
+extern void unlock_system_sleep(void);
 
 #else /* !CONFIG_PM_SLEEP */
 
index 9c5a262..1d3877c 100644 (file)
@@ -124,6 +124,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
        return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
 }
 
+static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+{
+       return swp_offset(entry);
+}
+
 static inline struct page *device_private_entry_to_page(swp_entry_t entry)
 {
        return pfn_to_page(swp_offset(entry));
@@ -154,6 +159,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
        return false;
 }
 
+static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+{
+       return 0;
+}
+
 static inline struct page *device_private_entry_to_page(swp_entry_t entry)
 {
        return NULL;
@@ -189,6 +199,11 @@ static inline int is_write_migration_entry(swp_entry_t entry)
        return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
 }
 
+static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+{
+       return swp_offset(entry);
+}
+
 static inline struct page *migration_entry_to_page(swp_entry_t entry)
 {
        struct page *p = pfn_to_page(swp_offset(entry));
@@ -218,6 +233,12 @@ static inline int is_migration_entry(swp_entry_t swp)
 {
        return 0;
 }
+
+static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+{
+       return 0;
+}
+
 static inline struct page *migration_entry_to_page(swp_entry_t entry)
 {
        return NULL;
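
A minimal sketch of how a page-table walker might use the new `*_entry_to_pfn()` helpers to report the pfn behind a non-present entry without dereferencing struct page; the wrapper function is illustrative, assuming <linux/swapops.h> is included:

	static unsigned long example_special_entry_pfn(swp_entry_t entry)
	{
		if (is_migration_entry(entry))
			return migration_entry_to_pfn(entry);
		if (is_device_private_entry(entry))
			return device_private_entry_to_pfn(entry);

		return 0;	/* not a pfn-backed special entry */
	}
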
index e32dfe0..40839c0 100644 (file)
@@ -117,6 +117,12 @@ struct attribute_group {
        .show   = _name##_show,                                         \
 }
 
+#define __ATTR_RO_MODE(_name, _mode) {                                 \
+       .attr   = { .name = __stringify(_name),                         \
+                   .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },          \
+       .show   = _name##_show,                                         \
+}
+
 #define __ATTR_WO(_name) {                                             \
        .attr   = { .name = __stringify(_name), .mode = S_IWUSR },      \
        .store  = _name##_store,                                        \
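
`__ATTR_RO_MODE()` fills the same fields as `__ATTR_RO()` but lets the caller pick the mode. A minimal sketch of a root-only readable attribute; the attribute name and value are illustrative, assuming <linux/sysfs.h> and <linux/device.h> are included:

	static ssize_t secret_show(struct device *dev, struct device_attribute *attr,
				   char *buf)
	{
		return sprintf(buf, "%d\n", 42);
	}

	/* like DEVICE_ATTR_RO(), but readable by root only */
	static struct device_attribute dev_attr_secret = __ATTR_RO_MODE(secret, 0400);
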
index df5d97a..ca4a636 100644 (file)
@@ -224,7 +224,8 @@ struct tcp_sock {
                rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */
                fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
                fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
-               unused:3;
+               is_sack_reneg:1,    /* in recovery from loss with SACK reneg? */
+               unused:2;
        u8      nonagle     : 4,/* Disable Nagle algorithm?             */
                thin_lto    : 1,/* Use linear timeouts for thin streams */
                unused1     : 1,
index f442d1a..7cc3592 100644 (file)
@@ -119,6 +119,7 @@ extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern unsigned long tick_nohz_get_idle_calls(void);
+extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 #else /* !CONFIG_NO_HZ_COMMON */
index 04af640..2448f9c 100644 (file)
@@ -207,9 +207,11 @@ unsigned long round_jiffies_up(unsigned long j);
 unsigned long round_jiffies_up_relative(unsigned long j);
 
 #ifdef CONFIG_HOTPLUG_CPU
+int timers_prepare_cpu(unsigned int cpu);
 int timers_dead_cpu(unsigned int cpu);
 #else
-#define timers_dead_cpu NULL
+#define timers_prepare_cpu     NULL
+#define timers_dead_cpu                NULL
 #endif
 
 #endif
index d24991c..b95ffb2 100644 (file)
@@ -18,7 +18,7 @@
  */
 struct trace_export {
        struct trace_export __rcu       *next;
-       void (*write)(const void *, unsigned int);
+       void (*write)(struct trace_export *, const void *, unsigned int);
 };
 
 int register_ftrace_export(struct trace_export *export);
index a698777..e2ec358 100644 (file)
@@ -82,6 +82,7 @@ struct usbnet {
 #              define EVENT_RX_KILL    10
 #              define EVENT_LINK_CHANGE        11
 #              define EVENT_SET_RX_MODE        12
+#              define EVENT_NO_IP_ALIGN        13
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
index dc8cd47..977aabf 100644 (file)
@@ -20,6 +20,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
 
 static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
 {
+       if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+               key = INADDR_ANY;
+
        return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
 }
 
index 8b8118a..fb94a8b 100644 (file)
@@ -815,6 +815,8 @@ struct cfg80211_csa_settings {
        u8 count;
 };
 
+#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
+
 /**
  * struct iface_combination_params - input parameters for interface combinations
  *
@@ -3226,7 +3228,6 @@ struct cfg80211_ops {
  * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN.
  * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
  *     auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
- * @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans.
  * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the
  *     firmware.
  * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP.
index b091fd5..d49d607 100644 (file)
@@ -521,4 +521,12 @@ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
 }
 #endif
 
+static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       if (dst && dst->ops->update_pmtu)
+               dst->ops->update_pmtu(dst, NULL, skb, mtu);
+}
+
 #endif /* _NET_DST_H */
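
A minimal sketch of the pattern `skb_dst_update_pmtu()` is meant to replace in tunnel transmit paths, where drivers previously open-coded the NULL check on `dst->ops->update_pmtu`; the function name and headroom handling are illustrative:

	static void example_tnl_update_pmtu(struct sk_buff *skb, unsigned int headroom)
	{
		u32 mtu;

		if (!skb_dst(skb))
			return;

		mtu = dst_mtu(skb_dst(skb)) - headroom;
		/* replaces: if (dst->ops->update_pmtu) dst->ops->update_pmtu(...) */
		skb_dst_update_pmtu(skb, mtu);
	}
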
index 2fdb29c..fdad414 100644 (file)
@@ -44,10 +44,10 @@ struct guehdr {
 #else
 #error  "Please fix <asm/byteorder.h>"
 #endif
-                       __u8    proto_ctype;
-                       __u16   flags;
+                       __u8    proto_ctype;
+                       __be16  flags;
                };
-               __u32 word;
+               __be32  word;
        };
 };
 
@@ -84,11 +84,10 @@ static inline size_t guehdr_priv_flags_len(__be32 flags)
  * if there are unknown standard or private flags, or the options length for
  * the flags exceeds the options length specified in hlen of the GUE header.
  */
-static inline int validate_gue_flags(struct guehdr *guehdr,
-                                    size_t optlen)
+static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen)
 {
+       __be16 flags = guehdr->flags;
        size_t len;
-       __be32 flags = guehdr->flags;
 
        if (flags & ~GUE_FLAGS_ALL)
                return 1;
@@ -101,12 +100,13 @@ static inline int validate_gue_flags(struct guehdr *guehdr,
                /* Private flags are last four bytes accounted in
                 * guehdr_flags_len
                 */
-               flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV);
+               __be32 pflags = *(__be32 *)((void *)&guehdr[1] +
+                                           len - GUE_LEN_PRIV);
 
-               if (flags & ~GUE_PFLAGS_ALL)
+               if (pflags & ~GUE_PFLAGS_ALL)
                        return 1;
 
-               len += guehdr_priv_flags_len(flags);
+               len += guehdr_priv_flags_len(pflags);
                if (len > optlen)
                        return 1;
        }
index 9896f46..af8addb 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/flow_dissector.h>
 
 #define IPV4_MAX_PMTU          65535U          /* RFC 2675, Section 5.1 */
+#define IPV4_MIN_MTU           68                      /* RFC 791 */
 
 struct sock;
 
index f73797e..2212382 100644 (file)
@@ -331,6 +331,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
                           int flags);
 int ip6_flowlabel_init(void);
 void ip6_flowlabel_cleanup(void);
+bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
 
 static inline void fl6_sock_release(struct ip6_flowlabel *fl)
 {
index cc9073e..eec143c 100644 (file)
@@ -4470,18 +4470,24 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
  * ieee80211_nullfunc_get - retrieve a nullfunc template
  * @hw: pointer obtained from ieee80211_alloc_hw().
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @qos_ok: QoS NDP is acceptable to the caller; this should be set
+ *     if at all possible
  *
  * Creates a Nullfunc template which can, for example, be uploaded to
  * hardware. The template must be updated after association so that the
  * correct BSSID and address are used.
  *
+ * If @qos_ok is set and the association is to an AP with QoS/WMM, the
+ * returned packet will be QoS NDP.
+ *
  * Note: Caller (or hardware) is responsible for setting the
  * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
  *
  * Return: The nullfunc template. %NULL on error.
  */
 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
-                                      struct ieee80211_vif *vif);
+                                      struct ieee80211_vif *vif,
+                                      bool qos_ok);
 
 /**
  * ieee80211_probereq_get - retrieve a Probe Request template
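
A minimal driver-side sketch of the updated call, passing the new @qos_ok argument; the wrapper function is illustrative:

	static struct sk_buff *example_build_nullfunc(struct ieee80211_hw *hw,
						      struct ieee80211_vif *vif)
	{
		struct sk_buff *skb;

		/* true: a QoS NDP template is acceptable if the AP uses QoS/WMM */
		skb = ieee80211_nullfunc_get(hw, vif, true);
		if (!skb)
			return NULL;

		/* caller still sets IEEE80211_FCTL_PM, Duration and Sequence Control */
		return skb;
	}
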
index 10f99da..0490084 100644 (file)
@@ -223,6 +223,11 @@ int net_eq(const struct net *net1, const struct net *net2)
        return net1 == net2;
 }
 
+static inline int check_net(const struct net *net)
+{
+       return atomic_read(&net->count) != 0;
+}
+
 void net_drop_ns(void *);
 
 #else
@@ -247,6 +252,11 @@ int net_eq(const struct net *net1, const struct net *net2)
        return 1;
 }
 
+static inline int check_net(const struct net *net)
+{
+       return 1;
+}
+
 #define net_drop_ns NULL
 #endif
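
A minimal sketch of the intended use of `check_net()`: deferred work bails out instead of touching a namespace whose refcount has already dropped to zero; the function name is illustrative:

	static void example_deferred_work(struct net *net)
	{
		/* the namespace is being torn down, do not start new work on it */
		if (!check_net(net))
			return;

		/* safe to touch per-netns state from here on */
	}
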
 
index 0105445..753ac93 100644 (file)
@@ -522,7 +522,7 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
 {
        switch (layer) {
                case TCF_LAYER_LINK:
-                       return skb->data;
+                       return skb_mac_header(skb);
                case TCF_LAYER_NETWORK:
                        return skb_network_header(skb);
                case TCF_LAYER_TRANSPORT:
@@ -694,9 +694,7 @@ struct tc_cls_matchall_offload {
 };
 
 enum tc_clsbpf_command {
-       TC_CLSBPF_ADD,
-       TC_CLSBPF_REPLACE,
-       TC_CLSBPF_DESTROY,
+       TC_CLSBPF_OFFLOAD,
        TC_CLSBPF_STATS,
 };
 
@@ -705,6 +703,7 @@ struct tc_cls_bpf_offload {
        enum tc_clsbpf_command command;
        struct tcf_exts *exts;
        struct bpf_prog *prog;
+       struct bpf_prog *oldprog;
        const char *name;
        bool exts_integrated;
        u32 gen_flags;
index 9a93477..9665582 100644 (file)
@@ -168,6 +168,17 @@ static inline void red_set_vars(struct red_vars *v)
        v->qcount       = -1;
 }
 
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
+{
+       if (fls(qth_min) + Wlog > 32)
+               return false;
+       if (fls(qth_max) + Wlog > 32)
+               return false;
+       if (qth_max < qth_min)
+               return false;
+       return true;
+}
+
 static inline void red_set_parms(struct red_parms *p,
                                 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
                                 u8 Scell_log, u8 *stab, u32 max_P)
@@ -179,7 +190,7 @@ static inline void red_set_parms(struct red_parms *p,
        p->qth_max      = qth_max << Wlog;
        p->Wlog         = Wlog;
        p->Plog         = Plog;
-       if (delta < 0)
+       if (delta <= 0)
                delta = 1;
        p->qth_delta    = delta;
        if (!max_P) {
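
A minimal sketch of how a RED-based qdisc change handler might use `red_check_params()` to reject Wlog/threshold combinations that would overflow the scaled comparison before committing them with `red_set_parms()`; the wrapper is illustrative, assuming <net/red.h> is included:

	static int example_red_change(struct red_parms *p, u32 qth_min, u32 qth_max,
				      u8 Wlog, u8 Plog, u8 Scell_log, u8 *stab,
				      u32 max_P)
	{
		if (!red_check_params(qth_min, qth_max, Wlog))
			return -EINVAL;

		red_set_parms(p, qth_min, qth_max, Wlog, Plog, Scell_log, stab, max_P);
		return 0;
	}
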
index 65d0d25..becf86a 100644 (file)
@@ -71,6 +71,7 @@ struct Qdisc {
                                      * qdisc_tree_decrease_qlen() should stop.
                                      */
 #define TCQ_F_INVISIBLE                0x80 /* invisible by default in dump */
+#define TCQ_F_OFFLOADED                0x200 /* qdisc is offloaded to HW */
        u32                     limit;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
@@ -178,6 +179,7 @@ struct Qdisc_ops {
        const struct Qdisc_class_ops    *cl_ops;
        char                    id[IFNAMSIZ];
        int                     priv_size;
+       unsigned int            static_flags;
 
        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
@@ -443,6 +445,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
                               unsigned int len);
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops);
+void qdisc_free(struct Qdisc *qdisc);
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops, u32 parentid);
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
index 4a5b9a3..32ee65a 100644 (file)
@@ -48,31 +48,32 @@ static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
        /* This uses the crypto implementation of crc32c, which is either
         * implemented w/ hardware support or resolves to __crc32c_le().
         */
-       return crc32c(sum, buff, len);
+       return (__force __wsum)crc32c((__force __u32)sum, buff, len);
 }
 
 static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
                                       int offset, int len)
 {
-       return __crc32c_le_combine(csum, csum2, len);
+       return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
+                                                  (__force __u32)csum2, len);
 }
 
 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
                                        unsigned int offset)
 {
        struct sctphdr *sh = sctp_hdr(skb);
-        __le32 ret, old = sh->checksum;
        const struct skb_checksum_ops ops = {
                .update  = sctp_csum_update,
                .combine = sctp_csum_combine,
        };
+       __le32 old = sh->checksum;
+       __wsum new;
 
        sh->checksum = 0;
-       ret = cpu_to_le32(~__skb_checksum(skb, offset, skb->len - offset,
-                                         ~(__u32)0, &ops));
+       new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0, &ops);
        sh->checksum = old;
 
-       return ret;
+       return cpu_to_le32((__force __u32)new);
 }
 
 #endif /* __sctp_checksum_h__ */
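
The `__force` casts keep sparse quiet without changing the calling convention; a minimal sketch of the usual call site, with an illustrative wrapper name:

	static void example_fill_sctp_cksum(struct sk_buff *skb, unsigned int offset)
	{
		struct sctphdr *sh = sctp_hdr(skb);

		/* the helper already returns __le32, store it as-is */
		sh->checksum = sctp_compute_cksum(skb, offset);
	}
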
index 749a428..906a9c0 100644 (file)
@@ -194,6 +194,11 @@ void sctp_remaddr_proc_exit(struct net *net);
  */
 int sctp_offload_init(void);
 
+/*
+ * sctp/stream_sched.c
+ */
+void sctp_sched_ops_init(void);
+
 /*
  * sctp/stream.c
  */
index c676550..5c5da48 100644 (file)
@@ -69,4 +69,9 @@ void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch);
 int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
 struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);
 
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+                            struct sctp_sched_ops *sched_ops);
+void sctp_sched_ops_prio_init(void);
+void sctp_sched_ops_rr_init(void);
+
 #endif /* __sctp_stream_sched_h__ */
index 16f949e..9a5ccf0 100644 (file)
@@ -503,7 +503,8 @@ struct sctp_datamsg {
        /* Did the message fail to send? */
        int send_error;
        u8 send_failed:1,
-          can_delay;       /* should this message be Nagle delayed */
+          can_delay:1, /* should this message be Nagle delayed */
+          abandoned:1; /* should this message be abandoned */
 };
 
 struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
@@ -965,7 +966,7 @@ void sctp_transport_burst_limited(struct sctp_transport *);
 void sctp_transport_burst_reset(struct sctp_transport *);
 unsigned long sctp_transport_timeout(struct sctp_transport *);
 void sctp_transport_reset(struct sctp_transport *t);
-void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
+bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
 void sctp_transport_immediate_rtx(struct sctp_transport *);
 void sctp_transport_dst_release(struct sctp_transport *t);
 void sctp_transport_dst_confirm(struct sctp_transport *t);
index 79e1a2c..7a7b14e 100644 (file)
@@ -685,11 +685,7 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-       if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
-           sk->sk_family == AF_INET6)
-               hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
-       else
-               hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+       hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
@@ -1518,6 +1514,11 @@ static inline bool sock_owned_by_user(const struct sock *sk)
        return sk->sk_lock.owned;
 }
 
+static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
+{
+       return sk->sk_lock.owned;
+}
+
 /* no reclassification while locks are held */
 static inline bool sock_allow_reclassification(const struct sock *csk)
 {
index 524cee4..01dbfea 100644 (file)
@@ -14,7 +14,6 @@ struct tcf_sample {
        struct psample_group __rcu *psample_group;
        u32 psample_group_num;
        struct list_head tcfm_list;
-       struct rcu_head rcu;
 };
 #define to_sample(a) ((struct tcf_sample *)a)
 
index 4e09398..6da880d 100644 (file)
@@ -844,12 +844,11 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
 }
 #endif
 
-/* TCP_SKB_CB reference means this can not be used from early demux */
 static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
-           skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+           skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
                return true;
 #endif
        return false;
@@ -1056,7 +1055,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
                            struct rate_sample *rs);
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
-                 struct rate_sample *rs);
+                 bool is_sack_reneg, struct rate_sample *rs);
 void tcp_rate_check_app_limited(struct sock *sk);
 
 /* These functions determine how the current flow behaves in respect of SACK
index 936cfc5..9185e53 100644 (file)
@@ -170,7 +170,7 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
 
 static inline void tls_err_abort(struct sock *sk)
 {
-       sk->sk_err = -EBADMSG;
+       sk->sk_err = EBADMSG;
        sk->sk_error_report(sk);
 }
 
index 1322339..f96391e 100644 (file)
@@ -146,7 +146,7 @@ struct vxlanhdr_gpe {
                np_applied:1,
                instance_applied:1,
                version:2,
-reserved_flags2:2;
+               reserved_flags2:2;
 #elif defined(__BIG_ENDIAN_BITFIELD)
        u8      reserved_flags2:2,
                version:2,
index dc28a98..ae35991 100644 (file)
@@ -1570,6 +1570,9 @@ int xfrm_init_state(struct xfrm_state *x);
 int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_trans_queue(struct sk_buff *skb,
+                    int (*finish)(struct net *, struct sock *,
+                                  struct sk_buff *));
 int xfrm_output_resume(struct sk_buff *skb, int err);
 int xfrm_output(struct sock *sk, struct sk_buff *skb);
 int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
index 0f9cbf9..6df6fe0 100644 (file)
@@ -159,11 +159,11 @@ struct expander_device {
 
 struct sata_device {
        unsigned int class;
-       struct smp_resp        rps_resp; /* report_phy_sata_resp */
        u8     port_no;        /* port number, if this is a PM (Port) */
 
        struct ata_port *ap;
        struct ata_host ata_host;
+       struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
        u8     fis[ATA_RESP_FIS_SIZE];
 };
 
index ca00130..9c14e21 100644 (file)
@@ -193,7 +193,7 @@ struct hda_dai_map {
  * @pvt_data - private data, for asoc contains asoc codec object
  */
 struct hdac_ext_device {
-       struct hdac_device hdac;
+       struct hdac_device hdev;
        struct hdac_ext_bus *ebus;
 
        /* soc-dai to nid map */
@@ -213,7 +213,7 @@ struct hdac_ext_dma_params {
        u8 stream_tag;
 };
 #define to_ehdac_device(dev) (container_of((dev), \
-                                struct hdac_ext_device, hdac))
+                                struct hdac_ext_device, hdev))
 /*
  * HD-audio codec base driver
  */
index 24febf9..e054c58 100644 (file)
@@ -169,6 +169,10 @@ struct snd_pcm_ops {
 #define SNDRV_PCM_FMTBIT_IMA_ADPCM     _SNDRV_PCM_FMTBIT(IMA_ADPCM)
 #define SNDRV_PCM_FMTBIT_MPEG          _SNDRV_PCM_FMTBIT(MPEG)
 #define SNDRV_PCM_FMTBIT_GSM           _SNDRV_PCM_FMTBIT(GSM)
+#define SNDRV_PCM_FMTBIT_S20_LE        _SNDRV_PCM_FMTBIT(S20_LE)
+#define SNDRV_PCM_FMTBIT_U20_LE        _SNDRV_PCM_FMTBIT(U20_LE)
+#define SNDRV_PCM_FMTBIT_S20_BE        _SNDRV_PCM_FMTBIT(S20_BE)
+#define SNDRV_PCM_FMTBIT_U20_BE        _SNDRV_PCM_FMTBIT(U20_BE)
 #define SNDRV_PCM_FMTBIT_SPECIAL       _SNDRV_PCM_FMTBIT(SPECIAL)
 #define SNDRV_PCM_FMTBIT_S24_3LE       _SNDRV_PCM_FMTBIT(S24_3LE)
 #define SNDRV_PCM_FMTBIT_U24_3LE       _SNDRV_PCM_FMTBIT(U24_3LE)
@@ -202,6 +206,8 @@ struct snd_pcm_ops {
 #define SNDRV_PCM_FMTBIT_FLOAT         SNDRV_PCM_FMTBIT_FLOAT_LE
 #define SNDRV_PCM_FMTBIT_FLOAT64       SNDRV_PCM_FMTBIT_FLOAT64_LE
 #define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE
+#define SNDRV_PCM_FMTBIT_S20           SNDRV_PCM_FMTBIT_S20_LE
+#define SNDRV_PCM_FMTBIT_U20           SNDRV_PCM_FMTBIT_U20_LE
 #endif
 #ifdef SNDRV_BIG_ENDIAN
 #define SNDRV_PCM_FMTBIT_S16           SNDRV_PCM_FMTBIT_S16_BE
@@ -213,6 +219,8 @@ struct snd_pcm_ops {
 #define SNDRV_PCM_FMTBIT_FLOAT         SNDRV_PCM_FMTBIT_FLOAT_BE
 #define SNDRV_PCM_FMTBIT_FLOAT64       SNDRV_PCM_FMTBIT_FLOAT64_BE
 #define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE
+#define SNDRV_PCM_FMTBIT_S20           SNDRV_PCM_FMTBIT_S20_BE
+#define SNDRV_PCM_FMTBIT_U20           SNDRV_PCM_FMTBIT_U20_BE
 #endif
 
 struct snd_pcm_file {
index ef18494..64d027d 100644 (file)
@@ -14,6 +14,8 @@
 
 struct rt5514_platform_data {
        unsigned int dmic_init_delay;
+       const char *dsp_calib_clk_name;
+       unsigned int dsp_calib_clk_rate;
 };
 
 #endif
index d0c33a9..f218c74 100644 (file)
@@ -25,6 +25,9 @@ struct rt5645_platform_data {
        bool level_trigger_irq;
        /* Invert JD1_1 status polarity */
        bool inv_jd1_1;
+
+       /* Value to assign to snd_soc_card.long_name */
+       const char *long_name;
 };
 
 #endif
index 1a9191c..9da6388 100644 (file)
@@ -16,6 +16,7 @@
 #ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
 #define __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
 
+#include <linux/module.h>
 #include <linux/stddef.h>
 #include <linux/acpi.h>
 
index a7d8d33..0822242 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/stddef.h>
 #include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
 
 struct snd_soc_acpi_package_context {
        char *name;           /* package name */
@@ -26,17 +27,13 @@ struct snd_soc_acpi_package_context {
        bool data_valid;
 };
 
+/* the codec name used in DAIs is i2c-<HID>:00, with HID being 8 chars */
+#define SND_ACPI_I2C_ID_LEN (4 + ACPI_ID_LEN + 3 + 1)
+
 #if IS_ENABLED(CONFIG_ACPI)
-/* translation fron HID to I2C name, needed for DAI codec_name */
-const char *snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
 bool snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
                                    struct snd_soc_acpi_package_context *ctx);
 #else
-static inline const char *
-snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
-{
-       return NULL;
-}
 static inline bool
 snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
                                   struct snd_soc_acpi_package_context *ctx)
@@ -49,9 +46,6 @@ snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
 struct snd_soc_acpi_mach *
 snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);
 
-/* acpi check hid */
-bool snd_soc_acpi_check_hid(const u8 hid[ACPI_ID_LEN]);
-
 /**
  * snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
  * related to the hardware, except for the firmware and topology file names.
index 58acd00..8ad1166 100644 (file)
@@ -102,6 +102,8 @@ struct snd_compr_stream;
                               SNDRV_PCM_FMTBIT_S16_BE |\
                               SNDRV_PCM_FMTBIT_S20_3LE |\
                               SNDRV_PCM_FMTBIT_S20_3BE |\
+                              SNDRV_PCM_FMTBIT_S20_LE |\
+                              SNDRV_PCM_FMTBIT_S20_BE |\
                               SNDRV_PCM_FMTBIT_S24_3LE |\
                               SNDRV_PCM_FMTBIT_S24_3BE |\
                                SNDRV_PCM_FMTBIT_S32_LE |\
@@ -294,9 +296,6 @@ struct snd_soc_dai {
        /* DAI runtime info */
        unsigned int capture_active:1;          /* stream is in use */
        unsigned int playback_active:1;         /* stream is in use */
-       unsigned int symmetric_rates:1;
-       unsigned int symmetric_channels:1;
-       unsigned int symmetric_samplebits:1;
        unsigned int probed:1;
 
        unsigned int active;
index 1a73232..b655d98 100644 (file)
@@ -494,6 +494,8 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
 int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num);
 #endif
 
+void snd_soc_disconnect_sync(struct device *dev);
+
 struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
                const char *dai_link, int stream);
 struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
@@ -802,6 +804,9 @@ struct snd_soc_component_driver {
        int (*suspend)(struct snd_soc_component *);
        int (*resume)(struct snd_soc_component *);
 
+       unsigned int (*read)(struct snd_soc_component *, unsigned int);
+       int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
+
        /* pcm creation and destruction */
        int (*pcm_new)(struct snd_soc_pcm_runtime *);
        void (*pcm_free)(struct snd_pcm *);
@@ -858,12 +863,10 @@ struct snd_soc_component {
        struct list_head card_aux_list; /* for auxiliary bound components */
        struct list_head card_list;
 
-       struct snd_soc_dai_driver *dai_drv;
-       int num_dai;
-
        const struct snd_soc_component_driver *driver;
 
        struct list_head dai_list;
+       int num_dai;
 
        int (*read)(struct snd_soc_component *, unsigned int, unsigned int *);
        int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
index 7586072..2cd4493 100644 (file)
@@ -134,12 +134,12 @@ DECLARE_EVENT_CLASS(clk_parent,
 
        TP_STRUCT__entry(
                __string(        name,           core->name                )
-               __string(        pname,          parent->name              )
+               __string(        pname, parent ? parent->name : "none"     )
        ),
 
        TP_fast_assign(
                __assign_str(name, core->name);
-               __assign_str(pname, parent->name);
+               __assign_str(pname, parent ? parent->name : "none");
        ),
 
        TP_printk("%s %s", __get_str(name), __get_str(pname))
index e4b0b8e..2c735a3 100644 (file)
@@ -211,7 +211,7 @@ TRACE_EVENT(kvm_ack_irq,
        { KVM_TRACE_MMIO_WRITE, "write" }
 
 TRACE_EVENT(kvm_mmio,
-       TP_PROTO(int type, int len, u64 gpa, u64 val),
+       TP_PROTO(int type, int len, u64 gpa, void *val),
        TP_ARGS(type, len, gpa, val),
 
        TP_STRUCT__entry(
@@ -225,7 +225,10 @@ TRACE_EVENT(kvm_mmio,
                __entry->type           = type;
                __entry->len            = len;
                __entry->gpa            = gpa;
-               __entry->val            = val;
+               __entry->val            = 0;
+               if (val)
+                       memcpy(&__entry->val, val,
+                              min_t(u32, sizeof(__entry->val), len));
        ),
 
        TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
index f5024c5..9c4eb33 100644 (file)
@@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
 
 #include <trace/define_trace.h>
 
-#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+#endif /* !CONFIG_PREEMPTIRQ_EVENTS */
 
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
 #define trace_irq_enable(...)
 #define trace_irq_disable(...)
-#define trace_preempt_enable(...)
-#define trace_preempt_disable(...)
 #define trace_irq_enable_rcuidle(...)
 #define trace_irq_disable_rcuidle(...)
+#endif
+
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
 #define trace_preempt_enable_rcuidle(...)
 #define trace_preempt_disable_rcuidle(...)
-
 #endif
index ebe9679..36cb50c 100644 (file)
@@ -49,6 +49,7 @@ enum rxrpc_conn_trace {
        rxrpc_conn_put_client,
        rxrpc_conn_put_service,
        rxrpc_conn_queued,
+       rxrpc_conn_reap_service,
        rxrpc_conn_seen,
 };
 
@@ -138,10 +139,24 @@ enum rxrpc_rtt_rx_trace {
 
 enum rxrpc_timer_trace {
        rxrpc_timer_begin,
+       rxrpc_timer_exp_ack,
+       rxrpc_timer_exp_hard,
+       rxrpc_timer_exp_idle,
+       rxrpc_timer_exp_keepalive,
+       rxrpc_timer_exp_lost_ack,
+       rxrpc_timer_exp_normal,
+       rxrpc_timer_exp_ping,
+       rxrpc_timer_exp_resend,
        rxrpc_timer_expired,
        rxrpc_timer_init_for_reply,
        rxrpc_timer_init_for_send_reply,
+       rxrpc_timer_restart,
        rxrpc_timer_set_for_ack,
+       rxrpc_timer_set_for_hard,
+       rxrpc_timer_set_for_idle,
+       rxrpc_timer_set_for_keepalive,
+       rxrpc_timer_set_for_lost_ack,
+       rxrpc_timer_set_for_normal,
        rxrpc_timer_set_for_ping,
        rxrpc_timer_set_for_resend,
        rxrpc_timer_set_for_send,
@@ -150,6 +165,7 @@ enum rxrpc_timer_trace {
 enum rxrpc_propose_ack_trace {
        rxrpc_propose_ack_client_tx_end,
        rxrpc_propose_ack_input_data,
+       rxrpc_propose_ack_ping_for_keepalive,
        rxrpc_propose_ack_ping_for_lost_ack,
        rxrpc_propose_ack_ping_for_lost_reply,
        rxrpc_propose_ack_ping_for_params,
@@ -206,6 +222,7 @@ enum rxrpc_congest_change {
        EM(rxrpc_conn_put_client,               "PTc") \
        EM(rxrpc_conn_put_service,              "PTs") \
        EM(rxrpc_conn_queued,                   "QUE") \
+       EM(rxrpc_conn_reap_service,             "RPs") \
        E_(rxrpc_conn_seen,                     "SEE")
 
 #define rxrpc_client_traces \
@@ -296,16 +313,31 @@ enum rxrpc_congest_change {
 #define rxrpc_timer_traces \
        EM(rxrpc_timer_begin,                   "Begin ") \
        EM(rxrpc_timer_expired,                 "*EXPR*") \
+       EM(rxrpc_timer_exp_ack,                 "ExpAck") \
+       EM(rxrpc_timer_exp_hard,                "ExpHrd") \
+       EM(rxrpc_timer_exp_idle,                "ExpIdl") \
+       EM(rxrpc_timer_exp_keepalive,           "ExpKA ") \
+       EM(rxrpc_timer_exp_lost_ack,            "ExpLoA") \
+       EM(rxrpc_timer_exp_normal,              "ExpNml") \
+       EM(rxrpc_timer_exp_ping,                "ExpPng") \
+       EM(rxrpc_timer_exp_resend,              "ExpRsn") \
        EM(rxrpc_timer_init_for_reply,          "IniRpl") \
        EM(rxrpc_timer_init_for_send_reply,     "SndRpl") \
+       EM(rxrpc_timer_restart,                 "Restrt") \
        EM(rxrpc_timer_set_for_ack,             "SetAck") \
+       EM(rxrpc_timer_set_for_hard,            "SetHrd") \
+       EM(rxrpc_timer_set_for_idle,            "SetIdl") \
+       EM(rxrpc_timer_set_for_keepalive,       "KeepAl") \
+       EM(rxrpc_timer_set_for_lost_ack,        "SetLoA") \
+       EM(rxrpc_timer_set_for_normal,          "SetNml") \
        EM(rxrpc_timer_set_for_ping,            "SetPng") \
        EM(rxrpc_timer_set_for_resend,          "SetRTx") \
-       E_(rxrpc_timer_set_for_send,            "SetTx ")
+       E_(rxrpc_timer_set_for_send,            "SetSnd")
 
 #define rxrpc_propose_ack_traces \
        EM(rxrpc_propose_ack_client_tx_end,     "ClTxEnd") \
        EM(rxrpc_propose_ack_input_data,        "DataIn ") \
+       EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
        EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
        EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
        EM(rxrpc_propose_ack_ping_for_params,   "Params ") \
@@ -932,39 +964,47 @@ TRACE_EVENT(rxrpc_rtt_rx,
 
 TRACE_EVENT(rxrpc_timer,
            TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-                    ktime_t now, unsigned long now_j),
+                    unsigned long now),
 
-           TP_ARGS(call, why, now, now_j),
+           TP_ARGS(call, why, now),
 
            TP_STRUCT__entry(
                    __field(struct rxrpc_call *,                call            )
                    __field(enum rxrpc_timer_trace,             why             )
-                   __field_struct(ktime_t,                     now             )
-                   __field_struct(ktime_t,                     expire_at       )
-                   __field_struct(ktime_t,                     ack_at          )
-                   __field_struct(ktime_t,                     resend_at       )
-                   __field(unsigned long,                      now_j           )
-                   __field(unsigned long,                      timer           )
+                   __field(long,                               now             )
+                   __field(long,                               ack_at          )
+                   __field(long,                               ack_lost_at     )
+                   __field(long,                               resend_at       )
+                   __field(long,                               ping_at         )
+                   __field(long,                               expect_rx_by    )
+                   __field(long,                               expect_req_by   )
+                   __field(long,                               expect_term_by  )
+                   __field(long,                               timer           )
                             ),
 
            TP_fast_assign(
-                   __entry->call       = call;
-                   __entry->why        = why;
-                   __entry->now        = now;
-                   __entry->expire_at  = call->expire_at;
-                   __entry->ack_at     = call->ack_at;
-                   __entry->resend_at  = call->resend_at;
-                   __entry->now_j      = now_j;
-                   __entry->timer      = call->timer.expires;
+                   __entry->call               = call;
+                   __entry->why                = why;
+                   __entry->now                = now;
+                   __entry->ack_at             = call->ack_at;
+                   __entry->ack_lost_at        = call->ack_lost_at;
+                   __entry->resend_at          = call->resend_at;
+                   __entry->expect_rx_by       = call->expect_rx_by;
+                   __entry->expect_req_by      = call->expect_req_by;
+                   __entry->expect_term_by     = call->expect_term_by;
+                   __entry->timer              = call->timer.expires;
                           ),
 
-           TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+           TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
                      __entry->call,
                      __print_symbolic(__entry->why, rxrpc_timer_traces),
-                     ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
-                     ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
-                     ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
-                     __entry->timer - __entry->now_j)
+                     __entry->ack_at - __entry->now,
+                     __entry->ack_lost_at - __entry->now,
+                     __entry->resend_at - __entry->now,
+                     __entry->expect_rx_by - __entry->now,
+                     __entry->expect_req_by - __entry->now,
+                     __entry->expect_term_by - __entry->now,
+                     __entry->timer - __entry->now)
            );
 
 TRACE_EVENT(rxrpc_rx_lose,
@@ -1080,7 +1120,7 @@ TRACE_EVENT(rxrpc_congest,
                    memcpy(&__entry->sum, summary, sizeof(__entry->sum));
                           ),
 
-           TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+           TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
                      __entry->call,
                      __entry->ack_serial,
                      __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
index 07cccca..ab34c56 100644 (file)
                tcp_state_name(TCP_CLOSING),            \
                tcp_state_name(TCP_NEW_SYN_RECV))
 
+#define TP_STORE_V4MAPPED(__entry, saddr, daddr)               \
+       do {                                                    \
+               struct in6_addr *pin6;                          \
+                                                               \
+               pin6 = (struct in6_addr *)__entry->saddr_v6;    \
+               ipv6_addr_set_v4mapped(saddr, pin6);            \
+               pin6 = (struct in6_addr *)__entry->daddr_v6;    \
+               ipv6_addr_set_v4mapped(daddr, pin6);            \
+       } while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6)          \
+       do {                                                            \
+               if (sk->sk_family == AF_INET6) {                        \
+                       struct in6_addr *pin6;                          \
+                                                                       \
+                       pin6 = (struct in6_addr *)__entry->saddr_v6;    \
+                       *pin6 = saddr6;                                 \
+                       pin6 = (struct in6_addr *)__entry->daddr_v6;    \
+                       *pin6 = daddr6;                                 \
+               } else {                                                \
+                       TP_STORE_V4MAPPED(__entry, saddr, daddr);       \
+               }                                                       \
+       } while (0)
+#else
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6)  \
+       TP_STORE_V4MAPPED(__entry, saddr, daddr)
+#endif
+
 /*
  * tcp event with arguments sk and skb
  *
@@ -50,7 +79,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
 
        TP_fast_assign(
                struct inet_sock *inet = inet_sk(sk);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skbaddr = skb;
@@ -65,20 +93,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
                p32 = (__be32 *) __entry->daddr;
                *p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = sk->sk_v6_rcv_saddr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = sk->sk_v6_daddr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+                             sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -127,7 +143,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
 
        TP_fast_assign(
                struct inet_sock *inet = inet_sk(sk);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skaddr = sk;
@@ -141,20 +156,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
                p32 = (__be32 *) __entry->daddr;
                *p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = sk->sk_v6_rcv_saddr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = sk->sk_v6_daddr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+                              sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -197,7 +200,6 @@ TRACE_EVENT(tcp_set_state,
 
        TP_fast_assign(
                struct inet_sock *inet = inet_sk(sk);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skaddr = sk;
@@ -213,20 +215,8 @@ TRACE_EVENT(tcp_set_state,
                p32 = (__be32 *) __entry->daddr;
                *p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = sk->sk_v6_rcv_saddr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = sk->sk_v6_daddr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+                              sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
@@ -256,7 +246,6 @@ TRACE_EVENT(tcp_retransmit_synack,
 
        TP_fast_assign(
                struct inet_request_sock *ireq = inet_rsk(req);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skaddr = sk;
@@ -271,20 +260,8 @@ TRACE_EVENT(tcp_retransmit_synack,
                p32 = (__be32 *) __entry->daddr;
                *p32 = ireq->ir_rmt_addr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = ireq->ir_v6_loc_addr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = ireq->ir_v6_rmt_addr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(ireq->ir_loc_addr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, ireq->ir_loc_addr, ireq->ir_rmt_addr,
+                             ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
index 7894664..135e542 100644 (file)
@@ -94,9 +94,9 @@ TRACE_EVENT(thermal_zone_trip,
 #ifdef CONFIG_CPU_THERMAL
 TRACE_EVENT(thermal_power_cpu_get_power,
        TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
-               size_t load_len, u32 dynamic_power, u32 static_power),
+               size_t load_len, u32 dynamic_power),
 
-       TP_ARGS(cpus, freq, load, load_len, dynamic_power, static_power),
+       TP_ARGS(cpus, freq, load, load_len, dynamic_power),
 
        TP_STRUCT__entry(
                __bitmask(cpumask, num_possible_cpus())
@@ -104,7 +104,6 @@ TRACE_EVENT(thermal_power_cpu_get_power,
                __dynamic_array(u32,   load, load_len)
                __field(size_t,        load_len      )
                __field(u32,           dynamic_power )
-               __field(u32,           static_power  )
        ),
 
        TP_fast_assign(
@@ -115,13 +114,12 @@ TRACE_EVENT(thermal_power_cpu_get_power,
                        load_len * sizeof(*load));
                __entry->load_len = load_len;
                __entry->dynamic_power = dynamic_power;
-               __entry->static_power = static_power;
        ),
 
-       TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d static_power=%d",
+       TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d",
                __get_bitmask(cpumask), __entry->freq,
                __print_array(__get_dynamic_array(load), __entry->load_len, 4),
-               __entry->dynamic_power, __entry->static_power)
+               __entry->dynamic_power)
 );
 
 TRACE_EVENT(thermal_power_cpu_limit,
index 4cd0f05..8989a92 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/netdevice.h>
 #include <linux/filter.h>
 #include <linux/tracepoint.h>
+#include <linux/bpf.h>
 
 #define __XDP_ACT_MAP(FN)      \
        FN(ABORTED)             \
diff --git a/include/uapi/asm-generic/bpf_perf_event.h b/include/uapi/asm-generic/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..53815d2
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+
+#include <linux/ptrace.h>
+
+/* Export kernel pt_regs structure */
+typedef struct pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */
index 90fc490..821f71a 100644 (file)
@@ -91,7 +91,7 @@ PTR_FIELD(PTR_GEN,                    0,  8)
 
 #define PTR_CHECK_DEV                  ((1 << PTR_DEV_BITS) - 1)
 
-#define PTR(gen, offset, dev)                                          \
+#define MAKE_PTR(gen, offset, dev)                                     \
        ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
 
 /* Bkey utility code */
index 73445ef..940b047 100644 (file)
@@ -76,7 +76,7 @@ struct bfs_super_block {
 #define BFS_FILEBLOCKS(ip) \
         ((ip)->i_sblock == 0 ? 0 : (le32_to_cpu((ip)->i_eblock) + 1) -  le32_to_cpu((ip)->i_sblock))
 #define BFS_UNCLEAN(bfs_sb, sb)        \
-       ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY))
+       ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & SB_RDONLY))
 
 
 #endif /* _LINUX_BFS_FS_H */
index af549d4..8f95303 100644 (file)
@@ -8,11 +8,10 @@
 #ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
 #define _UAPI__LINUX_BPF_PERF_EVENT_H__
 
-#include <linux/types.h>
-#include <linux/ptrace.h>
+#include <asm/bpf_perf_event.h>
 
 struct bpf_perf_event_data {
-       struct pt_regs regs;
+       bpf_user_pt_regs_t regs;
        __u64 sample_period;
 };
 
index 3ee3bf7..144de4d 100644 (file)
@@ -23,6 +23,7 @@
 #define _UAPI_LINUX_IF_ETHER_H
 
 #include <linux/types.h>
+#include <linux/libc-compat.h>
 
 /*
  *     IEEE 802.3 Ethernet magic constants.  The frame sizes omit the preamble
  *     This is an Ethernet frame header.
  */
 
+#if __UAPI_DEF_ETHHDR
 struct ethhdr {
        unsigned char   h_dest[ETH_ALEN];       /* destination eth addr */
        unsigned char   h_source[ETH_ALEN];     /* source ether addr    */
        __be16          h_proto;                /* packet type ID field */
 } __attribute__((packed));
+#endif
 
 
 #endif /* _UAPI_LINUX_IF_ETHER_H */
index 731d0df..6e80501 100644 (file)
@@ -233,29 +233,29 @@ struct kfd_ioctl_wait_events_args {
 };
 
 struct kfd_ioctl_set_scratch_backing_va_args {
-       uint64_t va_addr;       /* to KFD */
-       uint32_t gpu_id;        /* to KFD */
-       uint32_t pad;
+       __u64 va_addr;  /* to KFD */
+       __u32 gpu_id;   /* to KFD */
+       __u32 pad;
 };
 
 struct kfd_ioctl_get_tile_config_args {
        /* to KFD: pointer to tile array */
-       uint64_t tile_config_ptr;
+       __u64 tile_config_ptr;
        /* to KFD: pointer to macro tile array */
-       uint64_t macro_tile_config_ptr;
+       __u64 macro_tile_config_ptr;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
-       uint32_t num_tile_configs;
+       __u32 num_tile_configs;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
-       uint32_t num_macro_tile_configs;
+       __u32 num_macro_tile_configs;
 
-       uint32_t gpu_id;                /* to KFD */
-       uint32_t gb_addr_config;        /* from KFD */
-       uint32_t num_banks;             /* from KFD */
-       uint32_t num_ranks;             /* from KFD */
+       __u32 gpu_id;           /* to KFD */
+       __u32 gb_addr_config;   /* from KFD */
+       __u32 num_banks;                /* from KFD */
+       __u32 num_ranks;                /* from KFD */
        /* struct size can be extended later if needed
         * without breaking ABI compatibility
         */
index 282d761..8fb90a0 100644 (file)
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
 
 struct kvm_s390_irq_state {
        __u64 buf;
-       __u32 flags;
+       __u32 flags;        /* will stay unused for compatibility reasons */
        __u32 len;
-       __u32 reserved[4];
+       __u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
 /* for KVM_SET_GUEST_DEBUG */
@@ -932,6 +932,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_HYPERV_SYNIC2 148
 #define KVM_CAP_HYPERV_VP_INDEX 149
 #define KVM_CAP_S390_AIS_MIGRATION 150
+#define KVM_CAP_PPC_GET_CPU_CHAR 151
+#define KVM_CAP_S390_BPB 152
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1261,6 +1263,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_PPC_CONFIGURE_V3_MMU  _IOW(KVMIO,  0xaf, struct kvm_ppc_mmuv3_cfg)
 /* Available with KVM_CAP_PPC_RADIX_MMU */
 #define KVM_PPC_GET_RMMU_INFO    _IOW(KVMIO,  0xb0, struct kvm_ppc_rmmu_info)
+/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
+#define KVM_PPC_GET_CPU_CHAR     _IOR(KVMIO,  0xb1, struct kvm_ppc_cpu_char)
 
 /* ioctl for vm fd */
 #define KVM_CREATE_DEVICE        _IOWR(KVMIO,  0xe0, struct kvm_create_device)
index 282875c..fc29efa 100644 (file)
 
 /* If we did not see any headers from any supported C libraries,
  * or we are being included in the kernel, then define everything
- * that we need. */
+ * that we need. Check for previous __UAPI_* definitions to give
+ * unsupported C libraries a way to opt out of any kernel definition. */
 #else /* !defined(__GLIBC__) */
 
 /* Definitions for if.h */
+#ifndef __UAPI_DEF_IF_IFCONF
 #define __UAPI_DEF_IF_IFCONF 1
+#endif
+#ifndef __UAPI_DEF_IF_IFMAP
 #define __UAPI_DEF_IF_IFMAP 1
+#endif
+#ifndef __UAPI_DEF_IF_IFNAMSIZ
 #define __UAPI_DEF_IF_IFNAMSIZ 1
+#endif
+#ifndef __UAPI_DEF_IF_IFREQ
 #define __UAPI_DEF_IF_IFREQ 1
+#endif
 /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS
 #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+#endif
 /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
 #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+#endif
 
 /* Definitions for in.h */
+#ifndef __UAPI_DEF_IN_ADDR
 #define __UAPI_DEF_IN_ADDR             1
+#endif
+#ifndef __UAPI_DEF_IN_IPPROTO
 #define __UAPI_DEF_IN_IPPROTO          1
+#endif
+#ifndef __UAPI_DEF_IN_PKTINFO
 #define __UAPI_DEF_IN_PKTINFO          1
+#endif
+#ifndef __UAPI_DEF_IP_MREQ
 #define __UAPI_DEF_IP_MREQ             1
+#endif
+#ifndef __UAPI_DEF_SOCKADDR_IN
 #define __UAPI_DEF_SOCKADDR_IN         1
+#endif
+#ifndef __UAPI_DEF_IN_CLASS
 #define __UAPI_DEF_IN_CLASS            1
+#endif
 
 /* Definitions for in6.h */
+#ifndef __UAPI_DEF_IN6_ADDR
 #define __UAPI_DEF_IN6_ADDR            1
+#endif
+#ifndef __UAPI_DEF_IN6_ADDR_ALT
 #define __UAPI_DEF_IN6_ADDR_ALT                1
+#endif
+#ifndef __UAPI_DEF_SOCKADDR_IN6
 #define __UAPI_DEF_SOCKADDR_IN6                1
+#endif
+#ifndef __UAPI_DEF_IPV6_MREQ
 #define __UAPI_DEF_IPV6_MREQ           1
+#endif
+#ifndef __UAPI_DEF_IPPROTO_V6
 #define __UAPI_DEF_IPPROTO_V6          1
+#endif
+#ifndef __UAPI_DEF_IPV6_OPTIONS
 #define __UAPI_DEF_IPV6_OPTIONS                1
+#endif
+#ifndef __UAPI_DEF_IN6_PKTINFO
 #define __UAPI_DEF_IN6_PKTINFO         1
+#endif
+#ifndef __UAPI_DEF_IP6_MTUINFO
 #define __UAPI_DEF_IP6_MTUINFO         1
+#endif
 
 /* Definitions for ipx.h */
+#ifndef __UAPI_DEF_SOCKADDR_IPX
 #define __UAPI_DEF_SOCKADDR_IPX                        1
+#endif
+#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
 #define __UAPI_DEF_IPX_ROUTE_DEFINITION                1
+#endif
+#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
 #define __UAPI_DEF_IPX_INTERFACE_DEFINITION    1
+#endif
+#ifndef __UAPI_DEF_IPX_CONFIG_DATA
 #define __UAPI_DEF_IPX_CONFIG_DATA             1
+#endif
+#ifndef __UAPI_DEF_IPX_ROUTE_DEF
 #define __UAPI_DEF_IPX_ROUTE_DEF               1
+#endif
 
 /* Definitions for xattr.h */
+#ifndef __UAPI_DEF_XATTR
 #define __UAPI_DEF_XATTR               1
+#endif
 
 #endif /* __GLIBC__ */
 
+/* Definitions for if_ether.h */
+/* allow libcs like musl to deactivate this, glibc does not implement this. */
+#ifndef __UAPI_DEF_ETHHDR
+#define __UAPI_DEF_ETHHDR              1
+#endif
+
 #endif /* _UAPI_LIBC_COMPAT_H */
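
With the __UAPI_DEF_* guards above, a C library (or an application) that already provides a given definition can suppress the kernel's copy by defining the corresponding macro to 0 before any UAPI header is included; __UAPI_DEF_ETHHDR is the case called out in the comment, since musl ships its own Ethernet header definitions. A small sketch of the opt-out pattern from the user side; the particular headers and printout are only illustrative:

/* Opt out of the kernel's struct ethhdr before including the UAPI header,
 * assuming the C library (here via <net/ethernet.h>) provides its own
 * Ethernet header structure. */
#define __UAPI_DEF_ETHHDR 0
#include <linux/if_ether.h>	/* ETH_ALEN, ETH_P_* values remain available */
#include <net/ethernet.h>	/* libc's struct ether_header */
#include <stdio.h>

int main(void)
{
	printf("ETH_ALEN=%d, libc header size=%zu\n",
	       ETH_ALEN, sizeof(struct ether_header));
	return 0;
}
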
index 3fea770..57ccfb3 100644 (file)
@@ -36,7 +36,7 @@ enum ip_conntrack_info {
 
 #define NF_CT_STATE_INVALID_BIT                        (1 << 0)
 #define NF_CT_STATE_BIT(ctinfo)                        (1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
-#define NF_CT_STATE_UNTRACKED_BIT              (1 << (IP_CT_UNTRACKED + 1))
+#define NF_CT_STATE_UNTRACKED_BIT              (1 << 6)
 
 /* Bitset representing status of connection. */
 enum ip_conntrack_status {
index 4265d7f..dcfab5e 100644 (file)
@@ -363,7 +363,6 @@ enum ovs_tunnel_key_attr {
        OVS_TUNNEL_KEY_ATTR_IPV6_SRC,           /* struct in6_addr src IPv6 address. */
        OVS_TUNNEL_KEY_ATTR_IPV6_DST,           /* struct in6_addr dst IPv6 address. */
        OVS_TUNNEL_KEY_ATTR_PAD,
-       OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,        /* be32 ERSPAN index. */
        __OVS_TUNNEL_KEY_ATTR_MAX
 };
 
index af3cc2f..37b5096 100644 (file)
@@ -256,7 +256,6 @@ struct tc_red_qopt {
 #define TC_RED_ECN             1
 #define TC_RED_HARDDROP                2
 #define TC_RED_ADAPTATIVE      4
-#define TC_RED_OFFLOADED       8
 };
 
 struct tc_red_xstats {
index d8b5f80..843e29a 100644 (file)
@@ -557,6 +557,7 @@ enum {
        TCA_PAD,
        TCA_DUMP_INVISIBLE,
        TCA_CHAIN,
+       TCA_HW_OFFLOAD,
        __TCA_MAX
 };
 
index 9d4afea..9335d92 100644 (file)
@@ -59,6 +59,7 @@ enum rxrpc_cmsg_type {
        RXRPC_EXCLUSIVE_CALL    = 10,   /* s-: Call should be on exclusive connection */
        RXRPC_UPGRADE_SERVICE   = 11,   /* s-: Request service upgrade for client call */
        RXRPC_TX_LENGTH         = 12,   /* s-: Total length of Tx data */
+       RXRPC_SET_CALL_TIMEOUT  = 13,   /* s-: Set one or more call timeouts */
        RXRPC__SUPPORTED
 };
 
index 41a0a81..c4c79aa 100644 (file)
@@ -880,6 +880,8 @@ struct usb_wireless_cap_descriptor {        /* Ultra Wide Band */
        __u8  bReserved;
 } __attribute__((packed));
 
+#define USB_DT_USB_WIRELESS_CAP_SIZE   11
+
 /* USB 2.0 Extension descriptor */
 #define        USB_CAP_TYPE_EXT                2
 
@@ -1072,6 +1074,7 @@ struct usb_ptm_cap_descriptor {
        __u8  bDevCapabilityType;
 } __attribute__((packed));
 
+#define USB_DT_USB_PTM_ID_SIZE         3
 /*
  * The size of the descriptor for the Sublink Speed Attribute Count
  * (SSAC) specified in bmAttributes[4:0].
index 14cd7dc..0b4dd54 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /* AF_VSOCK sock_diag(7) interface for querying open sockets */
 
 #ifndef _UAPI__VM_SOCKETS_DIAG_H__
index c227ccb..07d6158 100644 (file)
@@ -214,6 +214,11 @@ typedef int __bitwise snd_pcm_format_t;
 #define        SNDRV_PCM_FORMAT_IMA_ADPCM      ((__force snd_pcm_format_t) 22)
 #define        SNDRV_PCM_FORMAT_MPEG           ((__force snd_pcm_format_t) 23)
 #define        SNDRV_PCM_FORMAT_GSM            ((__force snd_pcm_format_t) 24)
+#define        SNDRV_PCM_FORMAT_S20_LE ((__force snd_pcm_format_t) 25) /* in four bytes, LSB justified */
+#define        SNDRV_PCM_FORMAT_S20_BE ((__force snd_pcm_format_t) 26) /* in four bytes, LSB justified */
+#define        SNDRV_PCM_FORMAT_U20_LE ((__force snd_pcm_format_t) 27) /* in four bytes, LSB justified */
+#define        SNDRV_PCM_FORMAT_U20_BE ((__force snd_pcm_format_t) 28) /* in four bytes, LSB justified */
+/* gap in the numbering for a future standard linear format */
 #define        SNDRV_PCM_FORMAT_SPECIAL        ((__force snd_pcm_format_t) 31)
 #define        SNDRV_PCM_FORMAT_S24_3LE        ((__force snd_pcm_format_t) 32) /* in three bytes */
 #define        SNDRV_PCM_FORMAT_S24_3BE        ((__force snd_pcm_format_t) 33) /* in three bytes */
@@ -248,6 +253,8 @@ typedef int __bitwise snd_pcm_format_t;
 #define        SNDRV_PCM_FORMAT_FLOAT          SNDRV_PCM_FORMAT_FLOAT_LE
 #define        SNDRV_PCM_FORMAT_FLOAT64        SNDRV_PCM_FORMAT_FLOAT64_LE
 #define        SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE
+#define        SNDRV_PCM_FORMAT_S20            SNDRV_PCM_FORMAT_S20_LE
+#define        SNDRV_PCM_FORMAT_U20            SNDRV_PCM_FORMAT_U20_LE
 #endif
 #ifdef SNDRV_BIG_ENDIAN
 #define        SNDRV_PCM_FORMAT_S16            SNDRV_PCM_FORMAT_S16_BE
@@ -259,6 +266,8 @@ typedef int __bitwise snd_pcm_format_t;
 #define        SNDRV_PCM_FORMAT_FLOAT          SNDRV_PCM_FORMAT_FLOAT_BE
 #define        SNDRV_PCM_FORMAT_FLOAT64        SNDRV_PCM_FORMAT_FLOAT64_BE
 #define        SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE
+#define        SNDRV_PCM_FORMAT_S20            SNDRV_PCM_FORMAT_S20_BE
+#define        SNDRV_PCM_FORMAT_U20            SNDRV_PCM_FORMAT_U20_BE
 #endif
 
 typedef int __bitwise snd_pcm_subformat_t;
index 326054a..8ba0112 100644 (file)
  * %SKL_TKN_MM_U32_NUM_IN_FMT:  Number of input formats
  * %SKL_TKN_MM_U32_NUM_OUT_FMT: Number of output formats
  *
+ * %SKL_TKN_U32_ASTATE_IDX:     Table Index for the A-State entry to be filled
+ *                              with kcps and clock source
+ *
+ * %SKL_TKN_U32_ASTATE_COUNT:   Number of valid entries in A-State table
+ *
+ * %SKL_TKN_U32_ASTATE_KCPS:    Specifies the core load threshold (in kilo
+ *                              cycles per second) below which DSP is clocked
+ *                              from source specified by clock source.
+ *
+ * %SKL_TKN_U32_ASTATE_CLK_SRC: Clock source for A-State entry
+ *
 * module_id and loadable flags don't have tokens as these values will be
  * read from the DSP FW manifest
  *
@@ -309,7 +320,11 @@ enum SKL_TKNS {
        SKL_TKN_MM_U32_NUM_IN_FMT,
        SKL_TKN_MM_U32_NUM_OUT_FMT,
 
-       SKL_TKN_MAX = SKL_TKN_MM_U32_NUM_OUT_FMT,
+       SKL_TKN_U32_ASTATE_IDX,
+       SKL_TKN_U32_ASTATE_COUNT,
+       SKL_TKN_U32_ASTATE_KCPS,
+       SKL_TKN_U32_ASTATE_CLK_SRC,
+       SKL_TKN_MAX = SKL_TKN_U32_ASTATE_CLK_SRC,
 };
 
 #endif
index 4914b93..61f410f 100644 (file)
@@ -44,3 +44,8 @@ static inline void xen_balloon_init(void)
 {
 }
 #endif
+
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+struct resource;
+void arch_xen_balloon_init(struct resource *hostmem_resource);
+#endif
index 2934249..a9a2e2c 100644 (file)
@@ -461,10 +461,15 @@ endmenu # "CPU/Task time and stats accounting"
 
 config CPU_ISOLATION
        bool "CPU isolation"
+       depends on SMP || COMPILE_TEST
+       default y
        help
          Make sure that CPUs running critical tasks are not disturbed by
          any source of "noise" such as unbound workqueues, timers, kthreads...
-         Unbound jobs get offloaded to housekeeping CPUs.
+         Unbound jobs get offloaded to housekeeping CPUs. This is driven by
+         the "isolcpus=" boot parameter.
+
+         Say Y if unsure.
 
 source "kernel/rcu/Kconfig"
 
@@ -1392,6 +1397,13 @@ config BPF_SYSCALL
          Enable the bpf() system call that allows to manipulate eBPF
          programs and maps via file descriptors.
 
+config BPF_JIT_ALWAYS_ON
+       bool "Permanently enable BPF JIT and remove BPF interpreter"
+       depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
+       help
+         Enables BPF JIT and removes BPF interpreter to avoid
+         speculative execution of BPF instructions by the interpreter
+
 config USERFAULTFD
        bool "Enable userfaultfd() system call"
        select ANON_INODES
index 1dbb237..a3e5ce2 100644 (file)
@@ -13,9 +13,7 @@ obj-$(CONFIG_BLK_DEV_INITRD)   += initramfs.o
 endif
 obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o
 
-ifneq ($(CONFIG_ARCH_INIT_TASK),y)
 obj-y                          += init_task.o
-endif
 
 mounts-y                       := do_mounts.o
 mounts-$(CONFIG_BLK_DEV_RAM)   += do_mounts_rd.o
index 9325fee..3ac6e75 100644 (file)
 #include <asm/pgtable.h>
 #include <linux/uaccess.h>
 
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+static struct signal_struct init_signals = {
+       .nr_threads     = 1,
+       .thread_head    = LIST_HEAD_INIT(init_task.thread_node),
+       .wait_chldexit  = __WAIT_QUEUE_HEAD_INITIALIZER(init_signals.wait_chldexit),
+       .shared_pending = {
+               .list = LIST_HEAD_INIT(init_signals.shared_pending.list),
+               .signal =  {{0}}
+       },
+       .rlim           = INIT_RLIMITS,
+       .cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
+#ifdef CONFIG_POSIX_TIMERS
+       .posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
+       .cputimer       = {
+               .cputime_atomic = INIT_CPUTIME_ATOMIC,
+               .running        = false,
+               .checking_timer = false,
+       },
+#endif
+       INIT_CPU_TIMERS(init_signals)
+       INIT_PREV_CPUTIME(init_signals)
+};
+
+static struct sighand_struct init_sighand = {
+       .count          = ATOMIC_INIT(1),
+       .action         = { { { .sa_handler = SIG_DFL, } }, },
+       .siglock        = __SPIN_LOCK_UNLOCKED(init_sighand.siglock),
+       .signalfd_wqh   = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh),
+};
 
-/* Initial task structure */
-struct task_struct init_task = INIT_TASK(init_task);
+/*
+ * Set up the first task table, touch at your own risk! Base=0,
+ * limit=0x1fffff (=2MB)
+ */
+struct task_struct init_task
+#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+       __init_task_data
+#endif
+= {
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+       .thread_info    = INIT_THREAD_INFO(init_task),
+       .stack_refcount = ATOMIC_INIT(1),
+#endif
+       .state          = 0,
+       .stack          = init_stack,
+       .usage          = ATOMIC_INIT(2),
+       .flags          = PF_KTHREAD,
+       .prio           = MAX_PRIO - 20,
+       .static_prio    = MAX_PRIO - 20,
+       .normal_prio    = MAX_PRIO - 20,
+       .policy         = SCHED_NORMAL,
+       .cpus_allowed   = CPU_MASK_ALL,
+       .nr_cpus_allowed= NR_CPUS,
+       .mm             = NULL,
+       .active_mm      = &init_mm,
+       .restart_block  = {
+               .fn = do_no_restart_syscall,
+       },
+       .se             = {
+               .group_node     = LIST_HEAD_INIT(init_task.se.group_node),
+       },
+       .rt             = {
+               .run_list       = LIST_HEAD_INIT(init_task.rt.run_list),
+               .time_slice     = RR_TIMESLICE,
+       },
+       .tasks          = LIST_HEAD_INIT(init_task.tasks),
+#ifdef CONFIG_SMP
+       .pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+#endif
+#ifdef CONFIG_CGROUP_SCHED
+       .sched_task_group = &root_task_group,
+#endif
+       .ptraced        = LIST_HEAD_INIT(init_task.ptraced),
+       .ptrace_entry   = LIST_HEAD_INIT(init_task.ptrace_entry),
+       .real_parent    = &init_task,
+       .parent         = &init_task,
+       .children       = LIST_HEAD_INIT(init_task.children),
+       .sibling        = LIST_HEAD_INIT(init_task.sibling),
+       .group_leader   = &init_task,
+       RCU_POINTER_INITIALIZER(real_cred, &init_cred),
+       RCU_POINTER_INITIALIZER(cred, &init_cred),
+       .comm           = INIT_TASK_COMM,
+       .thread         = INIT_THREAD,
+       .fs             = &init_fs,
+       .files          = &init_files,
+       .signal         = &init_signals,
+       .sighand        = &init_sighand,
+       .nsproxy        = &init_nsproxy,
+       .pending        = {
+               .list = LIST_HEAD_INIT(init_task.pending.list),
+               .signal = {{0}}
+       },
+       .blocked        = {{0}},
+       .alloc_lock     = __SPIN_LOCK_UNLOCKED(init_task.alloc_lock),
+       .journal_info   = NULL,
+       INIT_CPU_TIMERS(init_task)
+       .pi_lock        = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
+       .timer_slack_ns = 50000, /* 50 usec default slack */
+       .pids = {
+               [PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),
+               [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),
+               [PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),
+       },
+       .thread_group   = LIST_HEAD_INIT(init_task.thread_group),
+       .thread_node    = LIST_HEAD_INIT(init_signals.thread_head),
+#ifdef CONFIG_AUDITSYSCALL
+       .loginuid       = INVALID_UID,
+       .sessionid      = (unsigned int)-1,
+#endif
+#ifdef CONFIG_PERF_EVENTS
+       .perf_event_mutex = __MUTEX_INITIALIZER(init_task.perf_event_mutex),
+       .perf_event_list = LIST_HEAD_INIT(init_task.perf_event_list),
+#endif
+#ifdef CONFIG_PREEMPT_RCU
+       .rcu_read_lock_nesting = 0,
+       .rcu_read_unlock_special.s = 0,
+       .rcu_node_entry = LIST_HEAD_INIT(init_task.rcu_node_entry),
+       .rcu_blocked_node = NULL,
+#endif
+#ifdef CONFIG_TASKS_RCU
+       .rcu_tasks_holdout = false,
+       .rcu_tasks_holdout_list = LIST_HEAD_INIT(init_task.rcu_tasks_holdout_list),
+       .rcu_tasks_idle_cpu = -1,
+#endif
+#ifdef CONFIG_CPUSETS
+       .mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq),
+#endif
+#ifdef CONFIG_RT_MUTEXES
+       .pi_waiters     = RB_ROOT_CACHED,
+       .pi_top_task    = NULL,
+#endif
+       INIT_PREV_CPUTIME(init_task)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+       .vtime.seqcount = SEQCNT_ZERO(init_task.vtime_seqcount),
+       .vtime.starttime = 0,
+       .vtime.state    = VTIME_SYS,
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+       .numa_preferred_nid = -1,
+       .numa_group     = NULL,
+       .numa_faults    = NULL,
+#endif
+#ifdef CONFIG_KASAN
+       .kasan_depth    = 1,
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+       .softirqs_enabled = 1,
+#endif
+#ifdef CONFIG_LOCKDEP
+       .lockdep_recursion = 0,
+#endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       .ret_stack      = NULL,
+#endif
+#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
+       .trace_recursion = 0,
+#endif
+#ifdef CONFIG_LIVEPATCH
+       .patch_state    = KLP_UNDEFINED,
+#endif
+#ifdef CONFIG_SECURITY
+       .security       = NULL,
+#endif
+};
 EXPORT_SYMBOL(init_task);
 
 /*
  * Initial thread structure. Alignment of this is handled by a special
  * linker map entry.
  */
-union thread_union init_thread_union __init_task_data = {
 #ifndef CONFIG_THREAD_INFO_IN_TASK
-       INIT_THREAD_INFO(init_task)
+struct thread_info init_thread_info __init_thread_info = INIT_THREAD_INFO(init_task);
 #endif
-};
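
The expanded initializer above is mostly a mechanical unfolding of the old INIT_TASK()/INIT_SIGNALS()/INIT_SIGHAND() macros into C99 designated initializers; the one subtlety is that many fields point back into the object being defined (list heads, ->real_parent, ->group_leader), which is only valid because init_task has static storage duration. A small userspace sketch of that self-referential pattern, with made-up names standing in for the kernel structures:

#include <stdio.h>

/* Minimal stand-ins for the kernel's list_head / task_struct, just to show
 * a statically initialized object whose fields refer back to itself. */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

struct task_like {
	int prio;
	struct task_like *parent;	/* like .real_parent = &init_task */
	struct list_head children;	/* like LIST_HEAD_INIT(init_task.children) */
};

static struct task_like first_task = {
	.prio		= 120,
	.parent		= &first_task,
	.children	= LIST_HEAD_INIT(first_task.children),
};

int main(void)
{
	printf("parent == self: %d, children list empty: %d\n",
	       first_task.parent == &first_task,
	       first_task.children.next == &first_task.children);
	return 0;
}
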
index dfec380..a8100b9 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <linux/ptrace.h>
+#include <linux/pti.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
 #include <linux/sched_clock.h>
@@ -504,6 +505,10 @@ static void __init mm_init(void)
        pgtable_init();
        vmalloc_init();
        ioremap_huge_init();
+       /* Should be run before the first non-init thread is created */
+       init_espfix_bsp();
+       /* Should be run after espfix64 is set up. */
+       pti_init();
 }
 
 asmlinkage __visible void __init start_kernel(void)
@@ -588,6 +593,12 @@ asmlinkage __visible void __init start_kernel(void)
                local_irq_disable();
        radix_tree_init();
 
+       /*
+        * Set up housekeeping before setting up workqueues to allow the unbound
+        * workqueue to take non-housekeeping into account.
+        */
+       housekeeping_init();
+
        /*
         * Allow workqueue creation and work item queueing/cancelling
         * early.  Work item execution depends on kthreads and starts after
@@ -605,7 +616,6 @@ asmlinkage __visible void __init start_kernel(void)
        early_irq_init();
        init_IRQ();
        tick_init();
-       housekeeping_init();
        rcu_init_nohz();
        init_timers();
        hrtimers_init();
@@ -673,10 +683,6 @@ asmlinkage __visible void __init start_kernel(void)
 #ifdef CONFIG_X86
        if (efi_enabled(EFI_RUNTIME_SERVICES))
                efi_enter_virtual_mode();
-#endif
-#ifdef CONFIG_X86_ESPFIX64
-       /* Should be run before the first non-init thread is created */
-       init_espfix_bsp();
 #endif
        thread_stack_cache_init();
        cred_init();
index d240256..9649ecd 100644 (file)
@@ -331,7 +331,7 @@ static struct dentry *mqueue_mount(struct file_system_type *fs_type,
                         void *data)
 {
        struct ipc_namespace *ns;
-       if (flags & MS_KERNMOUNT) {
+       if (flags & SB_KERNMOUNT) {
                ns = data;
                data = NULL;
        } else {
index d15c0ee..addf773 100644 (file)
@@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
 {
        struct kstatfs sbuf;
 
-       if (time_is_before_jiffies(acct->needcheck))
+       if (time_is_after_jiffies(acct->needcheck))
                goto out;
 
        /* May block */
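
The acct fix above inverts a backwards comparison: the free-space check is meant to be skipped while acct->needcheck still lies in the future, which is what time_is_after_jiffies() tests. Those helpers boil down to a wraparound-safe signed subtraction; a userspace sketch of the idiom with a 32-bit tick counter (tick_after() is only an illustration of the time_after()-style comparison):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a is after b" for a free-running tick counter,
 * the same signed-difference trick behind time_after() and friends. */
static int tick_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t now = 0xfffffff0u;		/* counter close to wrapping */
	uint32_t needcheck = now + 100;		/* deadline lands after the wrap */

	/* Deadline still in the future: skip the expensive check. */
	printf("skip check: %d\n", tick_after(needcheck, now));	/* 1 */

	now += 200;				/* time passes, counter wrapped */
	printf("skip check: %d\n", tick_after(needcheck, now));	/* 0: do the check */
	return 0;
}
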
index 7c25426..ab94d30 100644 (file)
@@ -53,9 +53,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int numa_node = bpf_map_attr_numa_node(attr);
+       u32 elem_size, index_mask, max_entries;
+       bool unpriv = !capable(CAP_SYS_ADMIN);
        struct bpf_array *array;
-       u64 array_size;
-       u32 elem_size;
+       u64 array_size, mask64;
 
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -72,11 +73,32 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
        elem_size = round_up(attr->value_size, 8);
 
+       max_entries = attr->max_entries;
+
+       /* On 32 bit archs roundup_pow_of_two() with max_entries that has
+        * upper most bit set in u32 space is undefined behavior due to
+        * resulting 1U << 32, so do it manually here in u64 space.
+        */
+       mask64 = fls_long(max_entries - 1);
+       mask64 = 1ULL << mask64;
+       mask64 -= 1;
+
+       index_mask = mask64;
+       if (unpriv) {
+               /* round up array size to nearest power of 2,
+                * since cpu will speculate within index_mask limits
+                */
+               max_entries = index_mask + 1;
+               /* Check for overflows. */
+               if (max_entries < attr->max_entries)
+                       return ERR_PTR(-E2BIG);
+       }
+
        array_size = sizeof(*array);
        if (percpu)
-               array_size += (u64) attr->max_entries * sizeof(void *);
+               array_size += (u64) max_entries * sizeof(void *);
        else
-               array_size += (u64) attr->max_entries * elem_size;
+               array_size += (u64) max_entries * elem_size;
 
        /* make sure there is no u32 overflow later in round_up() */
        if (array_size >= U32_MAX - PAGE_SIZE)
@@ -86,6 +108,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        array = bpf_map_area_alloc(array_size, numa_node);
        if (!array)
                return ERR_PTR(-ENOMEM);
+       array->index_mask = index_mask;
+       array->map.unpriv_array = unpriv;
 
        /* copy mandatory map attributes */
        array->map.map_type = attr->map_type;
@@ -121,12 +145,13 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
        if (unlikely(index >= array->map.max_entries))
                return NULL;
 
-       return array->value + array->elem_size * index;
+       return array->value + array->elem_size * (index & array->index_mask);
 }
 
 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
 static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_insn *insn = insn_buf;
        u32 elem_size = round_up(map->value_size, 8);
        const int ret = BPF_REG_0;
@@ -135,7 +160,12 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 
        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
-       *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+       if (map->unpriv_array) {
+               *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
+               *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
+       } else {
+               *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+       }
 
        if (is_power_of_2(elem_size)) {
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
@@ -157,7 +187,7 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
        if (unlikely(index >= array->map.max_entries))
                return NULL;
 
-       return this_cpu_ptr(array->pptrs[index]);
+       return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 }
 
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
@@ -177,7 +207,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
-       pptr = array->pptrs[index];
+       pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
@@ -225,10 +255,11 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                return -EEXIST;
 
        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
-               memcpy(this_cpu_ptr(array->pptrs[index]),
+               memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
                       value, map->value_size);
        else
-               memcpy(array->value + array->elem_size * index,
+               memcpy(array->value +
+                      array->elem_size * (index & array->index_mask),
                       value, map->value_size);
        return 0;
 }
@@ -262,7 +293,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
-       pptr = array->pptrs[index];
+       pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                off += size;
@@ -613,6 +644,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
 static u32 array_of_map_gen_lookup(struct bpf_map *map,
                                   struct bpf_insn *insn_buf)
 {
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 elem_size = round_up(map->value_size, 8);
        struct bpf_insn *insn = insn_buf;
        const int ret = BPF_REG_0;
@@ -621,7 +653,12 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
 
        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
-       *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+       if (map->unpriv_array) {
+               *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
+               *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
+       } else {
+               *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+       }
        if (is_power_of_2(elem_size))
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        else
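
Two things happen in the arraymap change above: the power-of-two mask is computed in 64-bit space so that a max_entries with the top u32 bit set never produces the undefined 1U << 32, and for unprivileged maps the allocation is rounded up and every element access is post-masked with index & index_mask, so even a mispredicted bounds check cannot speculatively read outside the array. A userspace sketch of the mask computation and masked access, with a plain C array standing in for the BPF map and index_mask() purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Round max_entries up to a power of two and build an AND-mask, doing the
 * shift in u64 space: with max_entries = 0x80000001, a 32-bit
 * roundup_pow_of_two() would need 1U << 32, which is undefined behaviour. */
static uint64_t index_mask(uint32_t max_entries)
{
	unsigned int bits = 0;

	while ((1ULL << bits) < max_entries)	/* fls_long(max_entries - 1) in the kernel */
		bits++;
	return (1ULL << bits) - 1;
}

int main(void)
{
	uint32_t values[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	uint64_t mask = index_mask(8);		/* 0x7 */
	uint32_t idx = 11;			/* out of range on purpose */

	/* The bounds check still rejects idx; the mask only guarantees that a
	 * speculatively executed load cannot leave the array. */
	if (idx < 8)
		printf("%u\n", values[idx & mask]);
	else
		printf("index %u rejected (mask=%#llx)\n", idx,
		       (unsigned long long)mask);

	printf("mask for 0x80000001 entries: %#llx\n",
	       (unsigned long long)index_mask(0x80000001u));
	return 0;
}
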
index b9f8686..7949e8b 100644 (file)
@@ -767,6 +767,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 EXPORT_SYMBOL_GPL(__bpf_call_base);
 
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
 /**
  *     __bpf_prog_run - run eBPF program on a given context
  *     @ctx: is the data we are operating on
@@ -955,7 +956,7 @@ select_insn:
                DST = tmp;
                CONT;
        ALU_MOD_X:
-               if (unlikely(SRC == 0))
+               if (unlikely((u32)SRC == 0))
                        return 0;
                tmp = (u32) DST;
                DST = do_div(tmp, (u32) SRC);
@@ -974,7 +975,7 @@ select_insn:
                DST = div64_u64(DST, SRC);
                CONT;
        ALU_DIV_X:
-               if (unlikely(SRC == 0))
+               if (unlikely((u32)SRC == 0))
                        return 0;
                tmp = (u32) DST;
                do_div(tmp, (u32) SRC);
@@ -1317,6 +1318,14 @@ EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
 };
 
+#else
+static unsigned int __bpf_prog_ret0(const void *ctx,
+                                   const struct bpf_insn *insn)
+{
+       return 0;
+}
+#endif
+
 bool bpf_prog_array_compatible(struct bpf_array *array,
                               const struct bpf_prog *fp)
 {
@@ -1364,9 +1373,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
 
        fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+       fp->bpf_func = __bpf_prog_ret0;
+#endif
 
        /* eBPF JITs can rewrite the program in case constant
         * blinding is active. However, in case of error during
@@ -1376,6 +1389,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
         */
        if (!bpf_prog_is_dev_bound(fp->aux)) {
                fp = bpf_int_jit_compile(fp);
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+               if (!fp->jited) {
+                       *err = -ENOTSUPP;
+                       return fp;
+               }
+#endif
        } else {
                *err = bpf_prog_offload_compile(fp);
                if (*err)
@@ -1447,7 +1466,8 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
        rcu_read_lock();
        prog = rcu_dereference(progs)->progs;
        for (; *prog; prog++)
-               cnt++;
+               if (*prog != &dummy_bpf_prog.prog)
+                       cnt++;
        rcu_read_unlock();
        return cnt;
 }
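
One detail in the interpreter hunks above: ALU32 division and modulo operate on the low 32 bits of the source register, so a divisor such as 0x100000000 is non-zero as a u64 yet zero after truncation, and the zero check has to test (u32)SRC. A userspace sketch of the distinction, with plain integers standing in for BPF registers (alu32_div() is illustrative, not the interpreter code):

#include <stdint.h>
#include <stdio.h>

/* ALU32 division uses only the low 32 bits of the source register; the
 * divide-by-zero guard therefore tests the truncated value.  Like the
 * interpreter, bail out with 0 when the 32-bit divisor is zero. */
static uint64_t alu32_div(uint64_t dst, uint64_t src)
{
	if ((uint32_t)src == 0)
		return 0;
	return (uint32_t)dst / (uint32_t)src;
}

int main(void)
{
	uint64_t src = 0x100000000ULL;	/* non-zero as u64, zero as u32 */

	printf("src != 0: %d, (u32)src == 0: %d\n",
	       src != 0, (uint32_t)src == 0);
	printf("alu32_div(100, src) = %llu\n",
	       (unsigned long long)alu32_div(100, src));
	return 0;
}
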
index e469e05..3905d4b 100644 (file)
@@ -114,6 +114,7 @@ static void htab_free_elems(struct bpf_htab *htab)
                pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
                                         htab->map.key_size);
                free_percpu(pptr);
+               cond_resched();
        }
 free_elems:
        bpf_map_area_free(htab->elems);
@@ -159,6 +160,7 @@ static int prealloc_init(struct bpf_htab *htab)
                        goto free_elems;
                htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
                                  pptr);
+               cond_resched();
        }
 
 skip_percpu_elems:
index 01aaef1..5bb5e49 100644 (file)
@@ -368,7 +368,45 @@ out:
        putname(pname);
        return ret;
 }
-EXPORT_SYMBOL_GPL(bpf_obj_get_user);
+
+static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
+{
+       struct bpf_prog *prog;
+       int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
+       if (ret)
+               return ERR_PTR(ret);
+
+       if (inode->i_op == &bpf_map_iops)
+               return ERR_PTR(-EINVAL);
+       if (inode->i_op != &bpf_prog_iops)
+               return ERR_PTR(-EACCES);
+
+       prog = inode->i_private;
+
+       ret = security_bpf_prog(prog);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       if (!bpf_prog_get_ok(prog, &type, false))
+               return ERR_PTR(-EINVAL);
+
+       return bpf_prog_inc(prog);
+}
+
+struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
+{
+       struct bpf_prog *prog;
+       struct path path;
+       int ret = kern_path(name, LOOKUP_FOLLOW, &path);
+       if (ret)
+               return ERR_PTR(ret);
+       prog = __get_prog_inode(d_backing_inode(path.dentry), type);
+       if (!IS_ERR(prog))
+               touch_atime(&path);
+       path_put(&path);
+       return prog;
+}
+EXPORT_SYMBOL(bpf_prog_get_type_path);
 
 static void bpf_evict_inode(struct inode *inode)
 {
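
bpf_prog_get_type_path() above gives kernel code a way to take a reference on a program that user space has pinned in bpffs; the user-space mirror image of that lookup is the BPF_OBJ_GET command of the bpf() syscall. A hedged sketch of that side, assuming a program is already pinned at the given path (the path and the raw-syscall wrapper are illustrative; libbpf's bpf_obj_get() wraps the same call):

#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Fetch an fd for a BPF object pinned in bpffs, the user-space counterpart
 * of looking a pinned program up by path inside the kernel. */
static int bpf_obj_get_path(const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (uint64_t)(unsigned long)pathname;

	return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/sys/fs/bpf/my_prog";
	int fd = bpf_obj_get_path(path);

	if (fd < 0)
		perror("BPF_OBJ_GET");
	else
		printf("got fd %d for %s\n", fd, path);
	return 0;
}
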
index 68ec884..8455b89 100644 (file)
@@ -1,3 +1,18 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
 #include <linux/bug.h>
index 5ee2e41..1712d31 100644 (file)
@@ -591,8 +591,15 @@ static void sock_map_free(struct bpf_map *map)
 
                write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
-               smap_list_remove(psock, &stab->sock_map[i]);
-               smap_release_sock(psock, sock);
+               /* This check handles a racing sock event that can get the
+                * sk_callback_lock before this case but after xchg happens
+                * causing the refcnt to hit zero and sock user data (psock)
+                * to be null and queued for garbage collection.
+                */
+               if (likely(psock)) {
+                       smap_list_remove(psock, &stab->sock_map[i]);
+                       smap_release_sock(psock, sock);
+               }
                write_unlock_bh(&sock->sk_callback_lock);
        }
        rcu_read_unlock();
index 2c4cfea..5cb783f 100644 (file)
@@ -1057,7 +1057,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
 
-static bool bpf_prog_get_ok(struct bpf_prog *prog,
+bool bpf_prog_get_ok(struct bpf_prog *prog,
                            enum bpf_prog_type *attach_type, bool attach_drv)
 {
        /* not an attachment, just a refcount inc, always allow */
index d459357..13551e6 100644 (file)
@@ -978,6 +978,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
        return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
 }
 
+static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
+{
+       const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+       return reg->type == PTR_TO_CTX;
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
                                   const struct bpf_reg_state *reg,
                                   int off, int size, bool strict)
@@ -1059,6 +1066,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
                break;
        case PTR_TO_STACK:
                pointer_desc = "stack ";
+               /* The stack spill tracking logic in check_stack_write()
+                * and check_stack_read() relies on stack accesses being
+                * aligned.
+                */
+               strict = true;
                break;
        default:
                break;
@@ -1067,6 +1079,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
                                           strict);
 }
 
+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+       u64 mask;
+
+       /* clear high bits in bit representation */
+       reg->var_off = tnum_cast(reg->var_off, size);
+
+       /* fix arithmetic bounds */
+       mask = ((u64)1 << (size * 8)) - 1;
+       if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+               reg->umin_value &= mask;
+               reg->umax_value &= mask;
+       } else {
+               reg->umin_value = 0;
+               reg->umax_value = mask;
+       }
+       reg->smin_value = reg->umin_value;
+       reg->smax_value = reg->umax_value;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
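
coerce_reg_to_size() above replaces the old cast that only narrowed var_off: after a sub-64-bit load the verifier must also shrink the tracked numeric bounds, and it may keep a narrowed [umin, umax] only when both ends share the same discarded high bits; otherwise the truncation can wrap and the only safe answer is the full [0, 2^(8*size)-1] range. A userspace sketch of just that bounds step, with a pared-down register state holding only unsigned bounds (struct bounds and coerce_to_size() are illustrative):

#include <stdint.h>
#include <stdio.h>

struct bounds {
	uint64_t umin, umax;
};

/* Truncate tracked unsigned bounds to `size` bytes (size < 8): if umin and
 * umax differ in the bits being discarded, the truncated value can wrap, so
 * fall back to the full [0, mask] range. */
static void coerce_to_size(struct bounds *b, int size)
{
	uint64_t mask = ((uint64_t)1 << (size * 8)) - 1;

	if ((b->umin & ~mask) == (b->umax & ~mask)) {
		b->umin &= mask;
		b->umax &= mask;
	} else {
		b->umin = 0;
		b->umax = mask;
	}
}

int main(void)
{
	struct bounds a = { 0x10000120, 0x10000137 };	/* same high bits */
	struct bounds c = { 0x000000f0, 0x00000110 };	/* 1-byte truncation wraps */

	coerce_to_size(&a, 1);
	coerce_to_size(&c, 1);
	printf("a: [0x%llx, 0x%llx]\n",
	       (unsigned long long)a.umin, (unsigned long long)a.umax);	/* [0x20, 0x37] */
	printf("c: [0x%llx, 0x%llx]\n",
	       (unsigned long long)c.umin, (unsigned long long)c.umax);	/* [0x0, 0xff] */
	return 0;
}
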
@@ -1200,9 +1235,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
        if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
            regs[value_regno].type == SCALAR_VALUE) {
                /* b/h/w load zero-extends, mark upper bits as known 0 */
-               regs[value_regno].var_off =
-                       tnum_cast(regs[value_regno].var_off, size);
-               __update_reg_bounds(&regs[value_regno]);
+               coerce_reg_to_size(&regs[value_regno], size);
        }
        return err;
 }
@@ -1232,6 +1265,12 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
                return -EACCES;
        }
 
+       if (is_ctx_reg(env, insn->dst_reg)) {
+               verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
+                       insn->dst_reg);
+               return -EACCES;
+       }
+
        /* check whether atomic_add can read the memory */
        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_READ, -1);
@@ -1282,6 +1321,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
                verbose(env, "invalid variable stack read R%d var_off=%s\n",
                        regno, tn_buf);
+               return -EACCES;
        }
        off = regs[regno].off + regs[regno].var_off.value;
        if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -1674,7 +1714,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
                return -EINVAL;
        }
 
+       /* With LD_ABS/IND some JITs save/restore skb from r1. */
        changes_data = bpf_helper_changes_pkt_data(fn->func);
+       if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
+               verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
+                       func_id_name(func_id), func_id);
+               return -EINVAL;
+       }
 
        memset(&meta, 0, sizeof(meta));
        meta.pkt_access = fn->pkt_access;
@@ -1696,6 +1742,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
        err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
        if (err)
                return err;
+       if (func_id == BPF_FUNC_tail_call) {
+               if (meta.map_ptr == NULL) {
+                       verbose(env, "verifier bug\n");
+                       return -EINVAL;
+               }
+               env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
+       }
        err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
        if (err)
                return err;
@@ -1766,14 +1819,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
        return 0;
 }
 
-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
-       /* clear high 32 bits */
-       reg->var_off = tnum_cast(reg->var_off, 4);
-       /* Update bounds */
-       __update_reg_bounds(reg);
-}
-
 static bool signed_add_overflows(s64 a, s64 b)
 {
        /* Do the add in u64, where overflow is well-defined */
@@ -1794,6 +1839,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
        return res > a;
 }
 
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+                                 const struct bpf_reg_state *reg,
+                                 enum bpf_reg_type type)
+{
+       bool known = tnum_is_const(reg->var_off);
+       s64 val = reg->var_off.value;
+       s64 smin = reg->smin_value;
+
+       if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+               verbose(env, "math between %s pointer and %lld is not allowed\n",
+                       reg_type_str[type], val);
+               return false;
+       }
+
+       if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+               verbose(env, "%s pointer offset %d is not allowed\n",
+                       reg_type_str[type], reg->off);
+               return false;
+       }
+
+       if (smin == S64_MIN) {
+               verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+                       reg_type_str[type]);
+               return false;
+       }
+
+       if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+               verbose(env, "value %lld makes %s pointer be out of bounds\n",
+                       smin, reg_type_str[type]);
+               return false;
+       }
+
+       return true;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
@@ -1815,44 +1895,36 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        dst_reg = &regs[dst];
 
-       if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
-               print_verifier_state(env, env->cur_state);
-               verbose(env,
-                       "verifier internal error: known but bad sbounds\n");
-               return -EINVAL;
-       }
-       if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
-               print_verifier_state(env, env->cur_state);
-               verbose(env,
-                       "verifier internal error: known but bad ubounds\n");
-               return -EINVAL;
+       if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
+           smin_val > smax_val || umin_val > umax_val) {
+               /* Taint dst register if offset had invalid bounds derived from
+                * e.g. dead branches.
+                */
+               __mark_reg_unknown(dst_reg);
+               return 0;
        }
 
        if (BPF_CLASS(insn->code) != BPF_ALU64) {
                /* 32-bit ALU ops on pointers produce (meaningless) scalars */
-               if (!env->allow_ptr_leaks)
-                       verbose(env,
-                               "R%d 32-bit pointer arithmetic prohibited\n",
-                               dst);
+               verbose(env,
+                       "R%d 32-bit pointer arithmetic prohibited\n",
+                       dst);
                return -EACCES;
        }
 
        if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
+                       dst);
                return -EACCES;
        }
        if (ptr_reg->type == CONST_PTR_TO_MAP) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
+                       dst);
                return -EACCES;
        }
        if (ptr_reg->type == PTR_TO_PACKET_END) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
+                       dst);
                return -EACCES;
        }
 
@@ -1862,6 +1934,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        dst_reg->type = ptr_reg->type;
        dst_reg->id = ptr_reg->id;
 
+       if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+           !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+               return -EINVAL;
+
        switch (opcode) {
        case BPF_ADD:
                /* We can take a fixed offset as long as it doesn't overflow
@@ -1915,9 +1991,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_SUB:
                if (dst_reg == off_reg) {
                        /* scalar -= pointer.  Creates an unknown scalar */
-                       if (!env->allow_ptr_leaks)
-                               verbose(env, "R%d tried to subtract pointer from scalar\n",
-                                       dst);
+                       verbose(env, "R%d tried to subtract pointer from scalar\n",
+                               dst);
                        return -EACCES;
                }
                /* We don't allow subtraction from FP, because (according to
@@ -1925,9 +2000,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                 * be able to deal with it.
                 */
                if (ptr_reg->type == PTR_TO_STACK) {
-                       if (!env->allow_ptr_leaks)
-                               verbose(env, "R%d subtraction from stack pointer prohibited\n",
-                                       dst);
+                       verbose(env, "R%d subtraction from stack pointer prohibited\n",
+                               dst);
                        return -EACCES;
                }
                if (known && (ptr_reg->off - smin_val ==
@@ -1976,28 +2050,30 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_AND:
        case BPF_OR:
        case BPF_XOR:
-               /* bitwise ops on pointers are troublesome, prohibit for now.
-                * (However, in principle we could allow some cases, e.g.
-                * ptr &= ~3 which would reduce min_value by 3.)
-                */
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
-                               dst, bpf_alu_string[opcode >> 4]);
+               /* bitwise ops on pointers are troublesome, prohibit. */
+               verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
+                       dst, bpf_alu_string[opcode >> 4]);
                return -EACCES;
        default:
                /* other operators (e.g. MUL,LSH) produce non-pointer results */
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
-                               dst, bpf_alu_string[opcode >> 4]);
+               verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
+                       dst, bpf_alu_string[opcode >> 4]);
                return -EACCES;
        }
 
+       if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+               return -EINVAL;
+
        __update_reg_bounds(dst_reg);
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);
        return 0;
 }
 
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                                      struct bpf_insn *insn,
                                      struct bpf_reg_state *dst_reg,
@@ -2008,12 +2084,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        bool src_known, dst_known;
        s64 smin_val, smax_val;
        u64 umin_val, umax_val;
+       u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
-       if (BPF_CLASS(insn->code) != BPF_ALU64) {
-               /* 32-bit ALU ops are (32,32)->64 */
-               coerce_reg_to_32(dst_reg);
-               coerce_reg_to_32(&src_reg);
-       }
        smin_val = src_reg.smin_value;
        smax_val = src_reg.smax_value;
        umin_val = src_reg.umin_value;
@@ -2021,6 +2093,21 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        src_known = tnum_is_const(src_reg.var_off);
        dst_known = tnum_is_const(dst_reg->var_off);
 
+       if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
+           smin_val > smax_val || umin_val > umax_val) {
+               /* Taint dst register if offset had invalid bounds derived from
+                * e.g. dead branches.
+                */
+               __mark_reg_unknown(dst_reg);
+               return 0;
+       }
+
+       if (!src_known &&
+           opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+               __mark_reg_unknown(dst_reg);
+               return 0;
+       }
+
        switch (opcode) {
        case BPF_ADD:
                if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
@@ -2149,9 +2236,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                __update_reg_bounds(dst_reg);
                break;
        case BPF_LSH:
-               if (umax_val > 63) {
-                       /* Shifts greater than 63 are undefined.  This includes
-                        * shifts by a negative number.
+               if (umax_val >= insn_bitness) {
+                       /* Shifts greater than 31 or 63 are undefined.
+                        * This includes shifts by a negative number.
                         */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        break;
@@ -2177,27 +2264,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                __update_reg_bounds(dst_reg);
                break;
        case BPF_RSH:
-               if (umax_val > 63) {
-                       /* Shifts greater than 63 are undefined.  This includes
-                        * shifts by a negative number.
+               if (umax_val >= insn_bitness) {
+                       /* Shifts greater than 31 or 63 are undefined.
+                        * This includes shifts by a negative number.
                         */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        break;
                }
-               /* BPF_RSH is an unsigned shift, so make the appropriate casts */
-               if (dst_reg->smin_value < 0) {
-                       if (umin_val) {
-                               /* Sign bit will be cleared */
-                               dst_reg->smin_value = 0;
-                       } else {
-                               /* Lost sign bit information */
-                               dst_reg->smin_value = S64_MIN;
-                               dst_reg->smax_value = S64_MAX;
-                       }
-               } else {
-                       dst_reg->smin_value =
-                               (u64)(dst_reg->smin_value) >> umax_val;
-               }
+               /* BPF_RSH is an unsigned shift.  If the value in dst_reg might
+                * be negative, then either:
+                * 1) src_reg might be zero, so the sign bit of the result is
+                *    unknown, so we lose our signed bounds
+                * 2) it's known negative, thus the unsigned bounds capture the
+                *    signed bounds
+                * 3) the signed bounds cross zero, so they tell us nothing
+                *    about the result
+                * If the value in dst_reg is known nonnegative, then again the
+                * unsigned bounds capture the signed bounds.
+                * Thus, in all cases it suffices to blow away our signed bounds
+                * and rely on inferring new ones from the unsigned bounds and
+                * var_off of the result.
+                */
+               dst_reg->smin_value = S64_MIN;
+               dst_reg->smax_value = S64_MAX;
                if (src_known)
                        dst_reg->var_off = tnum_rshift(dst_reg->var_off,
                                                       umin_val);
@@ -2213,6 +2302,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                break;
        }
 
+       if (BPF_CLASS(insn->code) != BPF_ALU64) {
+               /* 32-bit ALU ops are (32,32)->32 */
+               coerce_reg_to_size(dst_reg, 4);
+               coerce_reg_to_size(&src_reg, 4);
+       }
+
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);
        return 0;
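Note (editor): the comment in the BPF_RSH hunk above explains why the signed bounds are discarded wholesale and re-derived from the unsigned bounds. A minimal userspace illustration of case 1 (value may be negative, shift count may be zero) follows; it is a sketch only, not verifier code.

/* Illustrative sketch: an unsigned right shift of a possibly-negative
 * 64-bit value.  If the shift count can be 0, the sign bit of the
 * result is unknown, so the old signed bounds carry no information;
 * only unsigned reasoning (and var_off) remains valid.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t rsh(int64_t v, unsigned int n)
{
	return (uint64_t)v >> n;
}

int main(void)
{
	int64_t v = -2;		/* both signed bounds negative */

	printf("(u64)-2 >> 0 = %llu\n", (unsigned long long)rsh(v, 0));
	printf("(u64)-2 >> 1 = %llu\n", (unsigned long long)rsh(v, 1));
	/* >> 0 keeps the sign bit (signed result -2, negative);
	 * >> 1 clears it (signed result S64_MAX).  The signed result is
	 * effectively unbounded, as the comment above argues.
	 */
	return 0;
}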
@@ -2227,7 +2322,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
        struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
        struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
        u8 opcode = BPF_OP(insn->code);
-       int rc;
 
        dst_reg = &regs[insn->dst_reg];
        src_reg = NULL;
@@ -2238,43 +2332,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                if (src_reg->type != SCALAR_VALUE) {
                        if (dst_reg->type != SCALAR_VALUE) {
                                /* Combining two pointers by any ALU op yields
-                                * an arbitrary scalar.
+                                * an arbitrary scalar. Disallow all math except
+                                * pointer subtraction
                                 */
-                               if (!env->allow_ptr_leaks) {
-                                       verbose(env, "R%d pointer %s pointer prohibited\n",
-                                               insn->dst_reg,
-                                               bpf_alu_string[opcode >> 4]);
-                                       return -EACCES;
+                               if (opcode == BPF_SUB) {
+                                       mark_reg_unknown(env, regs, insn->dst_reg);
+                                       return 0;
                                }
-                               mark_reg_unknown(env, regs, insn->dst_reg);
-                               return 0;
+                               verbose(env, "R%d pointer %s pointer prohibited\n",
+                                       insn->dst_reg,
+                                       bpf_alu_string[opcode >> 4]);
+                               return -EACCES;
                        } else {
                                /* scalar += pointer
                                 * This is legal, but we have to reverse our
                                 * src/dest handling in computing the range
                                 */
-                               rc = adjust_ptr_min_max_vals(env, insn,
-                                                            src_reg, dst_reg);
-                               if (rc == -EACCES && env->allow_ptr_leaks) {
-                                       /* scalar += unknown scalar */
-                                       __mark_reg_unknown(&off_reg);
-                                       return adjust_scalar_min_max_vals(
-                                                       env, insn,
-                                                       dst_reg, off_reg);
-                               }
-                               return rc;
+                               return adjust_ptr_min_max_vals(env, insn,
+                                                              src_reg, dst_reg);
                        }
                } else if (ptr_reg) {
                        /* pointer += scalar */
-                       rc = adjust_ptr_min_max_vals(env, insn,
-                                                    dst_reg, src_reg);
-                       if (rc == -EACCES && env->allow_ptr_leaks) {
-                               /* unknown scalar += scalar */
-                               __mark_reg_unknown(dst_reg);
-                               return adjust_scalar_min_max_vals(
-                                               env, insn, dst_reg, *src_reg);
-                       }
-                       return rc;
+                       return adjust_ptr_min_max_vals(env, insn,
+                                                      dst_reg, src_reg);
                }
        } else {
                /* Pretend the src is a reg with a known value, since we only
@@ -2283,17 +2363,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                off_reg.type = SCALAR_VALUE;
                __mark_reg_known(&off_reg, insn->imm);
                src_reg = &off_reg;
-               if (ptr_reg) { /* pointer += K */
-                       rc = adjust_ptr_min_max_vals(env, insn,
-                                                    ptr_reg, src_reg);
-                       if (rc == -EACCES && env->allow_ptr_leaks) {
-                               /* unknown scalar += K */
-                               __mark_reg_unknown(dst_reg);
-                               return adjust_scalar_min_max_vals(
-                                               env, insn, dst_reg, off_reg);
-                       }
-                       return rc;
-               }
+               if (ptr_reg) /* pointer += K */
+                       return adjust_ptr_min_max_vals(env, insn,
+                                                      ptr_reg, src_reg);
        }
 
        /* Got here implies adding two SCALAR_VALUEs */
@@ -2390,17 +2462,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                        return -EACCES;
                                }
                                mark_reg_unknown(env, regs, insn->dst_reg);
-                               /* high 32 bits are known zero. */
-                               regs[insn->dst_reg].var_off = tnum_cast(
-                                               regs[insn->dst_reg].var_off, 4);
-                               __update_reg_bounds(&regs[insn->dst_reg]);
+                               coerce_reg_to_size(&regs[insn->dst_reg], 4);
                        }
                } else {
                        /* case: R = imm
                         * remember the value we stored into this reg
                         */
                        regs[insn->dst_reg].type = SCALAR_VALUE;
-                       __mark_reg_known(regs + insn->dst_reg, insn->imm);
+                       if (BPF_CLASS(insn->code) == BPF_ALU64) {
+                               __mark_reg_known(regs + insn->dst_reg,
+                                                insn->imm);
+                       } else {
+                               __mark_reg_known(regs + insn->dst_reg,
+                                                (u32)insn->imm);
+                       }
                }
 
        } else if (opcode > BPF_END) {
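Note (editor): for `R = imm` the 32-bit ALU class now records the immediate as `(u32)insn->imm`, because a 32-bit move zero-extends into the 64-bit register while the 64-bit class sign-extends the immediate. An illustrative userspace sketch of the difference (not verifier code):

/* Illustrative sketch: a BPF immediate is a signed 32-bit field.
 * The 64-bit ALU class sign-extends it; a 32-bit move zero-extends,
 * so the tracked "known value" must use (u32)imm in the 32-bit case.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t imm = -1;

	uint64_t alu64_view = (uint64_t)(int64_t)imm;	/* sign-extended */
	uint64_t alu32_view = (uint32_t)imm;		/* zero-extended */

	printf("ALU64 view: %#llx\n", (unsigned long long)alu64_view);
	printf("ALU32 view: %#llx\n", (unsigned long long)alu32_view);
	return 0;
}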
@@ -2436,6 +2511,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                        return -EINVAL;
                }
 
+               if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
+                       verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
+                       return -EINVAL;
+               }
+
                if ((opcode == BPF_LSH || opcode == BPF_RSH ||
                     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
                        int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
@@ -3431,15 +3511,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
                        return range_within(rold, rcur) &&
                               tnum_in(rold->var_off, rcur->var_off);
                } else {
-                       /* if we knew anything about the old value, we're not
-                        * equal, because we can't know anything about the
-                        * scalar value of the pointer in the new value.
+                       /* We're trying to use a pointer in place of a scalar.
+                        * Even if the scalar was unbounded, this could lead to
+                        * pointer leaks because scalars are allowed to leak
+                        * while pointers are not. We could make this safe in
+                        * special cases if root is calling us, but it's
+                        * probably not worth the hassle.
                         */
-                       return rold->umin_value == 0 &&
-                              rold->umax_value == U64_MAX &&
-                              rold->smin_value == S64_MIN &&
-                              rold->smax_value == S64_MAX &&
-                              tnum_is_unknown(rold->var_off);
+                       return false;
                }
        case PTR_TO_MAP_VALUE:
                /* If the new min/max/var_off satisfy the old ones and
@@ -3932,6 +4011,12 @@ static int do_check(struct bpf_verifier_env *env)
                        if (err)
                                return err;
 
+                       if (is_ctx_reg(env, insn->dst_reg)) {
+                               verbose(env, "BPF_ST stores into R%d context is not allowed\n",
+                                       insn->dst_reg);
+                               return -EACCES;
+                       }
+
                        /* check that memory (dst_reg + off) is writeable */
                        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
@@ -4384,6 +4469,24 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
        int i, cnt, delta = 0;
 
        for (i = 0; i < insn_cnt; i++, insn++) {
+               if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
+                   insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+                       /* Due to JIT bugs, clear the upper 32 bits of the src
+                        * register before a div/mod operation
+                        */
+                       insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
+                       insn_buf[1] = *insn;
+                       cnt = 2;
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta    += cnt - 1;
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
+                       continue;
+               }
+
                if (insn->code != (BPF_JMP | BPF_CALL))
                        continue;
 
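Note (editor): the fixup above prepends a BPF_MOV32_REG of the source register onto itself in front of every 32-bit BPF_DIV/BPF_MOD by register, so a JIT that feeds the whole 64-bit register into the divide only ever sees the low 32 bits. A hedged userspace illustration of the failure mode (values are made up for the demo):

/* Illustrative sketch: a 32-bit divide must only look at the low 32
 * bits of the divisor.  If stale upper bits were used, the result would
 * change (or a divide-by-zero of the low 32 bits could be masked).
 * The verifier's fixup is equivalent to the explicit truncation below.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t src = 0x100000002ULL;	/* upper bits stale, low 32 = 2 */
	uint32_t dst = 10;

	uint64_t wrong = dst / src;		/* 10 / 4294967298 = 0 */
	uint32_t right = dst / (uint32_t)src;	/* 10 / 2          = 5 */

	printf("without truncation: %llu\n", (unsigned long long)wrong);
	printf("with truncation:    %u\n", right);
	return 0;
}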
@@ -4407,6 +4510,35 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                         */
                        insn->imm = 0;
                        insn->code = BPF_JMP | BPF_TAIL_CALL;
+
+                       /* instead of changing every JIT dealing with tail_call
+                        * emit two extra insns:
+                        * if (index >= max_entries) goto out;
+                        * index &= array->index_mask;
+                        * to avoid out-of-bounds cpu speculation
+                        */
+                       map_ptr = env->insn_aux_data[i + delta].map_ptr;
+                       if (map_ptr == BPF_MAP_PTR_POISON) {
+                               verbose(env, "tail_call abusing map_ptr\n");
+                               return -EINVAL;
+                       }
+                       if (!map_ptr->unpriv_array)
+                               continue;
+                       insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
+                                                 map_ptr->max_entries, 2);
+                       insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
+                                                   container_of(map_ptr,
+                                                                struct bpf_array,
+                                                                map)->index_mask);
+                       insn_buf[2] = *insn;
+                       cnt = 3;
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta    += cnt - 1;
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
                        continue;
                }
 
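Note (editor): the two instructions emitted above bound the tail_call index even under speculation: a compare against max_entries, then an AND with the array's index_mask. The sketch below shows the same pattern in plain C; the names (lookup, table, MAX_ENTRIES, INDEX_MASK) and the mask size are assumptions for the demo, not the kernel's data structures.

/* Illustrative sketch of the speculation-hardening pattern: even if the
 * bounds check is speculatively mispredicted, the AND with a
 * power-of-two-minus-one mask keeps the index inside the padded
 * backing store.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_ENTRIES	5u
#define INDEX_MASK	7u	/* next power of two (8) minus one */

static int table[8];		/* backing store padded to mask size */

static int lookup(uint32_t index)
{
	if (index >= MAX_ENTRIES)	/* architectural bounds check */
		return -1;
	index &= INDEX_MASK;		/* clamps speculative accesses too */
	return table[index];
}

int main(void)
{
	printf("%d\n", lookup(3));	/* valid */
	printf("%d\n", lookup(100));	/* rejected */
	return 0;
}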
index 024085d..a2c05d2 100644 (file)
@@ -123,7 +123,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
         */
        do {
                css_task_iter_start(&from->self, 0, &it);
-               task = css_task_iter_next(&it);
+
+               do {
+                       task = css_task_iter_next(&it);
+               } while (task && (task->flags & PF_EXITING));
+
                if (task)
                        get_task_struct(task);
                css_task_iter_end(&it);
index 0b1ffe1..7e4c445 100644 (file)
@@ -1397,7 +1397,7 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
                         cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
                         cft->name);
        else
-               strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
+               strlcpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
        return buf;
 }
 
@@ -1864,9 +1864,9 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
 
        root->flags = opts->flags;
        if (opts->release_agent)
-               strcpy(root->release_agent_path, opts->release_agent);
+               strlcpy(root->release_agent_path, opts->release_agent, PATH_MAX);
        if (opts->name)
-               strcpy(root->name, opts->name);
+               strlcpy(root->name, opts->name, MAX_CGROUP_ROOT_NAMELEN);
        if (opts->cpuset_clone_children)
                set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
 }
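Note (editor): the strncpy -> strlcpy conversions above matter because strncpy does not NUL-terminate when the source fills the buffer, while strlcpy always terminates (and truncates). A small userspace sketch; demo_strlcpy is a simplified local stand-in, not the kernel implementation.

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for strlcpy(): copies at most size-1 bytes and
 * always NUL-terminates the destination.
 */
static size_t demo_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t copy = len >= size ? size - 1 : len;

		memcpy(dst, src, copy);
		dst[copy] = '\0';
	}
	return len;
}

int main(void)
{
	char a[8], b[8];
	const char *name = "release_agent_path_that_is_too_long";

	strncpy(a, name, sizeof(a));		/* no terminator added */
	demo_strlcpy(b, name, sizeof(b));	/* always terminated   */

	/* a[] is not NUL-terminated here; printing it with %s would read
	 * past the buffer.  b[] is safe and holds "release".
	 */
	printf("strlcpy result: \"%s\"\n", b);
	return 0;
}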
@@ -4125,26 +4125,24 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
 
 static void css_task_iter_advance(struct css_task_iter *it)
 {
-       struct list_head *l = it->task_pos;
+       struct list_head *next;
 
        lockdep_assert_held(&css_set_lock);
-       WARN_ON_ONCE(!l);
-
 repeat:
        /*
         * Advance iterator to find next entry.  cset->tasks is consumed
         * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
         * next cset.
         */
-       l = l->next;
+       next = it->task_pos->next;
 
-       if (l == it->tasks_head)
-               l = it->mg_tasks_head->next;
+       if (next == it->tasks_head)
+               next = it->mg_tasks_head->next;
 
-       if (l == it->mg_tasks_head)
+       if (next == it->mg_tasks_head)
                css_task_iter_advance_css_set(it);
        else
-               it->task_pos = l;
+               it->task_pos = next;
 
        /* if PROCS, skip over tasks which aren't group leaders */
        if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
@@ -4449,6 +4447,7 @@ static struct cftype cgroup_base_files[] = {
        },
        {
                .name = "cgroup.threads",
+               .flags = CFTYPE_NS_DELEGATABLE,
                .release = cgroup_procs_release,
                .seq_start = cgroup_threads_start,
                .seq_next = cgroup_procs_next,
index 5f780d8..9caeda6 100644 (file)
@@ -50,7 +50,7 @@ static int current_css_set_read(struct seq_file *seq, void *v)
 
        spin_lock_irq(&css_set_lock);
        rcu_read_lock();
-       cset = rcu_dereference(current->cgroups);
+       cset = task_css_set(current);
        refcnt = refcount_read(&cset->refcount);
        seq_printf(seq, "css_set %pK %d", cset, refcnt);
        if (refcnt > cset->nr_tasks)
@@ -96,7 +96,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
 
        spin_lock_irq(&css_set_lock);
        rcu_read_lock();
-       cset = rcu_dereference(current->cgroups);
+       cset = task_css_set(current);
        list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
                struct cgroup *c = link->cgrp;
 
index 133b465..1e111dd 100644 (file)
@@ -296,8 +296,12 @@ int cgroup_stat_init(struct cgroup *cgrp)
        }
 
        /* ->updated_children list is self terminated */
-       for_each_possible_cpu(cpu)
-               cgroup_cpu_stat(cgrp, cpu)->updated_children = cgrp;
+       for_each_possible_cpu(cpu) {
+               struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
+
+               cstat->updated_children = cgrp;
+               u64_stats_init(&cstat->sync);
+       }
 
        prev_cputime_init(&cgrp->stat.prev_cputime);
 
diff --git a/kernel/configs/nopm.config b/kernel/configs/nopm.config
new file mode 100644 (file)
index 0000000..81ff078
--- /dev/null
@@ -0,0 +1,15 @@
+CONFIG_PM=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+
+# Triggers PM on OMAP
+CONFIG_CPU_IDLE=n
+
+# Triggers enablement via hibernate callbacks
+CONFIG_XEN=n
+
+# ARM/ARM64 architectures that select PM unconditionally
+CONFIG_ARCH_OMAP2PLUS_TYPICAL=n
+CONFIG_ARCH_RENESAS=n
+CONFIG_ARCH_TEGRA=n
+CONFIG_ARCH_VEXPRESS=n
index 04892a8..53f7dc6 100644 (file)
@@ -80,19 +80,19 @@ static struct lockdep_map cpuhp_state_down_map =
        STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
 
 
-static void inline cpuhp_lock_acquire(bool bringup)
+static inline void cpuhp_lock_acquire(bool bringup)
 {
        lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
 }
 
-static void inline cpuhp_lock_release(bool bringup)
+static inline void cpuhp_lock_release(bool bringup)
 {
        lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
 }
 #else
 
-static void inline cpuhp_lock_acquire(bool bringup) { }
-static void inline cpuhp_lock_release(bool bringup) { }
+static inline void cpuhp_lock_acquire(bool bringup) { }
+static inline void cpuhp_lock_release(bool bringup) { }
 
 #endif
 
@@ -780,8 +780,8 @@ static int takedown_cpu(unsigned int cpu)
        BUG_ON(cpu_online(cpu));
 
        /*
-        * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
-        * runnable tasks from the cpu, there's only the idle task left now
+        * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
+        * all runnable tasks from the CPU, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
@@ -1277,9 +1277,9 @@ static struct cpuhp_step cpuhp_bp_states[] = {
         * before blk_mq_queue_reinit_notify() from notify_dead(),
         * otherwise a RCU stall occurs.
         */
-       [CPUHP_TIMERS_DEAD] = {
+       [CPUHP_TIMERS_PREPARE] = {
                .name                   = "timers:dead",
-               .startup.single         = NULL,
+               .startup.single         = timers_prepare_cpu,
                .teardown.single        = timers_dead_cpu,
        },
        /* Kicks the plugged cpu into life */
@@ -1289,11 +1289,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
                .teardown.single        = NULL,
                .cant_stop              = true,
        },
-       [CPUHP_AP_SMPCFD_DYING] = {
-               .name                   = "smpcfd:dying",
-               .startup.single         = NULL,
-               .teardown.single        = smpcfd_dying_cpu,
-       },
        /*
         * Handled on control processor until the plugged processor manages
         * this itself.
@@ -1335,6 +1330,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
                .startup.single         = NULL,
                .teardown.single        = rcutree_dying_cpu,
        },
+       [CPUHP_AP_SMPCFD_DYING] = {
+               .name                   = "smpcfd:dying",
+               .startup.single         = NULL,
+               .teardown.single        = smpcfd_dying_cpu,
+       },
        /* Entry state on starting. Interrupts enabled from here on. Transient
          * state for synchronization */
        [CPUHP_AP_ONLINE] = {
index b366389..4f63597 100644 (file)
@@ -410,7 +410,7 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_SYMBOL(contig_page_data);
 #endif
 #ifdef CONFIG_SPARSEMEM
-       VMCOREINFO_SYMBOL(mem_section);
+       VMCOREINFO_SYMBOL_ARRAY(mem_section);
        VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
        VMCOREINFO_STRUCT_SIZE(mem_section);
        VMCOREINFO_OFFSET(mem_section, section_mem_map);
index e74be38..ed5d349 100644 (file)
@@ -350,7 +350,7 @@ poll_again:
                        }
                        kdb_printf("\n");
                        for (i = 0; i < count; i++) {
-                               if (kallsyms_symbol_next(p_tmp, i) < 0)
+                               if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
                                        break;
                                kdb_printf("%s ", p_tmp);
                                *(p_tmp + len) = '\0';
index 4a1c334..e2764d7 100644 (file)
@@ -51,16 +51,16 @@ void __delayacct_tsk_init(struct task_struct *tsk)
  * Finish delay accounting for a statistic using its timestamps (@start),
  * accumulator (@total) and @count
  */
-static void delayacct_end(u64 *start, u64 *total, u32 *count)
+static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count)
 {
        s64 ns = ktime_get_ns() - *start;
        unsigned long flags;
 
        if (ns > 0) {
-               spin_lock_irqsave(&current->delays->lock, flags);
+               spin_lock_irqsave(lock, flags);
                *total += ns;
                (*count)++;
-               spin_unlock_irqrestore(&current->delays->lock, flags);
+               spin_unlock_irqrestore(lock, flags);
        }
 }
 
@@ -69,17 +69,25 @@ void __delayacct_blkio_start(void)
        current->delays->blkio_start = ktime_get_ns();
 }
 
-void __delayacct_blkio_end(void)
+/*
+ * We cannot rely on the `current` macro, as we haven't yet switched back to
+ * the process being woken.
+ */
+void __delayacct_blkio_end(struct task_struct *p)
 {
-       if (current->delays->flags & DELAYACCT_PF_SWAPIN)
-               /* Swapin block I/O */
-               delayacct_end(&current->delays->blkio_start,
-                       &current->delays->swapin_delay,
-                       &current->delays->swapin_count);
-       else    /* Other block I/O */
-               delayacct_end(&current->delays->blkio_start,
-                       &current->delays->blkio_delay,
-                       &current->delays->blkio_count);
+       struct task_delay_info *delays = p->delays;
+       u64 *total;
+       u32 *count;
+
+       if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
+               total = &delays->swapin_delay;
+               count = &delays->swapin_count;
+       } else {
+               total = &delays->blkio_delay;
+               count = &delays->blkio_count;
+       }
+
+       delayacct_end(&delays->lock, &delays->blkio_start, total, count);
 }
 
 int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
@@ -153,8 +161,10 @@ void __delayacct_freepages_start(void)
 
 void __delayacct_freepages_end(void)
 {
-       delayacct_end(&current->delays->freepages_start,
-                       &current->delays->freepages_delay,
-                       &current->delays->freepages_count);
+       delayacct_end(
+               &current->delays->lock,
+               &current->delays->freepages_start,
+               &current->delays->freepages_delay,
+               &current->delays->freepages_count);
 }
 
index 16beab4..5d8f403 100644 (file)
@@ -1231,6 +1231,10 @@ static void put_ctx(struct perf_event_context *ctx)
  *           perf_event_context::lock
  *         perf_event::mmap_mutex
  *         mmap_sem
+ *
+ *    cpu_hotplug_lock
+ *      pmus_lock
+ *       cpuctx->mutex / perf_event_context::mutex
  */
 static struct perf_event_context *
 perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
@@ -4196,6 +4200,7 @@ int perf_event_release_kernel(struct perf_event *event)
 {
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *child, *tmp;
+       LIST_HEAD(free_list);
 
        /*
         * If we got here through err_file: fput(event_file); we will not have
@@ -4268,8 +4273,7 @@ again:
                                               struct perf_event, child_list);
                if (tmp == child) {
                        perf_remove_from_context(child, DETACH_GROUP);
-                       list_del(&child->child_list);
-                       free_event(child);
+                       list_move(&child->child_list, &free_list);
                        /*
                         * This matches the refcount bump in inherit_event();
                         * this can't be the last reference.
@@ -4284,6 +4288,11 @@ again:
        }
        mutex_unlock(&event->child_mutex);
 
+       list_for_each_entry_safe(child, tmp, &free_list, child_list) {
+               list_del(&child->child_list);
+               free_event(child);
+       }
+
 no_ctx:
        put_event(event); /* Must be the 'last' reference */
        return 0;
@@ -6639,6 +6648,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
        struct perf_namespaces_event *namespaces_event = data;
        struct perf_output_handle handle;
        struct perf_sample_data sample;
+       u16 header_size = namespaces_event->event_id.header.size;
        int ret;
 
        if (!perf_event_namespaces_match(event))
@@ -6649,7 +6659,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
        ret = perf_output_begin(&handle, event,
                                namespaces_event->event_id.header.size);
        if (ret)
-               return;
+               goto out;
 
        namespaces_event->event_id.pid = perf_event_pid(event,
                                                        namespaces_event->task);
@@ -6661,6 +6671,8 @@ static void perf_event_namespaces_output(struct perf_event *event,
        perf_event__output_id_sample(event, &handle, &sample);
 
        perf_output_end(&handle);
+out:
+       namespaces_event->event_id.header.size = header_size;
 }
 
 static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
@@ -7987,11 +7999,11 @@ static void bpf_overflow_handler(struct perf_event *event,
 {
        struct bpf_perf_event_data_kern ctx = {
                .data = data,
-               .regs = regs,
                .event = event,
        };
        int ret = 0;
 
+       ctx.regs = perf_arch_bpf_user_pt_regs(regs);
        preempt_disable();
        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
                goto out;
@@ -8513,6 +8525,29 @@ fail_clear_files:
        return ret;
 }
 
+static int
+perf_tracepoint_set_filter(struct perf_event *event, char *filter_str)
+{
+       struct perf_event_context *ctx = event->ctx;
+       int ret;
+
+       /*
+        * Beware, here be dragons!!
+        *
+        * the tracepoint muck will deadlock against ctx->mutex, but the tracepoint
+        * stuff does not actually need it. So temporarily drop ctx->mutex. As per
+        * perf_event_ctx_lock() we already have a reference on ctx.
+        *
+        * This can result in event getting moved to a different ctx, but that
+        * does not affect the tracepoint state.
+        */
+       mutex_unlock(&ctx->mutex);
+       ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
+       mutex_lock(&ctx->mutex);
+
+       return ret;
+}
+
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
 {
        char *filter_str;
@@ -8529,8 +8564,7 @@ static int perf_event_set_filter(struct perf_event *event, void __user *arg)
 
        if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
            event->attr.type == PERF_TYPE_TRACEPOINT)
-               ret = ftrace_profile_set_filter(event, event->attr.config,
-                                               filter_str);
+               ret = perf_tracepoint_set_filter(event, filter_str);
        else if (has_addr_filter(event))
                ret = perf_event_set_addr_filter(event, filter_str);
 
@@ -9165,7 +9199,13 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
        if (!try_module_get(pmu->module))
                return -ENODEV;
 
-       if (event->group_leader != event) {
+       /*
+        * A number of pmu->event_init() methods iterate the sibling_list to,
+        * for example, validate if the group fits on the PMU. Therefore,
+        * if this is a sibling event, acquire the ctx->mutex to protect
+        * the sibling_list.
+        */
+       if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
                /*
                 * This ctx->mutex can nest when we're called through
                 * inheritance. See the perf_event_ctx_lock_nested() comment.
index 6b4298a..995453d 100644 (file)
@@ -1755,3 +1755,12 @@ Efault:
        return -EFAULT;
 }
 #endif
+
+__weak void abort(void)
+{
+       BUG();
+
+       /* if that doesn't kill us, halt */
+       panic("Oops failed to kill thread");
+}
+EXPORT_SYMBOL(abort);
index 432eadf..2295fc6 100644 (file)
@@ -721,8 +721,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                        goto out;
        }
        /* a new mm has just been created */
-       arch_dup_mmap(oldmm, mm);
-       retval = 0;
+       retval = arch_dup_mmap(oldmm, mm);
 out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
index 76ed592..7f719d1 100644 (file)
@@ -1582,8 +1582,8 @@ static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
 {
        unsigned int op =         (encoded_op & 0x70000000) >> 28;
        unsigned int cmp =        (encoded_op & 0x0f000000) >> 24;
-       int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 12);
-       int cmparg = sign_extend32(encoded_op & 0x00000fff, 12);
+       int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
+       int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
        int oldval, ret;
 
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
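Note (editor): the fix above passes 11, not 12, as the sign-bit index because the operand and comparison arguments are 12-bit fields, whose sign bit is bit 11. The sketch below uses a local reimplementation of sign_extend32() with the same semantics as the kernel helper, for illustration only.

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for sign_extend32(): @index names the bit holding the
 * sign of the field.  A 12-bit field needs index 11; passing 12 treats
 * the field as 13 bits wide and mis-decodes negative operands.
 */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t field = 0xfff;		/* 12-bit field holding -1 */

	printf("index 11: %d\n", sign_extend32(field, 11));	/* -1   */
	printf("index 12: %d\n", sign_extend32(field, 12));	/* 4095 */
	return 0;
}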
@@ -1878,6 +1878,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
        struct futex_q *this, *next;
        DEFINE_WAKE_Q(wake_q);
 
+       if (nr_wake < 0 || nr_requeue < 0)
+               return -EINVAL;
+
        /*
         * When PI not supported: return -ENOSYS if requeue_pi is true,
         * consequently the compiler knows requeue_pi is always false past
@@ -2294,34 +2297,33 @@ static void unqueue_me_pi(struct futex_q *q)
        spin_unlock(q->lock_ptr);
 }
 
-/*
- * Fixup the pi_state owner with the new owner.
- *
- * Must be called with hash bucket lock held and mm->sem held for non
- * private futexes.
- */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-                               struct task_struct *newowner)
+                               struct task_struct *argowner)
 {
-       u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
        struct futex_pi_state *pi_state = q->pi_state;
        u32 uval, uninitialized_var(curval), newval;
-       struct task_struct *oldowner;
+       struct task_struct *oldowner, *newowner;
+       u32 newtid;
        int ret;
 
+       lockdep_assert_held(q->lock_ptr);
+
        raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 
        oldowner = pi_state->owner;
-       /* Owner died? */
-       if (!pi_state->owner)
-               newtid |= FUTEX_OWNER_DIED;
 
        /*
-        * We are here either because we stole the rtmutex from the
-        * previous highest priority waiter or we are the highest priority
-        * waiter but have failed to get the rtmutex the first time.
+        * We are here because either:
+        *
+        *  - we stole the lock and pi_state->owner needs updating to reflect
+        *    that (@argowner == current),
+        *
+        * or:
         *
-        * We have to replace the newowner TID in the user space variable.
+        *  - someone stole our lock and we need to fix things to point to the
+        *    new owner (@argowner == NULL).
+        *
+        * Either way, we have to replace the TID in the user space variable.
         * This must be atomic as we have to preserve the owner died bit here.
         *
         * Note: We write the user space value _before_ changing the pi_state
@@ -2334,6 +2336,45 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
         * in the PID check in lookup_pi_state.
         */
 retry:
+       if (!argowner) {
+               if (oldowner != current) {
+                       /*
+                        * We raced against a concurrent self; things are
+                        * already fixed up. Nothing to do.
+                        */
+                       ret = 0;
+                       goto out_unlock;
+               }
+
+               if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
+                       /* We got the lock after all, nothing to fix. */
+                       ret = 0;
+                       goto out_unlock;
+               }
+
+               /*
+                * Since we just failed the trylock, there must be an owner.
+                */
+               newowner = rt_mutex_owner(&pi_state->pi_mutex);
+               BUG_ON(!newowner);
+       } else {
+               WARN_ON_ONCE(argowner != current);
+               if (oldowner == current) {
+                       /*
+                        * We raced against a concurrent self; things are
+                        * already fixed up. Nothing to do.
+                        */
+                       ret = 0;
+                       goto out_unlock;
+               }
+               newowner = argowner;
+       }
+
+       newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
+       /* Owner died? */
+       if (!pi_state->owner)
+               newtid |= FUTEX_OWNER_DIED;
+
        if (get_futex_value_locked(&uval, uaddr))
                goto handle_fault;
 
@@ -2434,15 +2475,28 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
                 * Got the lock. We might not be the anticipated owner if we
                 * did a lock-steal - fix up the PI-state in that case:
                 *
-                * We can safely read pi_state->owner without holding wait_lock
-                * because we now own the rt_mutex, only the owner will attempt
-                * to change it.
+                * Speculative pi_state->owner read (we don't hold wait_lock);
+                * since we own the lock pi_state->owner == current is the
+                * stable state, anything else needs more attention.
                 */
                if (q->pi_state->owner != current)
                        ret = fixup_pi_state_owner(uaddr, q, current);
                goto out;
        }
 
+       /*
+        * If we didn't get the lock; check if anybody stole it from us. In
+        * that case, we need to fix up the uval to point to them instead of
+        * us, otherwise bad things happen. [10]
+        *
+        * Another speculative read; pi_state->owner == current is unstable
+        * but needs our attention.
+        */
+       if (q->pi_state->owner == current) {
+               ret = fixup_pi_state_owner(uaddr, q, NULL);
+               goto out;
+       }
+
        /*
         * Paranoia check. If we did not take the lock, then we should not be
         * the owner of the rt_mutex.
index e357bc8..daae2f2 100644 (file)
@@ -86,11 +86,12 @@ static int gid_cmp(const void *_a, const void *_b)
        return gid_gt(a, b) - gid_lt(a, b);
 }
 
-static void groups_sort(struct group_info *group_info)
+void groups_sort(struct group_info *group_info)
 {
        sort(group_info->gid, group_info->ngroups, sizeof(*group_info->gid),
             gid_cmp, NULL);
 }
+EXPORT_SYMBOL(groups_sort);
 
 /* a simple bsearch */
 int groups_search(const struct group_info *group_info, kgid_t grp)
@@ -122,7 +123,6 @@ int groups_search(const struct group_info *group_info, kgid_t grp)
 void set_groups(struct cred *new, struct group_info *group_info)
 {
        put_group_info(new->group_info);
-       groups_sort(group_info);
        get_group_info(group_info);
        new->group_info = group_info;
 }
@@ -206,6 +206,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
                return retval;
        }
 
+       groups_sort(group_info);
        retval = set_current_groups(group_info);
        put_group_info(group_info);
 
index 17f05ef..e4d3819 100644 (file)
 
 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
+       static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+       if (!__ratelimit(&ratelimit))
+               return;
+
        printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
                irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
        printk("->handle_irq():  %p, ", desc->handle_irq);
index 7f608ac..acfaaef 100644 (file)
@@ -113,6 +113,7 @@ static const struct irq_bit_descr irqdata_states[] = {
        BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
        BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
        BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+       BIT_MASK_DESCR(IRQD_CAN_RESERVE),
 
        BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
 
index c26c5bb..508c03d 100644 (file)
@@ -364,10 +364,11 @@ irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
 EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
 
 /*
- * Separate lockdep class for interrupt chip which can nest irq_desc
- * lock.
+ * Separate lockdep classes for interrupt chip which can nest irq_desc
+ * lock and request mutex.
  */
 static struct lock_class_key irq_nested_lock_class;
+static struct lock_class_key irq_nested_request_class;
 
 /*
  * irq_map_generic_chip - Map a generic chip for an irq domain
@@ -409,7 +410,8 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
        set_bit(idx, &gc->installed);
 
        if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
-               irq_set_lockdep_class(virq, &irq_nested_lock_class);
+               irq_set_lockdep_class(virq, &irq_nested_lock_class,
+                                     &irq_nested_request_class);
 
        if (chip->irq_calc_mask)
                chip->irq_calc_mask(data);
@@ -479,7 +481,8 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
                        continue;
 
                if (flags & IRQ_GC_INIT_NESTED_LOCK)
-                       irq_set_lockdep_class(i, &irq_nested_lock_class);
+                       irq_set_lockdep_class(i, &irq_nested_lock_class,
+                                             &irq_nested_request_class);
 
                if (!(flags & IRQ_GC_NO_MASK)) {
                        struct irq_data *d = irq_get_irq_data(i);
index 07d08ca..ab19371 100644 (file)
@@ -440,7 +440,7 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
 #endif /* !CONFIG_GENERIC_PENDING_IRQ */
 
 #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
-static inline int irq_domain_activate_irq(struct irq_data *data, bool early)
+static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
 {
        irqd_set_activated(data);
        return 0;
index 4f4f600..62068ad 100644 (file)
@@ -1693,7 +1693,7 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
        }
 }
 
-static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
+static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
 {
        int ret = 0;
 
@@ -1702,9 +1702,9 @@ static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
 
                if (irqd->parent_data)
                        ret = __irq_domain_activate_irq(irqd->parent_data,
-                                                       early);
+                                                       reserve);
                if (!ret && domain->ops->activate) {
-                       ret = domain->ops->activate(domain, irqd, early);
+                       ret = domain->ops->activate(domain, irqd, reserve);
                        /* Rollback in case of error */
                        if (ret && irqd->parent_data)
                                __irq_domain_deactivate_irq(irqd->parent_data);
@@ -1716,17 +1716,18 @@ static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  *                          interrupt
- * @irq_data:  outermost irq_data associated with interrupt
+ * @irq_data:  Outermost irq_data associated with interrupt
+ * @reserve:   If set only reserve an interrupt vector instead of assigning one
  *
  * This is the second step to call domain_ops->activate to program interrupt
  * controllers, so the interrupt could actually get delivered.
  */
-int irq_domain_activate_irq(struct irq_data *irq_data, bool early)
+int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
 {
        int ret = 0;
 
        if (!irqd_is_activated(irq_data))
-               ret = __irq_domain_activate_irq(irq_data, early);
+               ret = __irq_domain_activate_irq(irq_data, reserve);
        if (!ret)
                irqd_set_activated(irq_data);
        return ret;
index 7df2480..5187dfe 100644 (file)
@@ -321,15 +321,23 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
                     bool reserved, unsigned int *mapped_cpu)
 {
-       unsigned int cpu;
+       unsigned int cpu, best_cpu, maxavl = 0;
+       struct cpumap *cm;
+       unsigned int bit;
 
+       best_cpu = UINT_MAX;
        for_each_cpu(cpu, msk) {
-               struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-               unsigned int bit;
+               cm = per_cpu_ptr(m->maps, cpu);
 
-               if (!cm->online)
+               if (!cm->online || cm->available <= maxavl)
                        continue;
 
+               best_cpu = cpu;
+               maxavl = cm->available;
+       }
+
+       if (maxavl) {
+               cm = per_cpu_ptr(m->maps, best_cpu);
                bit = matrix_alloc_area(m, cm, 1, false);
                if (bit < m->alloc_end) {
                        cm->allocated++;
@@ -338,8 +346,8 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
                        m->global_available--;
                        if (reserved)
                                m->global_reserved--;
-                       *mapped_cpu = cpu;
-                       trace_irq_matrix_alloc(bit, cpu, m, cm);
+                       *mapped_cpu = best_cpu;
+                       trace_irq_matrix_alloc(bit, best_cpu, m, cm);
                        return bit;
                }
        }
@@ -384,7 +392,9 @@ unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
 {
        struct cpumap *cm = this_cpu_ptr(m->maps);
 
-       return (m->global_available - cpudown) ? cm->available : 0;
+       if (!cpudown)
+               return m->global_available;
+       return m->global_available - cm->available;
 }
 
 /**
index edb987b..2f3c4f5 100644 (file)
@@ -339,6 +339,40 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
        return ret;
 }
 
+/*
+ * Carefully check whether the device can use reservation mode. If
+ * reservation mode is enabled then the early activation will assign a
+ * dummy vector to the device. If the PCI/MSI device does not support
+ * masking of the entry then this can result in spurious interrupts when
+ * the device driver is not absolutely careful. But even then a malfunction
+ * of the hardware could result in a spurious interrupt on the dummy vector
+ * and render the device unusable. If the entry can be masked then the core
+ * logic will prevent the spurious interrupt and reservation mode can be
+ * used. For now reservation mode is restricted to PCI/MSI.
+ */
+static bool msi_check_reservation_mode(struct irq_domain *domain,
+                                      struct msi_domain_info *info,
+                                      struct device *dev)
+{
+       struct msi_desc *desc;
+
+       if (domain->bus_token != DOMAIN_BUS_PCI_MSI)
+               return false;
+
+       if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
+               return false;
+
+       if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
+               return false;
+
+       /*
+        * Checking the first MSI descriptor is sufficient. MSIX supports
+        * masking and MSI does so when the maskbit is set.
+        */
+       desc = first_msi_entry(dev);
+       return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
+}
+
 /**
  * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
  * @domain:    The domain to allocate from
@@ -353,9 +387,11 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 {
        struct msi_domain_info *info = domain->host_data;
        struct msi_domain_ops *ops = info->ops;
-       msi_alloc_info_t arg;
+       struct irq_data *irq_data;
        struct msi_desc *desc;
+       msi_alloc_info_t arg;
        int i, ret, virq;
+       bool can_reserve;
 
        ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
        if (ret)
@@ -385,6 +421,8 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
        if (ops->msi_finish)
                ops->msi_finish(&arg, 0);
 
+       can_reserve = msi_check_reservation_mode(domain, info, dev);
+
        for_each_msi_entry(desc, dev) {
                virq = desc->irq;
                if (desc->nvec_used == 1)
@@ -397,15 +435,25 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                 * the MSI entries before the PCI layer enables MSI in the
                 * card. Otherwise the card latches a random msi message.
                 */
-               if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
-                       struct irq_data *irq_data;
+               if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
+                       continue;
 
+               irq_data = irq_domain_get_irq_data(domain, desc->irq);
+               if (!can_reserve)
+                       irqd_clr_can_reserve(irq_data);
+               ret = irq_domain_activate_irq(irq_data, can_reserve);
+               if (ret)
+                       goto cleanup;
+       }
+
+       /*
+        * If these interrupts use reservation mode, clear the activated bit
+        * so request_irq() will assign the final vector.
+        */
+       if (can_reserve) {
+               for_each_msi_entry(desc, dev) {
                        irq_data = irq_domain_get_irq_data(domain, desc->irq);
-                       ret = irq_domain_activate_irq(irq_data, true);
-                       if (ret)
-                               goto cleanup;
-                       if (info->flags & MSI_FLAG_MUST_REACTIVATE)
-                               irqd_clr_activated(irq_data);
+                       irqd_clr_activated(irq_data);
                }
        }
        return 0;
index 8594d24..b451709 100644 (file)
@@ -79,7 +79,7 @@ int static_key_count(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_count);
 
-static void static_key_slow_inc_cpuslocked(struct static_key *key)
+void static_key_slow_inc_cpuslocked(struct static_key *key)
 {
        int v, v1;
 
@@ -180,7 +180,7 @@ void static_key_disable(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-static void static_key_slow_dec_cpuslocked(struct static_key *key,
+static void __static_key_slow_dec_cpuslocked(struct static_key *key,
                                           unsigned long rate_limit,
                                           struct delayed_work *work)
 {
@@ -211,7 +211,7 @@ static void __static_key_slow_dec(struct static_key *key,
                                  struct delayed_work *work)
 {
        cpus_read_lock();
-       static_key_slow_dec_cpuslocked(key, rate_limit, work);
+       __static_key_slow_dec_cpuslocked(key, rate_limit, work);
        cpus_read_unlock();
 }
 
@@ -229,6 +229,12 @@ void static_key_slow_dec(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
+void static_key_slow_dec_cpuslocked(struct static_key *key)
+{
+       STATIC_KEY_CHECK_USE(key);
+       __static_key_slow_dec_cpuslocked(key, 0, NULL);
+}
+
 void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
        STATIC_KEY_CHECK_USE(key);
index 531ffa9..d5fa411 100644 (file)
@@ -614,14 +614,14 @@ static void s_stop(struct seq_file *m, void *p)
 
 static int s_show(struct seq_file *m, void *p)
 {
-       unsigned long value;
+       void *value;
        struct kallsym_iter *iter = m->private;
 
        /* Some debugging symbols have no name.  Ignore them. */
        if (!iter->name[0])
                return 0;
 
-       value = iter->show_value ? iter->value : 0;
+       value = iter->show_value ? (void *)iter->value : NULL;
 
        if (iter->module_name[0]) {
                char type;
@@ -632,10 +632,10 @@ static int s_show(struct seq_file *m, void *p)
                 */
                type = iter->exported ? toupper(iter->type) :
                                        tolower(iter->type);
-               seq_printf(m, KALLSYM_FMT " %c %s\t[%s]\n", value,
+               seq_printf(m, "%px %c %s\t[%s]\n", value,
                           type, iter->name, iter->module_name);
        } else
-               seq_printf(m, KALLSYM_FMT " %c %s\n", value,
+               seq_printf(m, "%px %c %s\n", value,
                           iter->type, iter->name);
        return 0;
 }
index 15f33fa..7594c03 100644 (file)
@@ -157,7 +157,7 @@ void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
 
-void notrace __sanitizer_cov_trace_cmp4(u16 arg1, u16 arg2)
+void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
 {
        write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
 }
@@ -183,7 +183,7 @@ void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
 
-void notrace __sanitizer_cov_trace_const_cmp4(u16 arg1, u16 arg2)
+void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
 {
        write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
                        _RET_IP_);
index 9776da8..5216590 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/gfp.h>
 #include <linux/random.h>
 #include <linux/jhash.h>
+#include <linux/nmi.h>
 
 #include <asm/sections.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#include <linux/slab.h>
-#endif
-
 #ifdef CONFIG_PROVE_LOCKING
 int prove_locking = 1;
 module_param(prove_locking, int, 0644);
@@ -75,19 +72,6 @@ module_param(lock_stat, int, 0644);
 #define lock_stat 0
 #endif
 
-#ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
-static int crossrelease_fullstack = 1;
-#else
-static int crossrelease_fullstack;
-#endif
-static int __init allow_crossrelease_fullstack(char *str)
-{
-       crossrelease_fullstack = 1;
-       return 0;
-}
-
-early_param("crossrelease_fullstack", allow_crossrelease_fullstack);
-
 /*
  * lockdep_lock: protects the lockdep graph, the hashes and the
  *               class/list/hash allocators.
@@ -740,18 +724,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
 }
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-static void cross_init(struct lockdep_map *lock, int cross);
-static int cross_lock(struct lockdep_map *lock);
-static int lock_acquire_crosslock(struct held_lock *hlock);
-static int lock_release_crosslock(struct lockdep_map *lock);
-#else
-static inline void cross_init(struct lockdep_map *lock, int cross) {}
-static inline int cross_lock(struct lockdep_map *lock) { return 0; }
-static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; }
-static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; }
-#endif
-
 /*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1151,41 +1123,22 @@ print_circular_lock_scenario(struct held_lock *src,
                printk(KERN_CONT "\n\n");
        }
 
-       if (cross_lock(tgt->instance)) {
-               printk(" Possible unsafe locking scenario by crosslock:\n\n");
-               printk("       CPU0                    CPU1\n");
-               printk("       ----                    ----\n");
-               printk("  lock(");
-               __print_lock_name(parent);
-               printk(KERN_CONT ");\n");
-               printk("  lock(");
-               __print_lock_name(target);
-               printk(KERN_CONT ");\n");
-               printk("                               lock(");
-               __print_lock_name(source);
-               printk(KERN_CONT ");\n");
-               printk("                               unlock(");
-               __print_lock_name(target);
-               printk(KERN_CONT ");\n");
-               printk("\n *** DEADLOCK ***\n\n");
-       } else {
-               printk(" Possible unsafe locking scenario:\n\n");
-               printk("       CPU0                    CPU1\n");
-               printk("       ----                    ----\n");
-               printk("  lock(");
-               __print_lock_name(target);
-               printk(KERN_CONT ");\n");
-               printk("                               lock(");
-               __print_lock_name(parent);
-               printk(KERN_CONT ");\n");
-               printk("                               lock(");
-               __print_lock_name(target);
-               printk(KERN_CONT ");\n");
-               printk("  lock(");
-               __print_lock_name(source);
-               printk(KERN_CONT ");\n");
-               printk("\n *** DEADLOCK ***\n\n");
-       }
+       printk(" Possible unsafe locking scenario:\n\n");
+       printk("       CPU0                    CPU1\n");
+       printk("       ----                    ----\n");
+       printk("  lock(");
+       __print_lock_name(target);
+       printk(KERN_CONT ");\n");
+       printk("                               lock(");
+       __print_lock_name(parent);
+       printk(KERN_CONT ");\n");
+       printk("                               lock(");
+       __print_lock_name(target);
+       printk(KERN_CONT ");\n");
+       printk("  lock(");
+       __print_lock_name(source);
+       printk(KERN_CONT ");\n");
+       printk("\n *** DEADLOCK ***\n\n");
 }
 
 /*
@@ -1211,10 +1164,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
                curr->comm, task_pid_nr(curr));
        print_lock(check_src);
 
-       if (cross_lock(check_tgt->instance))
-               pr_warn("\nbut now in release context of a crosslock acquired at the following:\n");
-       else
-               pr_warn("\nbut task is already holding lock:\n");
+       pr_warn("\nbut task is already holding lock:\n");
 
        print_lock(check_tgt);
        pr_warn("\nwhich lock already depends on the new lock.\n\n");
@@ -1244,9 +1194,7 @@ static noinline int print_circular_bug(struct lock_list *this,
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return 0;
 
-       if (cross_lock(check_tgt->instance))
-               this->trace = *trace;
-       else if (!save_trace(&this->trace))
+       if (!save_trace(&this->trace))
                return 0;
 
        depth = get_lock_depth(target);
@@ -1850,9 +1798,6 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
                if (nest)
                        return 2;
 
-               if (cross_lock(prev->instance))
-                       continue;
-
                return print_deadlock_bug(curr, prev, next);
        }
        return 1;
@@ -2018,31 +1963,26 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
        for (;;) {
                int distance = curr->lockdep_depth - depth + 1;
                hlock = curr->held_locks + depth - 1;
+
                /*
-                * Only non-crosslock entries get new dependencies added.
-                * Crosslock entries will be added by commit later:
+                * Only non-recursive-read entries get new dependencies
+                * added:
                 */
-               if (!cross_lock(hlock->instance)) {
+               if (hlock->read != 2 && hlock->check) {
+                       int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
+                       if (!ret)
+                               return 0;
+
                        /*
-                        * Only non-recursive-read entries get new dependencies
-                        * added:
+                        * Stop after the first non-trylock entry,
+                        * as non-trylock entries have added their
+                        * own direct dependencies already, so this
+                        * lock is connected to them indirectly:
                         */
-                       if (hlock->read != 2 && hlock->check) {
-                               int ret = check_prev_add(curr, hlock, next,
-                                                        distance, &trace, save_trace);
-                               if (!ret)
-                                       return 0;
-
-                               /*
-                                * Stop after the first non-trylock entry,
-                                * as non-trylock entries have added their
-                                * own direct dependencies already, so this
-                                * lock is connected to them indirectly:
-                                */
-                               if (!hlock->trylock)
-                                       break;
-                       }
+                       if (!hlock->trylock)
+                               break;
                }
+
                depth--;
                /*
                 * End of lock-stack?
@@ -3292,21 +3232,10 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       cross_init(lock, 0);
        __lockdep_init_map(lock, name, key, subclass);
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name,
-                     struct lock_class_key *key, int subclass)
-{
-       cross_init(lock, 1);
-       __lockdep_init_map(lock, name, key, subclass);
-}
-EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock);
-#endif
-
 struct lock_class_key __lockdep_no_validate__;
 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
 
@@ -3362,7 +3291,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        int chain_head = 0;
        int class_idx;
        u64 chain_key;
-       int ret;
 
        if (unlikely(!debug_locks))
                return 0;
@@ -3411,8 +3339,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
        class_idx = class - lock_classes + 1;
 
-       /* TODO: nest_lock is not implemented for crosslock yet. */
-       if (depth && !cross_lock(lock)) {
+       if (depth) {
                hlock = curr->held_locks + depth - 1;
                if (hlock->class_idx == class_idx && nest_lock) {
                        if (hlock->references) {
@@ -3500,14 +3427,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
                return 0;
 
-       ret = lock_acquire_crosslock(hlock);
-       /*
-        * 2 means normal acquire operations are needed. Otherwise, it's
-        * ok just to return with '0:fail, 1:success'.
-        */
-       if (ret != 2)
-               return ret;
-
        curr->curr_chain_key = chain_key;
        curr->lockdep_depth++;
        check_chain_key(curr);
@@ -3745,19 +3664,11 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
        struct task_struct *curr = current;
        struct held_lock *hlock;
        unsigned int depth;
-       int ret, i;
+       int i;
 
        if (unlikely(!debug_locks))
                return 0;
 
-       ret = lock_release_crosslock(lock);
-       /*
-        * 2 means normal release operations are needed. Otherwise, it's
-        * ok just to return with '0:fail, 1:success'.
-        */
-       if (ret != 2)
-               return ret;
-
        depth = curr->lockdep_depth;
        /*
         * So we're all set to release this lock.. wait what lock? We don't
@@ -4580,6 +4491,7 @@ retry:
                if (!unlock)
                        if (read_trylock(&tasklist_lock))
                                unlock = 1;
+               touch_nmi_watchdog();
        } while_each_thread(g, p);
 
        pr_warn("\n");
@@ -4675,494 +4587,3 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
        dump_stack();
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
-
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-
-/*
- * Crossrelease works by recording a lock history for each thread and
- * connecting those historic locks that were taken after the
- * wait_for_completion() in the complete() context.
- *
- * Task-A                              Task-B
- *
- *                                     mutex_lock(&A);
- *                                     mutex_unlock(&A);
- *
- * wait_for_completion(&C);
- *   lock_acquire_crosslock();
- *     atomic_inc_return(&cross_gen_id);
- *                                |
- *                               |     mutex_lock(&B);
- *                               |     mutex_unlock(&B);
- *                                |
- *                               |     complete(&C);
- *                               `--     lock_commit_crosslock();
- *
- * Which will then add a dependency between B and C.
- */
-
-#define xhlock(i)         (current->xhlocks[(i) % MAX_XHLOCKS_NR])
-
-/*
- * Whenever a crosslock is held, cross_gen_id will be increased.
- */
-static atomic_t cross_gen_id; /* Can be wrapped */
-
-/*
- * Make an entry of the ring buffer invalid.
- */
-static inline void invalidate_xhlock(struct hist_lock *xhlock)
-{
-       /*
-        * Normally, xhlock->hlock.instance must be !NULL.
-        */
-       xhlock->hlock.instance = NULL;
-}
-
-/*
- * Lock history stacks; we have 2 nested lock history stacks:
- *
- *   HARD(IRQ)
- *   SOFT(IRQ)
- *
- * The thing is that once we complete a HARD/SOFT IRQ the future task locks
- * should not depend on any of the locks observed while running the IRQ.  So
- * what we do is rewind the history buffer and erase all our knowledge of that
- * temporal event.
- */
-
-void crossrelease_hist_start(enum xhlock_context_t c)
-{
-       struct task_struct *cur = current;
-
-       if (!cur->xhlocks)
-               return;
-
-       cur->xhlock_idx_hist[c] = cur->xhlock_idx;
-       cur->hist_id_save[c]    = cur->hist_id;
-}
-
-void crossrelease_hist_end(enum xhlock_context_t c)
-{
-       struct task_struct *cur = current;
-
-       if (cur->xhlocks) {
-               unsigned int idx = cur->xhlock_idx_hist[c];
-               struct hist_lock *h = &xhlock(idx);
-
-               cur->xhlock_idx = idx;
-
-               /* Check if the ring was overwritten. */
-               if (h->hist_id != cur->hist_id_save[c])
-                       invalidate_xhlock(h);
-       }
-}
-
-/*
- * lockdep_invariant_state() is used to annotate independence inside a task, to
- * make one task look like multiple independent 'tasks'.
- *
- * Take for instance workqueues; each work is independent of the last. The
- * completion of a future work does not depend on the completion of a past work
- * (in general). Therefore we must not carry that (lock) dependency across
- * works.
- *
- * This is true for many things; pretty much all kthreads fall into this
- * pattern, where they have an invariant state and future completions do not
- * depend on past completions. Its just that since they all have the 'same'
- * form -- the kthread does the same over and over -- it doesn't typically
- * matter.
- *
- * The same is true for system-calls, once a system call is completed (we've
- * returned to userspace) the next system call does not depend on the lock
- * history of the previous system call.
- *
- * They key property for independence, this invariant state, is that it must be
- * a point where we hold no locks and have no history. Because if we were to
- * hold locks, the restore at _end() would not necessarily recover it's history
- * entry. Similarly, independence per-definition means it does not depend on
- * prior state.
- */
-void lockdep_invariant_state(bool force)
-{
-       /*
-        * We call this at an invariant point, no current state, no history.
-        * Verify the former, enforce the latter.
-        */
-       WARN_ON_ONCE(!force && current->lockdep_depth);
-       invalidate_xhlock(&xhlock(current->xhlock_idx));
-}
-
-static int cross_lock(struct lockdep_map *lock)
-{
-       return lock ? lock->cross : 0;
-}
-
-/*
- * This is needed to decide the relationship between wrapable variables.
- */
-static inline int before(unsigned int a, unsigned int b)
-{
-       return (int)(a - b) < 0;
-}
-
-static inline struct lock_class *xhlock_class(struct hist_lock *xhlock)
-{
-       return hlock_class(&xhlock->hlock);
-}
-
-static inline struct lock_class *xlock_class(struct cross_lock *xlock)
-{
-       return hlock_class(&xlock->hlock);
-}
-
-/*
- * Should we check a dependency with previous one?
- */
-static inline int depend_before(struct held_lock *hlock)
-{
-       return hlock->read != 2 && hlock->check && !hlock->trylock;
-}
-
-/*
- * Should we check a dependency with next one?
- */
-static inline int depend_after(struct held_lock *hlock)
-{
-       return hlock->read != 2 && hlock->check;
-}
-
-/*
- * Check if the xhlock is valid, which would be false if,
- *
- *    1. Has not used after initializaion yet.
- *    2. Got invalidated.
- *
- * Remind hist_lock is implemented as a ring buffer.
- */
-static inline int xhlock_valid(struct hist_lock *xhlock)
-{
-       /*
-        * xhlock->hlock.instance must be !NULL.
-        */
-       return !!xhlock->hlock.instance;
-}
-
-/*
- * Record a hist_lock entry.
- *
- * Irq disable is only required.
- */
-static void add_xhlock(struct held_lock *hlock)
-{
-       unsigned int idx = ++current->xhlock_idx;
-       struct hist_lock *xhlock = &xhlock(idx);
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-       /*
-        * This can be done locklessly because they are all task-local
-        * state, we must however ensure IRQs are disabled.
-        */
-       WARN_ON_ONCE(!irqs_disabled());
-#endif
-
-       /* Initialize hist_lock's members */
-       xhlock->hlock = *hlock;
-       xhlock->hist_id = ++current->hist_id;
-
-       xhlock->trace.nr_entries = 0;
-       xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
-       xhlock->trace.entries = xhlock->trace_entries;
-
-       if (crossrelease_fullstack) {
-               xhlock->trace.skip = 3;
-               save_stack_trace(&xhlock->trace);
-       } else {
-               xhlock->trace.nr_entries = 1;
-               xhlock->trace.entries[0] = hlock->acquire_ip;
-       }
-}
-
-static inline int same_context_xhlock(struct hist_lock *xhlock)
-{
-       return xhlock->hlock.irq_context == task_irq_context(current);
-}
-
-/*
- * This should be lockless as far as possible because this would be
- * called very frequently.
- */
-static void check_add_xhlock(struct held_lock *hlock)
-{
-       /*
-        * Record a hist_lock, only in case that acquisitions ahead
-        * could depend on the held_lock. For example, if the held_lock
-        * is trylock then acquisitions ahead never depends on that.
-        * In that case, we don't need to record it. Just return.
-        */
-       if (!current->xhlocks || !depend_before(hlock))
-               return;
-
-       add_xhlock(hlock);
-}
-
-/*
- * For crosslock.
- */
-static int add_xlock(struct held_lock *hlock)
-{
-       struct cross_lock *xlock;
-       unsigned int gen_id;
-
-       if (!graph_lock())
-               return 0;
-
-       xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;
-
-       /*
-        * When acquisitions for a crosslock are overlapped, we use
-        * nr_acquire to perform commit for them, based on cross_gen_id
-        * of the first acquisition, which allows to add additional
-        * dependencies.
-        *
-        * Moreover, when no acquisition of a crosslock is in progress,
-        * we should not perform commit because the lock might not exist
-        * any more, which might cause incorrect memory access. So we
-        * have to track the number of acquisitions of a crosslock.
-        *
-        * depend_after() is necessary to initialize only the first
-        * valid xlock so that the xlock can be used on its commit.
-        */
-       if (xlock->nr_acquire++ && depend_after(&xlock->hlock))
-               goto unlock;
-
-       gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
-       xlock->hlock = *hlock;
-       xlock->hlock.gen_id = gen_id;
-unlock:
-       graph_unlock();
-       return 1;
-}
-
-/*
- * Called for both normal and crosslock acquires. Normal locks will be
- * pushed on the hist_lock queue. Cross locks will record state and
- * stop regular lock_acquire() to avoid being placed on the held_lock
- * stack.
- *
- * Return: 0 - failure;
- *         1 - crosslock, done;
- *         2 - normal lock, continue to held_lock[] ops.
- */
-static int lock_acquire_crosslock(struct held_lock *hlock)
-{
-       /*
-        *      CONTEXT 1               CONTEXT 2
-        *      ---------               ---------
-        *      lock A (cross)
-        *      X = atomic_inc_return(&cross_gen_id)
-        *      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-        *                              Y = atomic_read_acquire(&cross_gen_id)
-        *                              lock B
-        *
-        * atomic_read_acquire() is for ordering between A and B,
-        * IOW, A happens before B, when CONTEXT 2 see Y >= X.
-        *
-        * Pairs with atomic_inc_return() in add_xlock().
-        */
-       hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id);
-
-       if (cross_lock(hlock->instance))
-               return add_xlock(hlock);
-
-       check_add_xhlock(hlock);
-       return 2;
-}
-
-static int copy_trace(struct stack_trace *trace)
-{
-       unsigned long *buf = stack_trace + nr_stack_trace_entries;
-       unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-       unsigned int nr = min(max_nr, trace->nr_entries);
-
-       trace->nr_entries = nr;
-       memcpy(buf, trace->entries, nr * sizeof(trace->entries[0]));
-       trace->entries = buf;
-       nr_stack_trace_entries += nr;
-
-       if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
-               if (!debug_locks_off_graph_unlock())
-                       return 0;
-
-               print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
-               dump_stack();
-
-               return 0;
-       }
-
-       return 1;
-}
-
-static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
-{
-       unsigned int xid, pid;
-       u64 chain_key;
-
-       xid = xlock_class(xlock) - lock_classes;
-       chain_key = iterate_chain_key((u64)0, xid);
-       pid = xhlock_class(xhlock) - lock_classes;
-       chain_key = iterate_chain_key(chain_key, pid);
-
-       if (lookup_chain_cache(chain_key))
-               return 1;
-
-       if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context,
-                               chain_key))
-               return 0;
-
-       if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1,
-                           &xhlock->trace, copy_trace))
-               return 0;
-
-       return 1;
-}
-
-static void commit_xhlocks(struct cross_lock *xlock)
-{
-       unsigned int cur = current->xhlock_idx;
-       unsigned int prev_hist_id = xhlock(cur).hist_id;
-       unsigned int i;
-
-       if (!graph_lock())
-               return;
-
-       if (xlock->nr_acquire) {
-               for (i = 0; i < MAX_XHLOCKS_NR; i++) {
-                       struct hist_lock *xhlock = &xhlock(cur - i);
-
-                       if (!xhlock_valid(xhlock))
-                               break;
-
-                       if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
-                               break;
-
-                       if (!same_context_xhlock(xhlock))
-                               break;
-
-                       /*
-                        * Filter out the cases where the ring buffer was
-                        * overwritten and the current entry has a bigger
-                        * hist_id than the previous one, which is impossible
-                        * otherwise:
-                        */
-                       if (unlikely(before(prev_hist_id, xhlock->hist_id)))
-                               break;
-
-                       prev_hist_id = xhlock->hist_id;
-
-                       /*
-                        * commit_xhlock() returns 0 with graph_lock already
-                        * released if fail.
-                        */
-                       if (!commit_xhlock(xlock, xhlock))
-                               return;
-               }
-       }
-
-       graph_unlock();
-}
-
-void lock_commit_crosslock(struct lockdep_map *lock)
-{
-       struct cross_lock *xlock;
-       unsigned long flags;
-
-       if (unlikely(!debug_locks || current->lockdep_recursion))
-               return;
-
-       if (!current->xhlocks)
-               return;
-
-       /*
-        * Do commit hist_locks with the cross_lock, only in case that
-        * the cross_lock could depend on acquisitions after that.
-        *
-        * For example, if the cross_lock does not have the 'check' flag
-        * then we don't need to check dependencies and commit for that.
-        * Just skip it. In that case, of course, the cross_lock does
-        * not depend on acquisitions ahead, either.
-        *
-        * WARNING: Don't do that in add_xlock() in advance. When an
-        * acquisition context is different from the commit context,
-        * invalid(skipped) cross_lock might be accessed.
-        */
-       if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock))
-               return;
-
-       raw_local_irq_save(flags);
-       check_flags(flags);
-       current->lockdep_recursion = 1;
-       xlock = &((struct lockdep_map_cross *)lock)->xlock;
-       commit_xhlocks(xlock);
-       current->lockdep_recursion = 0;
-       raw_local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(lock_commit_crosslock);
-
-/*
- * Return: 0 - failure;
- *         1 - crosslock, done;
- *         2 - normal lock, continue to held_lock[] ops.
- */
-static int lock_release_crosslock(struct lockdep_map *lock)
-{
-       if (cross_lock(lock)) {
-               if (!graph_lock())
-                       return 0;
-               ((struct lockdep_map_cross *)lock)->xlock.nr_acquire--;
-               graph_unlock();
-               return 1;
-       }
-       return 2;
-}
-
-static void cross_init(struct lockdep_map *lock, int cross)
-{
-       if (cross)
-               ((struct lockdep_map_cross *)lock)->xlock.nr_acquire = 0;
-
-       lock->cross = cross;
-
-       /*
-        * Crossrelease assumes that the ring buffer size of xhlocks
-        * is aligned with power of 2. So force it on build.
-        */
-       BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1));
-}
-
-void lockdep_init_task(struct task_struct *task)
-{
-       int i;
-
-       task->xhlock_idx = UINT_MAX;
-       task->hist_id = 0;
-
-       for (i = 0; i < XHLOCK_CTX_NR; i++) {
-               task->xhlock_idx_hist[i] = UINT_MAX;
-               task->hist_id_save[i] = 0;
-       }
-
-       task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
-                               GFP_KERNEL);
-}
-
-void lockdep_free_task(struct task_struct *task)
-{
-       if (task->xhlocks) {
-               void *tmp = task->xhlocks;
-               /* Diable crossrelease for current */
-               task->xhlocks = NULL;
-               kfree(tmp);
-       }
-}
-#endif
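
With crossrelease gone, completions are no longer fed into the dependency
graph; what remains is the classic held-lock analysis. A minimal sketch of the
inversion the remaining checks still catch (locks and functions hypothetical):

    static DEFINE_MUTEX(a);
    static DEFINE_MUTEX(b);

    static void thread1(void)
    {
            mutex_lock(&a);
            mutex_lock(&b);         /* records the dependency a -> b */
            mutex_unlock(&b);
            mutex_unlock(&a);
    }

    static void thread2(void)
    {
            mutex_lock(&b);
            mutex_lock(&a);         /* b -> a closes the cycle; lockdep prints the scenario above */
            mutex_unlock(&a);
            mutex_unlock(&b);
    }
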
index 6f3dba6..65cc0cb 100644 (file)
@@ -1290,6 +1290,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
        return ret;
 }
 
+static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
+{
+       int ret = try_to_take_rt_mutex(lock, current, NULL);
+
+       /*
+        * try_to_take_rt_mutex() sets the lock waiters bit
+        * unconditionally. Clean this up.
+        */
+       fixup_rt_mutex_waiters(lock);
+
+       return ret;
+}
+
 /*
  * Slow path try-lock function:
  */
@@ -1312,13 +1325,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
         */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
-       ret = try_to_take_rt_mutex(lock, current, NULL);
-
-       /*
-        * try_to_take_rt_mutex() sets the lock waiters bit
-        * unconditionally. Clean this up.
-        */
-       fixup_rt_mutex_waiters(lock);
+       ret = __rt_mutex_slowtrylock(lock);
 
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
@@ -1505,6 +1512,11 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
        return rt_mutex_slowtrylock(lock);
 }
 
+int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+       return __rt_mutex_slowtrylock(lock);
+}
+
 /**
  * rt_mutex_timed_lock - lock a rt_mutex interruptible
  *                     the timeout structure is provided
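
__rt_mutex_futex_trylock() is the wait_lock-held counterpart of
rt_mutex_slowtrylock(); it is intended for futex code (elsewhere in this
series) that already holds lock->wait_lock. A hedged sketch of such a caller:

    raw_spin_lock_irq(&lock->wait_lock);
    /* ... inspect or fix up waiter state under wait_lock ... */
    if (__rt_mutex_futex_trylock(lock)) {
            /* current is now the owner; finish the futex bookkeeping */
    }
    raw_spin_unlock_irq(&lock->wait_lock);
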
index 124e98c..68686b3 100644 (file)
@@ -148,6 +148,7 @@ extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
                                 struct rt_mutex_waiter *waiter);
 
 extern int rt_mutex_futex_trylock(struct rt_mutex *l);
+extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
 
 extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
 extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
index 1fd1a75..936f3d1 100644 (file)
@@ -66,12 +66,8 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock)                        \
                        break;                                          \
                preempt_enable();                                       \
                                                                        \
-               if (!(lock)->break_lock)                                \
-                       (lock)->break_lock = 1;                         \
-               while ((lock)->break_lock)                              \
-                       arch_##op##_relax(&lock->raw_lock);             \
+               arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
-       (lock)->break_lock = 0;                                         \
 }                                                                      \
                                                                        \
 unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
@@ -86,12 +82,9 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)       \
                local_irq_restore(flags);                               \
                preempt_enable();                                       \
                                                                        \
-               if (!(lock)->break_lock)                                \
-                       (lock)->break_lock = 1;                         \
-               while ((lock)->break_lock)                              \
-                       arch_##op##_relax(&lock->raw_lock);             \
+               arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
-       (lock)->break_lock = 0;                                         \
+                                                                       \
        return flags;                                                   \
 }                                                                      \
                                                                        \
index f0411a2..dea01ac 100644 (file)
@@ -4157,7 +4157,7 @@ static int m_show(struct seq_file *m, void *p)
 {
        struct module *mod = list_entry(p, struct module, list);
        char buf[MODULE_FLAGS_BUF_SIZE];
-       unsigned long value;
+       void *value;
 
        /* We always ignore unformed modules. */
        if (mod->state == MODULE_STATE_UNFORMED)
@@ -4173,8 +4173,8 @@ static int m_show(struct seq_file *m, void *p)
                   mod->state == MODULE_STATE_COMING ? "Loading" :
                   "Live");
        /* Used by oprofile and other similar tools. */
-       value = m->private ? 0 : (unsigned long)mod->core_layout.base;
-       seq_printf(m, " 0x" KALLSYM_FMT, value);
+       value = m->private ? NULL : mod->core_layout.base;
+       seq_printf(m, " 0x%px", value);
 
        /* Taints info */
        if (mod->taints)
index b13b624..5d30c87 100644 (file)
 #include <linux/sched/task.h>
 #include <linux/idr.h>
 
-struct pid init_struct_pid = INIT_STRUCT_PID;
+struct pid init_struct_pid = {
+       .count          = ATOMIC_INIT(1),
+       .tasks          = {
+               { .first = NULL },
+               { .first = NULL },
+               { .first = NULL },
+       },
+       .level          = 0,
+       .numbers        = { {
+               .nr             = 0,
+               .ns             = &init_pid_ns,
+       }, }
+};
 
 int pid_max = PID_MAX_DEFAULT;
 
@@ -193,10 +205,8 @@ struct pid *alloc_pid(struct pid_namespace *ns)
        }
 
        if (unlikely(is_child_reaper(pid))) {
-               if (pid_ns_prepare_proc(ns)) {
-                       disable_pid_allocation(ns);
+               if (pid_ns_prepare_proc(ns))
                        goto out_free;
-               }
        }
 
        get_pid_ns(ns);
@@ -226,6 +236,10 @@ out_free:
        while (++i <= ns->level)
                idr_remove(&ns->idr, (pid->numbers + i)->nr);
 
+       /* On failure to allocate the first pid, reset the state */
+       if (ns->pid_allocated == PIDNS_ADDING)
+               idr_set_cursor(&ns->idr, 0);
+
        spin_unlock_irq(&pidmap_lock);
 
        kmem_cache_free(ns->pid_cachep, pid);
index 3a2ca90..705c236 100644 (file)
@@ -22,6 +22,35 @@ DEFINE_MUTEX(pm_mutex);
 
 #ifdef CONFIG_PM_SLEEP
 
+void lock_system_sleep(void)
+{
+       current->flags |= PF_FREEZER_SKIP;
+       mutex_lock(&pm_mutex);
+}
+EXPORT_SYMBOL_GPL(lock_system_sleep);
+
+void unlock_system_sleep(void)
+{
+       /*
+        * Don't use freezer_count() because we don't want the call to
+        * try_to_freeze() here.
+        *
+        * Reason:
+        * Fundamentally, we just don't need it, because the freezing condition
+        * doesn't come into effect until we release the pm_mutex lock,
+        * since the freezer always works with pm_mutex held.
+        *
+        * More importantly, in the case of hibernation,
+        * unlock_system_sleep() gets called in snapshot_read() and
+        * snapshot_write() when the freezing condition is still in effect.
+        * Which means, if we use try_to_freeze() here, it would make them
+        * enter the refrigerator, thus causing hibernation to lock up.
+        */
+       current->flags &= ~PF_FREEZER_SKIP;
+       mutex_unlock(&pm_mutex);
+}
+EXPORT_SYMBOL_GPL(unlock_system_sleep);
+
 /* Routines for PM-transition notifications */
 
 static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
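
These helpers let a caller serialize against any system sleep transition on
pm_mutex without stalling the freezer, thanks to PF_FREEZER_SKIP. A typical
pattern (sketch):

    lock_system_sleep();
    /* ... state that must not change while suspend/hibernation is in progress ... */
    unlock_system_sleep();
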
index bce0464..3d37c27 100644 (file)
@@ -1645,8 +1645,7 @@ static unsigned long free_unnecessary_pages(void)
  * [number of saveable pages] - [number of pages that can be freed in theory]
  *
  * where the second term is the sum of (1) reclaimable slab pages, (2) active
- * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
- * minus mapped file pages.
+ * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
  */
 static unsigned long minimum_image_size(unsigned long saveable)
 {
@@ -1656,8 +1655,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
                + global_node_page_state(NR_ACTIVE_ANON)
                + global_node_page_state(NR_INACTIVE_ANON)
                + global_node_page_state(NR_ACTIVE_FILE)
-               + global_node_page_state(NR_INACTIVE_FILE)
-               - global_node_page_state(NR_FILE_MAPPED);
+               + global_node_page_state(NR_INACTIVE_FILE);
 
        return saveable <= size ? 0 : saveable - size;
 }
index 293ead5..a46be12 100644 (file)
@@ -879,7 +879,7 @@ out_clean:
  *     space available from the resume partition.
  */
 
-static int enough_swap(unsigned int nr_pages, unsigned int flags)
+static int enough_swap(unsigned int nr_pages)
 {
        unsigned int free_swap = count_swap_pages(root_swap, 1);
        unsigned int required;
@@ -915,7 +915,7 @@ int swsusp_write(unsigned int flags)
                return error;
        }
        if (flags & SF_NOCOMPRESS_MODE) {
-               if (!enough_swap(pages, flags)) {
+               if (!enough_swap(pages)) {
                        pr_err("Not enough free swap\n");
                        error = -ENOSPC;
                        goto out_finish;
index 5d81206..b900661 100644 (file)
@@ -3141,9 +3141,6 @@ void dump_stack_print_info(const char *log_lvl)
 void show_regs_print_info(const char *log_lvl)
 {
        dump_stack_print_info(log_lvl);
-
-       printk("%stask: %p task.stack: %p\n",
-              log_lvl, current, task_stack_page(current));
 }
 
 #endif
index 2ddaec4..0926aef 100644 (file)
@@ -34,11 +34,6 @@ void complete(struct completion *x)
 
        spin_lock_irqsave(&x->wait.lock, flags);
 
-       /*
-        * Perform commit of crossrelease here.
-        */
-       complete_release_commit(x);
-
        if (x->done != UINT_MAX)
                x->done++;
        __wake_up_locked(&x->wait, TASK_NORMAL, 1);
index 75554f3..a7bf32a 100644 (file)
@@ -2056,7 +2056,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        p->state = TASK_WAKING;
 
        if (p->in_iowait) {
-               delayacct_blkio_end();
+               delayacct_blkio_end(p);
                atomic_dec(&task_rq(p)->nr_iowait);
        }
 
@@ -2069,7 +2069,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 #else /* CONFIG_SMP */
 
        if (p->in_iowait) {
-               delayacct_blkio_end();
+               delayacct_blkio_end(p);
                atomic_dec(&task_rq(p)->nr_iowait);
        }
 
@@ -2122,7 +2122,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
 
        if (!task_on_rq_queued(p)) {
                if (p->in_iowait) {
-                       delayacct_blkio_end();
+                       delayacct_blkio_end(p);
                        atomic_dec(&rq->nr_iowait);
                }
                ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
@@ -5097,17 +5097,6 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
        return ret;
 }
 
-/**
- * sys_sched_rr_get_interval - return the default timeslice of a process.
- * @pid: pid of the process.
- * @interval: userspace pointer to the timeslice value.
- *
- * this syscall writes the default timeslice value of a given process
- * into the user-space timespec buffer. A value of '0' means infinity.
- *
- * Return: On success, 0 and the timeslice is in @interval. Otherwise,
- * an error code.
- */
 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
 {
        struct task_struct *p;
@@ -5144,6 +5133,17 @@ out_unlock:
        return retval;
 }
 
+/**
+ * sys_sched_rr_get_interval - return the default timeslice of a process.
+ * @pid: pid of the process.
+ * @interval: userspace pointer to the timeslice value.
+ *
+ * this syscall writes the default timeslice value of a given process
+ * into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
+ */
 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                struct timespec __user *, interval)
 {
index 2f52ec0..d6717a3 100644 (file)
@@ -244,7 +244,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
 #ifdef CONFIG_NO_HZ_COMMON
 static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
 {
-       unsigned long idle_calls = tick_nohz_get_idle_calls();
+       unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;
 
        sg_cpu->saved_idle_calls = idle_calls;
index 4037e19..26a71eb 100644 (file)
@@ -3413,9 +3413,9 @@ void set_task_rq_fair(struct sched_entity *se,
  * _IFF_ we look at the pure running and runnable sums. Because they
  * represent the very same entity, just at different points in the hierarchy.
  *
- *
- * Per the above update_tg_cfs_util() is trivial (and still 'wrong') and
- * simply copies the running sum over.
+ * Per the above update_tg_cfs_util() is trivial and simply copies the running
+ * sum over (but still wrong, because the group entity and group rq do not have
+ * their PELT windows aligned).
  *
  * However, update_tg_cfs_runnable() is more complex. So we have:
  *
@@ -3424,11 +3424,11 @@ void set_task_rq_fair(struct sched_entity *se,
  * And since, like util, the runnable part should be directly transferable,
  * the following would _appear_ to be the straight forward approach:
  *
- *   grq->avg.load_avg = grq->load.weight * grq->avg.running_avg       (3)
+ *   grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg      (3)
  *
  * And per (1) we have:
  *
- *   ge->avg.running_avg == grq->avg.running_avg
+ *   ge->avg.runnable_avg == grq->avg.runnable_avg
  *
  * Which gives:
  *
@@ -3447,27 +3447,28 @@ void set_task_rq_fair(struct sched_entity *se,
  * to (shortly) return to us. This only works by keeping the weights as
  * integral part of the sum. We therefore cannot decompose as per (3).
  *
- * OK, so what then?
+ * Another reason this doesn't work is that runnable isn't a 0-sum entity.
+ * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
+ * rq itself is runnable anywhere between 2/3 and 1 depending on how the
+ * runnable section of these tasks overlap (or not). If they were to perfectly
+ * align the rq as a whole would be runnable 2/3 of the time. If however we
+ * always have at least 1 runnable task, the rq as a whole is always runnable.
  *
+ * So we'll have to approximate.. :/
  *
- * Another way to look at things is:
+ * Given the constraint:
  *
- *   grq->avg.load_avg = \Sum se->avg.load_avg
+ *   ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
  *
- * Therefore, per (2):
+ * We can construct a rule that adds runnable to a rq by assuming minimal
+ * overlap.
  *
- *   grq->avg.load_avg = \Sum se->load.weight * se->avg.runnable_avg
+ * On removal, we'll assume each task is equally runnable; which yields:
  *
- * And the very thing we're propagating is a change in that sum (someone
- * joined/left). So we can easily know the runnable change, which would be, per
- * (2) the already tracked se->load_avg divided by the corresponding
- * se->weight.
+ *   grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
  *
- * Basically (4) but in differential form:
+ * XXX: only do this for the part of runnable > running ?
  *
- *   d(runnable_avg) += se->avg.load_avg / se->load.weight
- *                                                                (5)
- *   ge->avg.load_avg += ge->load.weight * d(runnable_avg)
  */
 
 static inline void
@@ -3479,6 +3480,14 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
        if (!delta)
                return;
 
+       /*
+        * The relation between sum and avg is:
+        *
+        *   sum = avg * (LOAD_AVG_MAX - 1024 + sa->period_contrib)
+        *
+        * however, the PELT windows are not aligned between grq and gse.
+        */
+
        /* Set new sched_entity's utilization */
        se->avg.util_avg = gcfs_rq->avg.util_avg;
        se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
@@ -3491,33 +3500,68 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 static inline void
 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-       long runnable_sum = gcfs_rq->prop_runnable_sum;
-       long runnable_load_avg, load_avg;
-       s64 runnable_load_sum, load_sum;
+       long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+       unsigned long runnable_load_avg, load_avg;
+       u64 runnable_load_sum, load_sum = 0;
+       s64 delta_sum;
 
        if (!runnable_sum)
                return;
 
        gcfs_rq->prop_runnable_sum = 0;
 
+       if (runnable_sum >= 0) {
+               /*
+                * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
+                * the CPU is saturated running == runnable.
+                */
+               runnable_sum += se->avg.load_sum;
+               runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX);
+       } else {
+               /*
+                * Estimate the new unweighted runnable_sum of the gcfs_rq by
+                * assuming all tasks are equally runnable.
+                */
+               if (scale_load_down(gcfs_rq->load.weight)) {
+                       load_sum = div_s64(gcfs_rq->avg.load_sum,
+                               scale_load_down(gcfs_rq->load.weight));
+               }
+
+               /* But make sure to not inflate se's runnable */
+               runnable_sum = min(se->avg.load_sum, load_sum);
+       }
+
+       /*
+        * runnable_sum can't be lower than running_sum
+        * As the running sum is scaled with cpu capacity whereas the runnable
+        * sum is not, we rescale running_sum first
+        */
+       running_sum = se->avg.util_sum /
+               arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+       runnable_sum = max(runnable_sum, running_sum);
+
        load_sum = (s64)se_weight(se) * runnable_sum;
        load_avg = div_s64(load_sum, LOAD_AVG_MAX);
 
-       add_positive(&se->avg.load_sum, runnable_sum);
-       add_positive(&se->avg.load_avg, load_avg);
+       delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
+       delta_avg = load_avg - se->avg.load_avg;
 
-       add_positive(&cfs_rq->avg.load_avg, load_avg);
-       add_positive(&cfs_rq->avg.load_sum, load_sum);
+       se->avg.load_sum = runnable_sum;
+       se->avg.load_avg = load_avg;
+       add_positive(&cfs_rq->avg.load_avg, delta_avg);
+       add_positive(&cfs_rq->avg.load_sum, delta_sum);
 
        runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
        runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
+       delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
+       delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
 
-       add_positive(&se->avg.runnable_load_sum, runnable_sum);
-       add_positive(&se->avg.runnable_load_avg, runnable_load_avg);
+       se->avg.runnable_load_sum = runnable_sum;
+       se->avg.runnable_load_avg = runnable_load_avg;
 
        if (se->on_rq) {
-               add_positive(&cfs_rq->avg.runnable_load_avg, runnable_load_avg);
-               add_positive(&cfs_rq->avg.runnable_load_sum, runnable_load_sum);
+               add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
+               add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
        }
 }
 
@@ -4321,12 +4365,12 @@ static inline bool cfs_bandwidth_used(void)
 
 void cfs_bandwidth_usage_inc(void)
 {
-       static_key_slow_inc(&__cfs_bandwidth_used);
+       static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
 }
 
 void cfs_bandwidth_usage_dec(void)
 {
-       static_key_slow_dec(&__cfs_bandwidth_used);
+       static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
 }
 #else /* HAVE_JUMP_LABEL */
 static bool cfs_bandwidth_used(void)
index dd79087..9bcbacb 100644 (file)
@@ -89,7 +89,9 @@ static int membarrier_private_expedited(void)
                rcu_read_unlock();
        }
        if (!fallback) {
+               preempt_disable();
                smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+               preempt_enable();
                free_cpumask_var(tmpmask);
        }
        cpus_read_unlock();
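
smp_call_function_many() relies on smp_processor_id() and per-CPU call data,
so callers must have preemption disabled; the membarrier path was invoking it
from a preemptible section. The general pattern (sketch, with placeholder
mask/func/info):

    preempt_disable();
    smp_call_function_many(mask, func, info, true);  /* wait for the IPIs to finish */
    preempt_enable();
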
index 4056c19..665ace2 100644 (file)
@@ -2034,8 +2034,9 @@ static void pull_rt_task(struct rq *this_rq)
        bool resched = false;
        struct task_struct *p;
        struct rq *src_rq;
+       int rt_overload_count = rt_overloaded(this_rq);
 
-       if (likely(!rt_overloaded(this_rq)))
+       if (likely(!rt_overload_count))
                return;
 
        /*
@@ -2044,6 +2045,11 @@ static void pull_rt_task(struct rq *this_rq)
         */
        smp_rmb();
 
+       /* If we are the only overloaded CPU, do nothing */
+       if (rt_overload_count == 1 &&
+           cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
+               return;
+
 #ifdef HAVE_RT_PUSH_IPI
        if (sched_feat(RT_PUSH_IPI)) {
                tell_cpu_to_push(this_rq);
index 98feab7..929ecb7 100644 (file)
@@ -27,7 +27,7 @@ void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
 
        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
-       __add_wait_queue_entry_tail(wq_head, wq_entry);
+       __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(add_wait_queue);
index e776fc8..f6b5f19 100644 (file)
@@ -95,6 +95,7 @@ config NO_HZ_FULL
        select RCU_NOCB_CPU
        select VIRT_CPU_ACCOUNTING_GEN
        select IRQ_WORK
+       select CPU_ISOLATION
        help
         Adaptively try to shutdown the tick whenever possible, even when
         the CPU is running tasks. Typically this requires running a single
index d325208..aa9d2a2 100644 (file)
@@ -655,7 +655,9 @@ static void hrtimer_reprogram(struct hrtimer *timer,
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
 {
        base->expires_next = KTIME_MAX;
+       base->hang_detected = 0;
        base->hres_active = 0;
+       base->next_timer = NULL;
 }
 
 /*
@@ -1589,6 +1591,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
                timerqueue_init_head(&cpu_base->clock_base[i].active);
        }
 
+       cpu_base->active_bases = 0;
        cpu_base->cpu = cpu;
        hrtimer_init_hres(cpu_base);
        return 0;
index 13d6881..ec999f3 100644 (file)
@@ -434,17 +434,22 @@ static struct pid *good_sigevent(sigevent_t * event)
 {
        struct task_struct *rtn = current->group_leader;
 
-       if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
-               (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-                !same_thread_group(rtn, current) ||
-                (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
+       switch (event->sigev_notify) {
+       case SIGEV_SIGNAL | SIGEV_THREAD_ID:
+               rtn = find_task_by_vpid(event->sigev_notify_thread_id);
+               if (!rtn || !same_thread_group(rtn, current))
+                       return NULL;
+               /* FALLTHRU */
+       case SIGEV_SIGNAL:
+       case SIGEV_THREAD:
+               if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
+                       return NULL;
+               /* FALLTHRU */
+       case SIGEV_NONE:
+               return task_pid(rtn);
+       default:
                return NULL;
-
-       if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-           ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
-               return NULL;
-
-       return task_pid(rtn);
+       }
 }
 
 static struct k_itimer * alloc_posix_timer(void)
@@ -669,7 +674,7 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
        struct timespec64 ts64;
        bool sig_none;
 
-       sig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+       sig_none = timr->it_sigev_notify == SIGEV_NONE;
        iv = timr->it_interval;
 
        /* interval timer ? */
@@ -856,7 +861,7 @@ int common_timer_set(struct k_itimer *timr, int flags,
 
        timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
        expires = timespec64_to_ktime(new_setting->it_value);
-       sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+       sigev_none = timr->it_sigev_notify == SIGEV_NONE;
 
        kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
        timr->it_active = !sigev_none;
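
The rework makes the accepted sigev_notify combinations explicit: SIGEV_NONE,
SIGEV_SIGNAL, SIGEV_THREAD and SIGEV_SIGNAL | SIGEV_THREAD_ID; the latter
three additionally require a valid signal number, and the thread-id case a
thread in the caller's thread group. A minimal userspace sketch of the
thread-targeted case; note the thread-id field name comes from the kernel
uapi header and is not spelled out by every libc:

    #define _GNU_SOURCE
    #include <signal.h>
    #include <time.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef sigev_notify_thread_id
    #define sigev_notify_thread_id _sigev_un._tid
    #endif

    static int make_per_thread_timer(timer_t *id)
    {
            struct sigevent sev = {
                    .sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID,
            };

            sev.sigev_signo = SIGRTMIN;
            sev.sigev_notify_thread_id = syscall(SYS_gettid);

            return timer_create(CLOCK_MONOTONIC, &sev, id);
    }
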
index 99578f0..f7cc7ab 100644 (file)
@@ -650,6 +650,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
        ts->next_tick = 0;
 }
 
+static inline bool local_timer_softirq_pending(void)
+{
+       return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
+}
+
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                                         ktime_t now, int cpu)
 {
@@ -666,8 +671,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
        } while (read_seqretry(&jiffies_lock, seq));
        ts->last_jiffies = basejiff;
 
-       if (rcu_needs_cpu(basemono, &next_rcu) ||
-           arch_needs_cpu() || irq_work_needs_cpu()) {
+       /*
+        * Keep the periodic tick when RCU, the architecture or irq_work
+        * requests it.
+        * Apart from that, check whether the local timer softirq is
+        * pending. If so, it's a bad idea to call get_next_timer_interrupt()
+        * because there is an already expired timer, so it will request
+        * immediate expiry, which rearms the hardware timer with a
+        * minimal delta that brings us back to this place
+        * immediately. Lather, rinse and repeat...
+        */
+       if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
+           irq_work_needs_cpu() || local_timer_softirq_pending()) {
                next_tick = basemono + TICK_NSEC;
        } else {
                /*
@@ -985,6 +1000,19 @@ ktime_t tick_nohz_get_sleep_length(void)
        return ts->sleep_length;
 }
 
+/**
+ * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
+ * for a particular CPU.
+ *
+ * Called from the schedutil frequency scaling governor in scheduler context.
+ */
+unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
+{
+       struct tick_sched *ts = tick_get_tick_sched(cpu);
+
+       return ts->idle_calls;
+}
+
 /**
  * tick_nohz_get_idle_calls - return the current idle calls counter value
  *
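
The per-CPU variant lets the schedutil governor (see the cpufreq_schedutil.c
hunk earlier) sample the idle-calls counter of the CPU being evaluated rather
than of the CPU running the governor. The detection idiom, in sketch form with
the field names used there:

    unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
    bool busy = idle_calls == sg_cpu->saved_idle_calls;  /* no idle entry since the last sample */

    sg_cpu->saved_idle_calls = idle_calls;
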
index ffebcf8..0bcf00e 100644 (file)
@@ -823,11 +823,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
        struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
 
        /*
-        * If the timer is deferrable and nohz is active then we need to use
-        * the deferrable base.
+        * If the timer is deferrable and NO_HZ_COMMON is set then we need
+        * to use the deferrable base.
         */
-       if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
-           (tflags & TIMER_DEFERRABLE))
+       if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
                base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
        return base;
 }
@@ -837,11 +836,10 @@ static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
        /*
-        * If the timer is deferrable and nohz is active then we need to use
-        * the deferrable base.
+        * If the timer is deferrable and NO_HZ_COMMON is set then we need
+        * to use the deferrable base.
         */
-       if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
-           (tflags & TIMER_DEFERRABLE))
+       if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
                base = this_cpu_ptr(&timer_bases[BASE_DEF]);
        return base;
 }
@@ -1009,8 +1007,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
        if (!ret && (options & MOD_TIMER_PENDING_ONLY))
                goto out_unlock;
 
-       debug_activate(timer, expires);
-
        new_base = get_target_base(base, timer->flags);
 
        if (base != new_base) {
@@ -1034,6 +1030,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
                }
        }
 
+       debug_activate(timer, expires);
+
        timer->expires = expires;
        /*
         * If 'idx' was calculated above and the base time did not advance
@@ -1684,7 +1682,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
        base->must_forward_clk = false;
 
        __run_timers(base);
-       if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+       if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
                __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
 }
 
@@ -1698,7 +1696,7 @@ void run_local_timers(void)
        hrtimer_run_queues();
        /* Raise the softirq only if required. */
        if (time_before(jiffies, base->clk)) {
-               if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+               if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
                        return;
                /* CPU is awake, so check the deferrable base. */
                base++;
@@ -1855,6 +1853,21 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h
        }
 }
 
+int timers_prepare_cpu(unsigned int cpu)
+{
+       struct timer_base *base;
+       int b;
+
+       for (b = 0; b < NR_BASES; b++) {
+               base = per_cpu_ptr(&timer_bases[b], cpu);
+               base->clk = jiffies;
+               base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
+               base->is_idle = false;
+               base->must_forward_clk = true;
+       }
+       return 0;
+}
+
 int timers_dead_cpu(unsigned int cpu)
 {
        struct timer_base *old_base;
index af7dad1..f54dc62 100644 (file)
@@ -164,6 +164,7 @@ config PREEMPTIRQ_EVENTS
        bool "Enable trace events for preempt and irq disable/enable"
        select TRACE_IRQFLAGS
        depends on DEBUG_PREEMPT || !PROVE_LOCKING
+       depends on TRACING
        default n
        help
          Enable tracing of disable and enable events for preemption and irqs.
@@ -354,7 +355,7 @@ config PROFILE_ANNOTATED_BRANCHES
          on if you need to profile the system's use of these macros.
 
 config PROFILE_ALL_BRANCHES
-       bool "Profile all if conditionals"
+       bool "Profile all if conditionals" if !FORTIFY_SOURCE
        select TRACE_BRANCH_PROFILING
        help
          This tracer profiles all branch conditions. Every if ()
index 206e0e2..987d9a9 100644 (file)
@@ -591,7 +591,7 @@ static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                return ret;
 
        if (copy_to_user(arg, &buts, sizeof(buts))) {
-               blk_trace_remove(q);
+               __blk_trace_remove(q);
                return -EFAULT;
        }
        return 0;
@@ -637,7 +637,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
                return ret;
 
        if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
-               blk_trace_remove(q);
+               __blk_trace_remove(q);
                return -EFAULT;
        }
 
@@ -872,7 +872,7 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
  *
  **/
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-                             u32 what, int error, union kernfs_node_id *cgid)
+                             u32 what, int error)
 {
        struct blk_trace *bt = q->blk_trace;
 
@@ -880,22 +880,21 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                return;
 
        __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-                       bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid);
+                       bio_op(bio), bio->bi_opf, what, error, 0, NULL,
+                       blk_trace_bio_get_cgid(q, bio));
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
                                     struct request_queue *q, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
                                       struct request_queue *q, struct bio *bio,
                                       int error)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
@@ -903,8 +902,7 @@ static void blk_add_trace_bio_backmerge(void *ignore,
                                        struct request *rq,
                                        struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0,
-                        blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 }
 
 static void blk_add_trace_bio_frontmerge(void *ignore,
@@ -912,15 +910,13 @@ static void blk_add_trace_bio_frontmerge(void *ignore,
                                         struct request *rq,
                                         struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 }
 
 static void blk_add_trace_bio_queue(void *ignore,
                                    struct request_queue *q, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 }
 
 static void blk_add_trace_getrq(void *ignore,
@@ -928,8 +924,7 @@ static void blk_add_trace_getrq(void *ignore,
                                struct bio *bio, int rw)
 {
        if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0,
-                                 blk_trace_bio_get_cgid(q, bio));
+               blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
        else {
                struct blk_trace *bt = q->blk_trace;
 
@@ -945,8 +940,7 @@ static void blk_add_trace_sleeprq(void *ignore,
                                  struct bio *bio, int rw)
 {
        if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0,
-                                 blk_trace_bio_get_cgid(q, bio));
+               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
        else {
                struct blk_trace *bt = q->blk_trace;
 
index 27d1f4f..40207c2 100644 (file)
@@ -343,14 +343,13 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
        .arg4_type      = ARG_CONST_SIZE,
 };
 
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
 
 static __always_inline u64
 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
-                       u64 flags, struct perf_raw_record *raw)
+                       u64 flags, struct perf_sample_data *sd)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
-       struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
        unsigned int cpu = smp_processor_id();
        u64 index = flags & BPF_F_INDEX_MASK;
        struct bpf_event_entry *ee;
@@ -373,8 +372,6 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
        if (unlikely(event->oncpu != cpu))
                return -EOPNOTSUPP;
 
-       perf_sample_data_init(sd, 0, 0);
-       sd->raw = raw;
        perf_event_output(event, sd, regs);
        return 0;
 }
@@ -382,6 +379,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
 {
+       struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
        struct perf_raw_record raw = {
                .frag = {
                        .size = size,
@@ -392,7 +390,10 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
                return -EINVAL;
 
-       return __bpf_perf_event_output(regs, map, flags, &raw);
+       perf_sample_data_init(sd, 0, 0);
+       sd->raw = &raw;
+
+       return __bpf_perf_event_output(regs, map, flags, sd);
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto = {
@@ -407,10 +408,12 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
 };
 
 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
 
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 {
+       struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
        struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
        struct perf_raw_frag frag = {
                .copy           = ctx_copy,
@@ -428,8 +431,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
        };
 
        perf_fetch_caller_regs(regs);
+       perf_sample_data_init(sd, 0, 0);
+       sd->raw = &raw;
 
-       return __bpf_perf_event_output(regs, map, flags, &raw);
+       return __bpf_perf_event_output(regs, map, flags, sd);
 }
 
 BPF_CALL_0(bpf_get_current_task)
@@ -759,6 +764,8 @@ const struct bpf_prog_ops perf_event_prog_ops = {
 
 static DEFINE_MUTEX(bpf_event_mutex);
 
+#define BPF_TRACE_MAX_PROGS 64
+
 int perf_event_attach_bpf_prog(struct perf_event *event,
                               struct bpf_prog *prog)
 {
@@ -772,6 +779,12 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
                goto unlock;
 
        old_array = event->tp_event->prog_array;
+       if (old_array &&
+           bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
+               ret = -E2BIG;
+               goto unlock;
+       }
+
        ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
        if (ret < 0)
                goto unlock;
index ccdf366..554b517 100644 (file)
@@ -1119,15 +1119,11 @@ static struct ftrace_ops global_ops = {
 };
 
 /*
- * This is used by __kernel_text_address() to return true if the
- * address is on a dynamically allocated trampoline that would
- * not return true for either core_kernel_text() or
- * is_module_text_address().
+ * Used by the stack unwinder to know about dynamic ftrace trampolines.
  */
-bool is_ftrace_trampoline(unsigned long addr)
+struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
 {
-       struct ftrace_ops *op;
-       bool ret = false;
+       struct ftrace_ops *op = NULL;
 
        /*
         * Some of the ops may be dynamically allocated,
@@ -1144,15 +1140,24 @@ bool is_ftrace_trampoline(unsigned long addr)
                if (op->trampoline && op->trampoline_size)
                        if (addr >= op->trampoline &&
                            addr < op->trampoline + op->trampoline_size) {
-                               ret = true;
-                               goto out;
+                               preempt_enable_notrace();
+                               return op;
                        }
        } while_for_each_ftrace_op(op);
-
- out:
        preempt_enable_notrace();
 
-       return ret;
+       return NULL;
+}
+
+/*
+ * This is used by __kernel_text_address() to return true if the
+ * address is on a dynamically allocated trampoline that would
+ * not return true for either core_kernel_text() or
+ * is_module_text_address().
+ */
+bool is_ftrace_trampoline(unsigned long addr)
+{
+       return ftrace_ops_trampoline(addr) != NULL;
 }
 
 struct ftrace_page {
index 91874a9..5af2842 100644 (file)
@@ -280,6 +280,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 /* Missed count stored at end */
 #define RB_MISSED_STORED       (1 << 30)
 
+#define RB_MISSED_FLAGS                (RB_MISSED_EVENTS|RB_MISSED_STORED)
+
 struct buffer_data_page {
        u64              time_stamp;    /* page time stamp */
        local_t          commit;        /* write committed index */
@@ -331,7 +333,9 @@ static void rb_init_page(struct buffer_data_page *bpage)
  */
 size_t ring_buffer_page_len(void *page)
 {
-       return local_read(&((struct buffer_data_page *)page)->commit)
+       struct buffer_data_page *bpage = page;
+
+       return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
                + BUF_PAGE_HDR_SIZE;
 }
 
@@ -1799,12 +1803,6 @@ void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
 
-static __always_inline void *
-__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
-{
-       return bpage->data + index;
-}
-
 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
 {
        return bpage->page->data + index;
@@ -2536,29 +2534,58 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * The lock and unlock are done within a preempt disable section.
  * The current_context per_cpu variable can only be modified
  * by the current task between lock and unlock. But it can
- * be modified more than once via an interrupt. There are four
- * different contexts that we need to consider.
+ * be modified more than once via an interrupt. To pass this
+ * information from the lock to the unlock without having to
+ * access the 'in_interrupt()' functions again (which do show
+ * a bit of overhead in something as critical as function tracing),
+ * we use a bitmask trick.
+ *
+ *  bit 0 =  NMI context
+ *  bit 1 =  IRQ context
+ *  bit 2 =  SoftIRQ context
+ *  bit 3 =  normal context.
+ *
+ * This works because this is the order of contexts that can
+ * preempt other contexts. A SoftIRQ never preempts an IRQ
+ * context.
+ *
+ * When the context is determined, the corresponding bit is
+ * checked and set (if it was set, then a recursion of that context
+ * happened).
+ *
+ * On unlock, we need to clear this bit. To do so, just subtract
+ * 1 from the current_context and AND it to itself.
  *
- *  Normal context.
- *  SoftIRQ context
- *  IRQ context
- *  NMI context
+ * (binary)
+ *  101 - 1 = 100
+ *  101 & 100 = 100 (clearing bit zero)
  *
- * If for some reason the ring buffer starts to recurse, we
- * only allow that to happen at most 4 times (one for each
- * context). If it happens 5 times, then we consider this a
- * recusive loop and do not let it go further.
+ *  1010 - 1 = 1001
+ *  1010 & 1001 = 1000 (clearing bit 1)
+ *
+ * The least significant bit can be cleared this way, and it
+ * just so happens that it is the same bit corresponding to
+ * the current context.
  */
 
 static __always_inline int
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       if (cpu_buffer->current_context >= 4)
+       unsigned int val = cpu_buffer->current_context;
+       unsigned long pc = preempt_count();
+       int bit;
+
+       if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+               bit = RB_CTX_NORMAL;
+       else
+               bit = pc & NMI_MASK ? RB_CTX_NMI :
+                       pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
+
+       if (unlikely(val & (1 << bit)))
                return 1;
 
-       cpu_buffer->current_context++;
-       /* Interrupts must see this update */
-       barrier();
+       val |= (1 << bit);
+       cpu_buffer->current_context = val;
 
        return 0;
 }
@@ -2566,9 +2593,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 static __always_inline void
 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       /* Don't let the dec leak out */
-       barrier();
-       cpu_buffer->current_context--;
+       cpu_buffer->current_context &= cpu_buffer->current_context - 1;
 }
 
 /**
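
A minimal user-space sketch of the bitmask trick described in the comment above, with illustrative context bit values mirroring RB_CTX_NMI/IRQ/SOFTIRQ/NORMAL; it only demonstrates the set-bit / clear-lowest-bit logic, not the kernel implementation itself.

#include <stdio.h>

/* Illustrative context bits: the lowest bit is the highest-priority
 * context, matching the ordering the ring buffer comment relies on. */
enum { CTX_NMI = 0, CTX_IRQ = 1, CTX_SOFTIRQ = 2, CTX_NORMAL = 3 };

static unsigned int current_context;

/* Returns 1 if this context already holds the "lock" (recursion). */
static int recursive_lock(int bit)
{
	unsigned int val = current_context;

	if (val & (1U << bit))
		return 1;
	current_context = val | (1U << bit);
	return 0;
}

/* val &= val - 1 clears the lowest set bit, which is always the bit of
 * the most recently entered (highest-priority) context. */
static void recursive_unlock(void)
{
	current_context &= current_context - 1;
}

int main(void)
{
	recursive_lock(CTX_NORMAL);	/* mask = 1000 */
	recursive_lock(CTX_IRQ);	/* mask = 1010, e.g. an interrupt hit */
	recursive_unlock();		/* 1010 & 1001 = 1000 */
	printf("after one unlock: %x\n", current_context);	/* prints 8 */
	recursive_unlock();		/* 1000 & 0111 = 0000 */
	printf("after two unlocks: %x\n", current_context);	/* prints 0 */
	return 0;
}
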
@@ -4406,8 +4431,13 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
 {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct buffer_data_page *bpage = data;
+       struct page *page = virt_to_page(bpage);
        unsigned long flags;
 
+       /* If the page is still in use someplace else, we can't reuse it */
+       if (page_ref_count(page) > 1)
+               goto out;
+
        local_irq_save(flags);
        arch_spin_lock(&cpu_buffer->lock);
 
@@ -4419,6 +4449,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
        arch_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
 
+ out:
        free_page((unsigned long)bpage);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
index 73e67b6..8e3f20a 100644 (file)
@@ -362,7 +362,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct
 }
 
 /**
- * trace_pid_filter_add_remove - Add or remove a task from a pid_list
+ * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
  * @pid_list: The list to modify
  * @self: The current task for fork or NULL for exit
  * @task: The task to add or remove
@@ -925,7 +925,7 @@ static void tracing_snapshot_instance(struct trace_array *tr)
 }
 
 /**
- * trace_snapshot - take a snapshot of the current buffer.
+ * tracing_snapshot - take a snapshot of the current buffer.
  *
  * This causes a swap between the snapshot buffer and the current live
  * tracing buffer. You can use this to take snapshots of the live
@@ -1004,9 +1004,9 @@ int tracing_alloc_snapshot(void)
 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 
 /**
- * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
+ * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
  *
- * This is similar to trace_snapshot(), but it will allocate the
+ * This is similar to tracing_snapshot(), but it will allocate the
  * snapshot buffer if it isn't already allocated. Use this only
  * where it is safe to sleep, as the allocation may sleep.
  *
@@ -1303,7 +1303,7 @@ unsigned long __read_mostly       tracing_thresh;
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
  */
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
+/*
+ * Skip 3:
+ *
+ *   trace_buffer_unlock_commit_regs()
+ *   trace_event_buffer_commit()
+ *   trace_event_raw_event_xxx()
+ */
+# define STACK_SKIP 3
+
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
                                     struct ring_buffer *buffer,
                                     struct ring_buffer_event *event,
@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
        __buffer_unlock_commit(buffer, event);
 
        /*
-        * If regs is not set, then skip the following callers:
-        *   trace_buffer_unlock_commit_regs
-        *   event_trigger_unlock_commit
-        *   trace_event_buffer_commit
-        *   trace_event_raw_event_sched_switch
+        * If regs is not set, then skip the necessary functions.
         * Note, we can still get here via blktrace, wakeup tracer
         * and mmiotrace, but that's ok if they lose a function or
-        * two. They are that meaningful.
+        * two. They are not that meaningful.
         */
-       ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
+       ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
        ftrace_trace_userstack(buffer, flags, pc);
 }
 
@@ -2415,7 +2420,7 @@ trace_process_export(struct trace_export *export,
 
        entry = ring_buffer_event_data(event);
        size = ring_buffer_event_length(event);
-       export->write(entry, size);
+       export->write(export, entry, size);
 }
 
 static DEFINE_MUTEX(ftrace_export_lock);
@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
        trace.skip              = skip;
 
        /*
-        * Add two, for this function and the call to save_stack_trace()
+        * Add one, for this function and the call to save_stack_trace()
         * If regs is set, then these functions will not be in the way.
         */
+#ifndef CONFIG_UNWINDER_ORC
        if (!regs)
-               trace.skip += 2;
+               trace.skip++;
+#endif
 
        /*
         * Since events can happen in NMIs there's no safe way to
@@ -2711,11 +2718,10 @@ void trace_dump_stack(int skip)
 
        local_save_flags(flags);
 
-       /*
-        * Skip 3 more, seems to get us at the caller of
-        * this function.
-        */
-       skip += 3;
+#ifndef CONFIG_UNWINDER_ORC
+       /* Skip 1 to skip this function. */
+       skip++;
+#endif
        __ftrace_trace_stack(global_trace.trace_buffer.buffer,
                             flags, skip, preempt_count(), NULL);
 }
@@ -4178,37 +4184,30 @@ static const struct file_operations show_traces_fops = {
        .llseek         = seq_lseek,
 };
 
-/*
- * The tracer itself will not take this lock, but still we want
- * to provide a consistent cpumask to user-space:
- */
-static DEFINE_MUTEX(tracing_cpumask_update_lock);
-
-/*
- * Temporary storage for the character representation of the
- * CPU bitmask (and one more byte for the newline):
- */
-static char mask_str[NR_CPUS + 1];
-
 static ssize_t
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
                     size_t count, loff_t *ppos)
 {
        struct trace_array *tr = file_inode(filp)->i_private;
+       char *mask_str;
        int len;
 
-       mutex_lock(&tracing_cpumask_update_lock);
+       len = snprintf(NULL, 0, "%*pb\n",
+                      cpumask_pr_args(tr->tracing_cpumask)) + 1;
+       mask_str = kmalloc(len, GFP_KERNEL);
+       if (!mask_str)
+               return -ENOMEM;
 
-       len = snprintf(mask_str, count, "%*pb\n",
+       len = snprintf(mask_str, len, "%*pb\n",
                       cpumask_pr_args(tr->tracing_cpumask));
        if (len >= count) {
                count = -EINVAL;
                goto out_err;
        }
-       count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+       count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
 
 out_err:
-       mutex_unlock(&tracing_cpumask_update_lock);
+       kfree(mask_str);
 
        return count;
 }
@@ -4228,8 +4227,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
        if (err)
                goto err_unlock;
 
-       mutex_lock(&tracing_cpumask_update_lock);
-
        local_irq_disable();
        arch_spin_lock(&tr->max_lock);
        for_each_tracing_cpu(cpu) {
@@ -4252,8 +4249,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
        local_irq_enable();
 
        cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
-
-       mutex_unlock(&tracing_cpumask_update_lock);
        free_cpumask_var(tracing_cpumask_new);
 
        return count;
@@ -6780,7 +6775,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                .spd_release    = buffer_spd_release,
        };
        struct buffer_ref *ref;
-       int entries, size, i;
+       int entries, i;
        ssize_t ret = 0;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -6834,14 +6829,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                        break;
                }
 
-               /*
-                * zero out any left over data, this is going to
-                * user land.
-                */
-               size = ring_buffer_page_len(ref->page);
-               if (size < PAGE_SIZE)
-                       memset(ref->page + size, 0, PAGE_SIZE - size);
-
                page = virt_to_page(ref->page);
 
                spd.pages[i] = page;
@@ -7599,6 +7586,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
        buf->data = alloc_percpu(struct trace_array_cpu);
        if (!buf->data) {
                ring_buffer_free(buf->buffer);
+               buf->buffer = NULL;
                return -ENOMEM;
        }
 
@@ -7622,7 +7610,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
                                    allocate_snapshot ? size : 1);
        if (WARN_ON(ret)) {
                ring_buffer_free(tr->trace_buffer.buffer);
+               tr->trace_buffer.buffer = NULL;
                free_percpu(tr->trace_buffer.data);
+               tr->trace_buffer.data = NULL;
                return -ENOMEM;
        }
        tr->allocated_snapshot = allocate_snapshot;
index ec0f9aa..1b87157 100644 (file)
@@ -2213,6 +2213,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
 {
        struct trace_event_call *call, *p;
        const char *last_system = NULL;
+       bool first = false;
        int last_i;
        int i;
 
@@ -2220,15 +2221,28 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                /* events are usually grouped together with systems */
                if (!last_system || call->class->system != last_system) {
+                       first = true;
                        last_i = 0;
                        last_system = call->class->system;
                }
 
+               /*
+                * Since calls are grouped by systems, the likelihood that the
+                * next call in the iteration belongs to the same system as the
+                * previous call is high. As an optimization, we skip searching
+                * for a map[] that matches the call's system if the last call
+                * was from the same system. That's what last_i is for. If the
+                * call has the same system as the previous call, then last_i
+                * will be the index of the first map[] that has a matching
+                * system.
+                */
                for (i = last_i; i < len; i++) {
                        if (call->class->system == map[i]->system) {
                                /* Save the first system if need be */
-                               if (!last_i)
+                               if (first) {
                                        last_i = i;
+                                       first = false;
+                               }
                                update_event_printk(call, map[i]);
                        }
                }
index f2ac9d4..8741148 100644 (file)
@@ -1123,13 +1123,22 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 #ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_UNWINDER_ORC
+/* Skip 2:
+ *   event_triggers_post_call()
+ *   trace_event_raw_event_xxx()
+ */
+# define STACK_SKIP 2
+#else
 /*
- * Skip 3:
+ * Skip 4:
  *   stacktrace_trigger()
  *   event_triggers_post_call()
+ *   trace_event_buffer_commit()
  *   trace_event_raw_event_xxx()
  */
-#define STACK_SKIP 3
+#define STACK_SKIP 4
+#endif
 
 static void
 stacktrace_trigger(struct event_trigger_data *data, void *rec)
index 27f7ad1..b611cd3 100644 (file)
@@ -154,6 +154,24 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
        preempt_enable_notrace();
 }
 
+#ifdef CONFIG_UNWINDER_ORC
+/*
+ * Skip 2:
+ *
+ *   function_stack_trace_call()
+ *   ftrace_call()
+ */
+#define STACK_SKIP 2
+#else
+/*
+ * Skip 3:
+ *   __trace_stack()
+ *   function_stack_trace_call()
+ *   ftrace_call()
+ */
+#define STACK_SKIP 3
+#endif
+
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -180,15 +198,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
-               /*
-                * skip over 5 funcs:
-                *    __ftrace_trace_stack,
-                *    __trace_stack,
-                *    function_stack_trace_call
-                *    ftrace_list_func
-                *    ftrace_call
-                */
-               __trace_stack(tr, flags, 5, pc);
+               __trace_stack(tr, flags, STACK_SKIP, pc);
        }
 
        atomic_dec(&data->disabled);
@@ -367,14 +377,27 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
        tracer_tracing_off(tr);
 }
 
+#ifdef CONFIG_UNWINDER_ORC
 /*
- * Skip 4:
+ * Skip 3:
+ *
+ *   function_trace_probe_call()
+ *   ftrace_ops_assist_func()
+ *   ftrace_call()
+ */
+#define FTRACE_STACK_SKIP 3
+#else
+/*
+ * Skip 5:
+ *
+ *   __trace_stack()
  *   ftrace_stacktrace()
  *   function_trace_probe_call()
- *   ftrace_ops_list_func()
+ *   ftrace_ops_assist_func()
  *   ftrace_call()
  */
-#define STACK_SKIP 4
+#define FTRACE_STACK_SKIP 5
+#endif
 
 static __always_inline void trace_stack(struct trace_array *tr)
 {
@@ -384,7 +407,7 @@ static __always_inline void trace_stack(struct trace_array *tr)
        local_save_flags(flags);
        pc = preempt_count();
 
-       __trace_stack(tr, flags, STACK_SKIP, pc);
+       __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
 }
 
 static void
index 734accc..3c7bfc4 100644 (file)
@@ -209,6 +209,10 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
        if (__this_cpu_read(disable_stack_tracer) != 1)
                goto out;
 
+       /* If RCU is not watching, saving the stack trace can fail */
+       if (!rcu_is_watching())
+               goto out;
+
        ip += MCOUNT_INSN_SIZE;
 
        check_stack(ip, &stack);
index ce74a49..ef1da2a 100644 (file)
@@ -192,6 +192,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
                return retval;
        }
 
+       groups_sort(group_info);
        retval = set_current_groups(group_info);
        put_group_info(group_info);
 
index 8fdb710..f699122 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
 #include <linux/freezer.h>
-#include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
@@ -48,6 +47,8 @@
 #include <linux/nodemask.h>
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
+#include <linux/sched/isolation.h>
+#include <linux/nmi.h>
 
 #include "workqueue_internal.h"
 
@@ -1634,7 +1635,7 @@ static void worker_enter_idle(struct worker *worker)
                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
        /*
-        * Sanity check nr_running.  Because wq_unbind_fn() releases
+        * Sanity check nr_running.  Because unbind_workers() releases
         * pool->lock between setting %WORKER_UNBOUND and zapping
         * nr_running, the warning may trigger spuriously.  Check iff
         * unbind is not in progress.
@@ -4463,6 +4464,12 @@ void show_workqueue_state(void)
                        if (pwq->nr_active || !list_empty(&pwq->delayed_works))
                                show_pwq(pwq);
                        spin_unlock_irqrestore(&pwq->pool->lock, flags);
+                       /*
+                        * We could be printing a lot from atomic context, e.g.
+                        * sysrq-t -> show_workqueue_state(). Avoid triggering
+                        * hard lockup.
+                        */
+                       touch_nmi_watchdog();
                }
        }
 
@@ -4490,6 +4497,12 @@ void show_workqueue_state(void)
                pr_cont("\n");
        next_pool:
                spin_unlock_irqrestore(&pool->lock, flags);
+               /*
+                * We could be printing a lot from atomic context, e.g.
+                * sysrq-t -> show_workqueue_state(). Avoid triggering
+                * hard lockup.
+                */
+               touch_nmi_watchdog();
        }
 
        rcu_read_unlock_sched();
@@ -4510,9 +4523,8 @@ void show_workqueue_state(void)
  * cpu comes back online.
  */
 
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-       int cpu = smp_processor_id();
        struct worker_pool *pool;
        struct worker *worker;
 
@@ -4589,16 +4601,6 @@ static void rebind_workers(struct worker_pool *pool)
 
        spin_lock_irq(&pool->lock);
 
-       /*
-        * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
-        * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
-        * being reworked and this can go away in time.
-        */
-       if (!(pool->flags & POOL_DISASSOCIATED)) {
-               spin_unlock_irq(&pool->lock);
-               return;
-       }
-
        pool->flags &= ~POOL_DISASSOCIATED;
 
        for_each_pool_worker(worker, pool) {
@@ -4709,12 +4711,13 @@ int workqueue_online_cpu(unsigned int cpu)
 
 int workqueue_offline_cpu(unsigned int cpu)
 {
-       struct work_struct unbind_work;
        struct workqueue_struct *wq;
 
        /* unbinding per-cpu workers should happen on the local CPU */
-       INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-       queue_work_on(cpu, system_highpri_wq, &unbind_work);
+       if (WARN_ON(cpu != smp_processor_id()))
+               return -1;
+
+       unbind_workers(cpu);
 
        /* update NUMA affinity of unbound workqueues */
        mutex_lock(&wq_pool_mutex);
@@ -4722,9 +4725,6 @@ int workqueue_offline_cpu(unsigned int cpu)
                wq_update_unbound_numa(wq, cpu, false);
        mutex_unlock(&wq_pool_mutex);
 
-       /* wait for per-cpu unbinding to finish */
-       flush_work(&unbind_work);
-       destroy_work_on_stack(&unbind_work);
        return 0;
 }
 
@@ -4957,6 +4957,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
        if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
                return -ENOMEM;
 
+       /*
+        * Not excluding isolated cpus on purpose.
+        * If the user wishes to include them, we allow that.
+        */
        cpumask_and(cpumask, cpumask, cpu_possible_mask);
        if (!cpumask_empty(cpumask)) {
                apply_wqattrs_lock();
@@ -5555,7 +5559,7 @@ int __init workqueue_init_early(void)
        WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
        BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-       cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
+       cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));
 
        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
index 947d3e2..9d5b78a 100644 (file)
@@ -1099,8 +1099,6 @@ config PROVE_LOCKING
        select DEBUG_MUTEXES
        select DEBUG_RT_MUTEXES if RT_MUTEXES
        select DEBUG_LOCK_ALLOC
-       select LOCKDEP_CROSSRELEASE
-       select LOCKDEP_COMPLETIONS
        select TRACE_IRQFLAGS
        default n
        help
@@ -1170,37 +1168,6 @@ config LOCK_STAT
         CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
         (CONFIG_LOCKDEP defines "acquire" and "release" events.)
 
-config LOCKDEP_CROSSRELEASE
-       bool
-       help
-        This makes lockdep work for crosslock which is a lock allowed to
-        be released in a different context from the acquisition context.
-        Normally a lock must be released in the context acquiring the lock.
-        However, relexing this constraint helps synchronization primitives
-        such as page locks or completions can use the lock correctness
-        detector, lockdep.
-
-config LOCKDEP_COMPLETIONS
-       bool
-       help
-        A deadlock caused by wait_for_completion() and complete() can be
-        detected by lockdep using crossrelease feature.
-
-config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
-       bool "Enable the boot parameter, crossrelease_fullstack"
-       depends on LOCKDEP_CROSSRELEASE
-       default n
-       help
-        The lockdep "cross-release" feature needs to record stack traces
-        (of calling functions) for all acquisitions, for eventual later
-        use during analysis. By default only a single caller is recorded,
-        because the unwind operation can be very expensive with deeper
-        stack chains.
-
-        However a boot parameter, crossrelease_fullstack, was
-        introduced since sometimes deeper traces are required for full
-        analysis. This option turns on the boot parameter.
-
 config DEBUG_LOCKDEP
        bool "Lock dependency engine debugging"
        depends on DEBUG_KERNEL && LOCKDEP
index 1b6087d..3ffc46e 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/export.h>
 
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 long long notrace __ashldi3(long long u, word_type b)
 {
index 2e67c97..ea05455 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/export.h>
 
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 long long notrace __ashrdi3(long long u, word_type b)
 {
index 1ef0cec..dc14bea 100644 (file)
@@ -313,42 +313,47 @@ next_op:
 
        /* Decide how to handle the operation */
        switch (op) {
-       case ASN1_OP_MATCH_ANY_ACT:
-       case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
-       case ASN1_OP_COND_MATCH_ANY_ACT:
-       case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
-               ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
-               if (ret < 0)
-                       return ret;
-               goto skip_data;
-
-       case ASN1_OP_MATCH_ACT:
-       case ASN1_OP_MATCH_ACT_OR_SKIP:
-       case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
-               ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
-               if (ret < 0)
-                       return ret;
-               goto skip_data;
-
        case ASN1_OP_MATCH:
        case ASN1_OP_MATCH_OR_SKIP:
+       case ASN1_OP_MATCH_ACT:
+       case ASN1_OP_MATCH_ACT_OR_SKIP:
        case ASN1_OP_MATCH_ANY:
        case ASN1_OP_MATCH_ANY_OR_SKIP:
+       case ASN1_OP_MATCH_ANY_ACT:
+       case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
        case ASN1_OP_COND_MATCH_OR_SKIP:
+       case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
        case ASN1_OP_COND_MATCH_ANY:
        case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
-       skip_data:
+       case ASN1_OP_COND_MATCH_ANY_ACT:
+       case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
+
                if (!(flags & FLAG_CONS)) {
                        if (flags & FLAG_INDEFINITE_LENGTH) {
+                               size_t tmp = dp;
+
                                ret = asn1_find_indefinite_length(
-                                       data, datalen, &dp, &len, &errmsg);
+                                       data, datalen, &tmp, &len, &errmsg);
                                if (ret < 0)
                                        goto error;
-                       } else {
-                               dp += len;
                        }
                        pr_debug("- LEAF: %zu\n", len);
                }
+
+               if (op & ASN1_OP_MATCH__ACT) {
+                       unsigned char act;
+
+                       if (op & ASN1_OP_MATCH__ANY)
+                               act = machine[pc + 1];
+                       else
+                               act = machine[pc + 2];
+                       ret = actions[act](context, hdr, tag, data + dp, len);
+                       if (ret < 0)
+                               return ret;
+               }
+
+               if (!(flags & FLAG_CONS))
+                       dp += len;
                pc += asn1_op_lengths[op];
                goto next_op;
 
@@ -434,6 +439,8 @@ next_op:
                        else
                                act = machine[pc + 1];
                        ret = actions[act](context, hdr, 0, data + tdp, len);
+                       if (ret < 0)
+                               return ret;
                }
                pc += asn1_op_lengths[op];
                goto next_op;
index 6d7ebf6..2250da7 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/export.h>
 
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 word_type notrace __cmpdi2(long long a, long long b)
 {
index 7f6dd68..d873b34 100644 (file)
@@ -51,8 +51,49 @@ u16 const crc_ccitt_table[256] = {
 };
 EXPORT_SYMBOL(crc_ccitt_table);
 
+/*
+ * Similar table to calculate CRC16 variant known as CRC-CCITT-FALSE
+ * Reflected bits order, does not augment final value.
+ */
+u16 const crc_ccitt_false_table[256] = {
+    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
+    0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
+    0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
+    0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
+    0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
+    0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
+    0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
+    0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
+    0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
+    0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
+    0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
+    0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
+    0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
+    0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
+    0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
+    0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
+    0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
+    0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
+    0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
+    0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
+    0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
+    0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+    0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
+    0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
+    0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
+    0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
+    0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
+    0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
+    0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
+    0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
+    0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
+    0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
+};
+EXPORT_SYMBOL(crc_ccitt_false_table);
+
 /**
- *     crc_ccitt - recompute the CRC for the data buffer
+ *     crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
+ *     buffer
  *     @crc: previous CRC value
  *     @buffer: data pointer
  *     @len: number of bytes in the buffer
@@ -65,5 +106,20 @@ u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
 }
 EXPORT_SYMBOL(crc_ccitt);
 
+/**
+ *     crc_ccitt_false - recompute the CRC (CRC-CCITT-FALSE variant)
+ *     for the data buffer
+ *     @crc: previous CRC value
+ *     @buffer: data pointer
+ *     @len: number of bytes in the buffer
+ */
+u16 crc_ccitt_false(u16 crc, u8 const *buffer, size_t len)
+{
+       while (len--)
+               crc = crc_ccitt_false_byte(crc, *buffer++);
+       return crc;
+}
+EXPORT_SYMBOL(crc_ccitt_false);
+
 MODULE_DESCRIPTION("CRC-CCITT calculations");
 MODULE_LICENSE("GPL");
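
For cross-checking, a bit-at-a-time reference for the CRC-CCITT-FALSE variant added above (polynomial 0x1021, initial value 0xffff, no reflection, no final XOR). The table-driven crc_ccitt_false() should agree with it; the well-known check value for the ASCII string "123456789" is 0x29B1. This is a host-side sketch, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t crc_ccitt_false_bitwise(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)(*buf++) << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
	}
	return crc;
}

int main(void)
{
	const uint8_t msg[] = "123456789";

	printf("crc = 0x%04X\n", crc_ccitt_false_bitwise(0xffff, msg, 9));	/* 0x29B1 */
	return 0;
}
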
index c3e84ed..2615074 100644 (file)
@@ -346,7 +346,8 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
 static void zap_modalias_env(struct kobj_uevent_env *env)
 {
        static const char modalias_prefix[] = "MODALIAS=";
-       int i;
+       size_t len;
+       int i, j;
 
        for (i = 0; i < env->envp_idx;) {
                if (strncmp(env->envp[i], modalias_prefix,
@@ -355,11 +356,18 @@ static void zap_modalias_env(struct kobj_uevent_env *env)
                        continue;
                }
 
-               if (i != env->envp_idx - 1)
-                       memmove(&env->envp[i], &env->envp[i + 1],
-                               sizeof(env->envp[i]) * env->envp_idx - 1);
+               len = strlen(env->envp[i]) + 1;
+
+               if (i != env->envp_idx - 1) {
+                       memmove(env->envp[i], env->envp[i + 1],
+                               env->buflen - len);
+
+                       for (j = i; j < env->envp_idx - 1; j++)
+                               env->envp[j] = env->envp[j + 1] - len;
+               }
 
                env->envp_idx--;
+               env->buflen -= len;
        }
 }
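
The fix above has to move the packed string data and rebase every later envp[] pointer by the removed length. A simplified user-space model of that compaction is sketched below; the struct and helper names are invented for illustration, and the tail size is computed explicitly rather than with the kernel's in-place arithmetic.

#include <stdio.h>
#include <string.h>

#define MAX_ENV 8

struct env_buf {
	char buf[256];		/* packed "KEY=val\0KEY=val\0..." strings */
	size_t buflen;		/* bytes of buf in use */
	char *envp[MAX_ENV];	/* pointers into buf, one per string */
	int envp_idx;		/* number of valid envp[] entries */
};

/* Remove entry i: slide the string data that follows it down over it,
 * then shift every later envp[] pointer back by the removed length so
 * the pointers keep tracking the moved strings. */
static void remove_env(struct env_buf *env, int i)
{
	size_t len = strlen(env->envp[i]) + 1;

	if (i != env->envp_idx - 1) {
		size_t tail = env->buflen - (size_t)(env->envp[i + 1] - env->buf);

		memmove(env->envp[i], env->envp[i + 1], tail);
		for (int j = i; j < env->envp_idx - 1; j++)
			env->envp[j] = env->envp[j + 1] - len;
	}
	env->envp_idx--;
	env->buflen -= len;
}

static void add_env(struct env_buf *env, const char *s)
{
	size_t len = strlen(s) + 1;

	env->envp[env->envp_idx++] = env->buf + env->buflen;
	memcpy(env->buf + env->buflen, s, len);
	env->buflen += len;
}

int main(void)
{
	struct env_buf env = { .buflen = 0, .envp_idx = 0 };

	add_env(&env, "ACTION=add");
	add_env(&env, "MODALIAS=usb:v1D6Bp0002");
	add_env(&env, "SEQNUM=42");

	remove_env(&env, 1);	/* drop the MODALIAS entry */

	for (int i = 0; i < env.envp_idx; i++)
		puts(env.envp[i]);	/* ACTION=add, SEQNUM=42 */
	return 0;
}
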
 
index 8e845f4..99cfa57 100644 (file)
@@ -17,7 +17,7 @@
  */
 
 #include <linux/module.h>
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 long long notrace __lshrdi3(long long u, word_type b)
 {
index 57fd45a..08c60d1 100644 (file)
@@ -671,7 +671,23 @@ do {                                               \
        **************  MIPS/64  **************
        ***************************************/
 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+/*
+ * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
+ * code below, so we special case MIPS64r6 until the compiler can do better.
+ */
+#define umul_ppmm(w1, w0, u, v)                                                \
+do {                                                                   \
+       __asm__ ("dmulu %0,%1,%2"                                       \
+                : "=d" ((UDItype)(w0))                                 \
+                : "d" ((UDItype)(u)),                                  \
+                  "d" ((UDItype)(v)));                                 \
+       __asm__ ("dmuhu %0,%1,%2"                                       \
+                : "=d" ((UDItype)(w1))                                 \
+                : "d" ((UDItype)(u)),                                  \
+                  "d" ((UDItype)(v)));                                 \
+} while (0)
+#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v) \
 do {                                                                   \
        typedef unsigned int __ll_UTItype __attribute__((mode(TI)));    \
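
Whatever path is taken above (the dmulu/dmuhu pair on MIPS64r6 or the generic code), umul_ppmm(w1, w0, u, v) must yield the full 128-bit product of u and v split into a high half (w1) and a low half (w0). A host-side reference using the compiler's unsigned __int128 (assumed to be available) makes that contract explicit:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static void umul_ppmm_ref(uint64_t *w1, uint64_t *w0, uint64_t u, uint64_t v)
{
	unsigned __int128 p = (unsigned __int128)u * v;

	*w1 = (uint64_t)(p >> 64);	/* what dmuhu computes */
	*w0 = (uint64_t)p;		/* what dmulu computes */
}

int main(void)
{
	uint64_t hi, lo;

	umul_ppmm_ref(&hi, &lo, 0xdeadbeefcafebabeULL, 0x123456789abcdef0ULL);
	printf("hi=%016" PRIx64 " lo=%016" PRIx64 "\n", hi, lo);
	return 0;
}
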
index 8893854..54c8b31 100644 (file)
@@ -15,7 +15,7 @@
  */
 
 #include <linux/export.h>
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 #define W_TYPE_SIZE 32
 
index 8bf78b4..dfa55c8 100644 (file)
 #include <linux/types.h>
 #include <net/netlink.h>
 
-/* for these data types attribute length must be exactly given size */
+/* For these data types, attribute length should be exactly the given
+ * size. However, to maintain compatibility with broken commands, if the
+ * attribute length does not match the expected size, a warning is emitted
+ * telling the user that the command is sending invalid data and needs to be fixed.
+ */
 static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
        [NLA_U8]        = sizeof(u8),
        [NLA_U16]       = sizeof(u16),
@@ -28,8 +32,16 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
 };
 
 static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
+       [NLA_U8]        = sizeof(u8),
+       [NLA_U16]       = sizeof(u16),
+       [NLA_U32]       = sizeof(u32),
+       [NLA_U64]       = sizeof(u64),
        [NLA_MSECS]     = sizeof(u64),
        [NLA_NESTED]    = NLA_HDRLEN,
+       [NLA_S8]        = sizeof(s8),
+       [NLA_S16]       = sizeof(s16),
+       [NLA_S32]       = sizeof(s32),
+       [NLA_S64]       = sizeof(s64),
 };
 
 static int validate_nla_bitfield32(const struct nlattr *nla,
@@ -69,11 +81,9 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
 
        BUG_ON(pt->type > NLA_TYPE_MAX);
 
-       /* for data types NLA_U* and NLA_S* require exact length */
-       if (nla_attr_len[pt->type]) {
-               if (attrlen != nla_attr_len[pt->type])
-                       return -ERANGE;
-               return 0;
+       if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
+               pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
+                                   current->comm, type);
        }
 
        switch (pt->type) {
index 41b9e50..0bcac6c 100644 (file)
@@ -116,14 +116,14 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
        int count;
 
        if (v >= end)
-               return -EBADMSG;
+               goto bad;
 
        n = *v++;
        ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
+       if (count >= bufsize)
+               return -ENOBUFS;
        buffer += count;
        bufsize -= count;
-       if (bufsize == 0)
-               return -ENOBUFS;
 
        while (v < end) {
                num = 0;
@@ -134,20 +134,24 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
                        num = n & 0x7f;
                        do {
                                if (v >= end)
-                                       return -EBADMSG;
+                                       goto bad;
                                n = *v++;
                                num <<= 7;
                                num |= n & 0x7f;
                        } while (n & 0x80);
                }
                ret += count = snprintf(buffer, bufsize, ".%lu", num);
-               buffer += count;
-               if (bufsize <= count)
+               if (count >= bufsize)
                        return -ENOBUFS;
+               buffer += count;
                bufsize -= count;
        }
 
        return ret;
+
+bad:
+       snprintf(buffer, bufsize, "(bad)");
+       return -EBADMSG;
 }
 EXPORT_SYMBOL_GPL(sprint_oid);
 
index ba4a9d1..d3ff682 100644 (file)
@@ -603,6 +603,16 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 }
 EXPORT_SYMBOL(rb_replace_node);
 
+void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+                           struct rb_root_cached *root)
+{
+       rb_replace_node(victim, new, &root->rb_root);
+
+       if (root->rb_leftmost == victim)
+               root->rb_leftmost = new;
+}
+EXPORT_SYMBOL(rb_replace_node_cached);
+
 void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
                         struct rb_root *root)
 {
index aa8812a..f369889 100644 (file)
@@ -435,6 +435,41 @@ loop:
        return 0;
 }
 
+static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
+{
+       struct bpf_insn *insn;
+
+       insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       /* Due to func address being non-const, we need to
+        * assemble this here.
+        */
+       insn[0] = BPF_MOV64_REG(R6, R1);
+       insn[1] = BPF_LD_ABS(BPF_B, 0);
+       insn[2] = BPF_LD_ABS(BPF_H, 0);
+       insn[3] = BPF_LD_ABS(BPF_W, 0);
+       insn[4] = BPF_MOV64_REG(R7, R6);
+       insn[5] = BPF_MOV64_IMM(R6, 0);
+       insn[6] = BPF_MOV64_REG(R1, R7);
+       insn[7] = BPF_MOV64_IMM(R2, 1);
+       insn[8] = BPF_MOV64_IMM(R3, 2);
+       insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                              bpf_skb_vlan_push_proto.func - __bpf_call_base);
+       insn[10] = BPF_MOV64_REG(R6, R7);
+       insn[11] = BPF_LD_ABS(BPF_B, 0);
+       insn[12] = BPF_LD_ABS(BPF_H, 0);
+       insn[13] = BPF_LD_ABS(BPF_W, 0);
+       insn[14] = BPF_MOV64_IMM(R0, 42);
+       insn[15] = BPF_EXIT_INSN();
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = 16;
+
+       return 0;
+}
+
 static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
 {
        unsigned int len = BPF_MAXINSNS;
@@ -6066,6 +6101,14 @@ static struct bpf_test tests[] = {
                {},
                { {0x1, 0x42 } },
        },
+       {
+               "LD_ABS with helper changing skb data",
+               { },
+               INTERNAL,
+               { 0x34 },
+               { { ETH_HLEN, 42 } },
+               .fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
+       },
 };
 
 static struct net_device dev;
@@ -6207,9 +6250,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
                                return NULL;
                        }
                }
-               /* We don't expect to fail. */
                if (*err) {
-                       pr_cont("FAIL to attach err=%d len=%d\n",
+                       pr_cont("FAIL to prog_create err=%d len=%d\n",
                                *err, fprog.len);
                        return NULL;
                }
@@ -6233,6 +6275,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
                 * checks.
                 */
                fp = bpf_prog_select_runtime(fp, err);
+               if (*err) {
+                       pr_cont("FAIL to select_runtime err=%d\n", *err);
+                       return NULL;
+               }
                break;
        }
 
@@ -6418,8 +6464,8 @@ static __init int test_bpf(void)
                                pass_cnt++;
                                continue;
                        }
-
-                       return err;
+                       err_cnt++;
+                       continue;
                }
 
                pr_cont("jited:%u ", fp->jited);
index 563f10e..71ebfa4 100644 (file)
 #define PAD_SIZE 16
 #define FILL_CHAR '$'
 
-#define PTR1 ((void*)0x01234567)
-#define PTR2 ((void*)(long)(int)0xfedcba98)
-
-#if BITS_PER_LONG == 64
-#define PTR1_ZEROES "000000000"
-#define PTR1_SPACES "         "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fffffffffedcba98"
-#define PTR_WIDTH 16
-#else
-#define PTR1_ZEROES "0"
-#define PTR1_SPACES " "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fedcba98"
-#define PTR_WIDTH 8
-#endif
-#define PTR_WIDTH_STR stringify(PTR_WIDTH)
-
 static unsigned total_tests __initdata;
 static unsigned failed_tests __initdata;
 static char *test_buffer __initdata;
@@ -217,30 +199,79 @@ test_string(void)
        test("a  |   |   ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c");
 }
 
+#define PLAIN_BUF_SIZE 64      /* leave some space so we don't oops */
+
+#if BITS_PER_LONG == 64
+
+#define PTR_WIDTH 16
+#define PTR ((void *)0xffff0123456789ab)
+#define PTR_STR "ffff0123456789ab"
+#define ZEROS "00000000"       /* hex 32 zero bits */
+
+static int __init
+plain_format(void)
+{
+       char buf[PLAIN_BUF_SIZE];
+       int nchars;
+
+       nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+       if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
+               return -1;
+
+       return 0;
+}
+
+#else
+
+#define PTR_WIDTH 8
+#define PTR ((void *)0x456789ab)
+#define PTR_STR "456789ab"
+
+static int __init
+plain_format(void)
+{
+       /* Format is implicitly tested for 32 bit machines by plain_hash() */
+       return 0;
+}
+
+#endif /* BITS_PER_LONG == 64 */
+
+static int __init
+plain_hash(void)
+{
+       char buf[PLAIN_BUF_SIZE];
+       int nchars;
+
+       nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+       if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * We can't use test() to test %p because we don't know what output to expect
+ * after an address is hashed.
+ */
 static void __init
 plain(void)
 {
-       test(PTR1_ZEROES PTR1_STR " " PTR2_STR, "%p %p", PTR1, PTR2);
-       /*
-        * The field width is overloaded for some %p extensions to
-        * pass another piece of information. For plain pointers, the
-        * behaviour is slightly odd: One cannot pass either the 0
-        * flag nor a precision to %p without gcc complaining, and if
-        * one explicitly gives a field width, the number is no longer
-        * zero-padded.
-        */
-       test("|" PTR1_STR PTR1_SPACES "  |  " PTR1_SPACES PTR1_STR "|",
-            "|%-*p|%*p|", PTR_WIDTH+2, PTR1, PTR_WIDTH+2, PTR1);
-       test("|" PTR2_STR "  |  " PTR2_STR "|",
-            "|%-*p|%*p|", PTR_WIDTH+2, PTR2, PTR_WIDTH+2, PTR2);
+       int err;
 
-       /*
-        * Unrecognized %p extensions are treated as plain %p, but the
-        * alphanumeric suffix is ignored (that is, does not occur in
-        * the output.)
-        */
-       test("|"PTR1_ZEROES PTR1_STR"|", "|%p0y|", PTR1);
-       test("|"PTR2_STR"|", "|%p0y|", PTR2);
+       err = plain_hash();
+       if (err) {
+               pr_warn("plain 'p' does not appear to be hashed\n");
+               failed_tests++;
+               return;
+       }
+
+       err = plain_format();
+       if (err) {
+               pr_warn("hashing plain 'p' has unexpected format\n");
+               failed_tests++;
+       }
 }
 
 static void __init
@@ -251,6 +282,7 @@ symbol_ptr(void)
 static void __init
 kernel_ptr(void)
 {
+       /* We can't test this without access to kptr_restrict. */
 }
 
 static void __init
index 4a720ed..0d54bcb 100644 (file)
@@ -33,8 +33,9 @@
  * @head: head of timerqueue
  * @node: timer node to be added
  *
- * Adds the timer node to the timerqueue, sorted by the
- * node's expires value.
+ * Adds the timer node to the timerqueue, sorted by the node's expires
+ * value. Returns true if the newly added timer is the first expiring timer in
+ * the queue.
  */
 bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
 {
@@ -70,7 +71,8 @@ EXPORT_SYMBOL_GPL(timerqueue_add);
  * @head: head of timerqueue
  * @node: timer node to be removed
  *
- * Removes the timer node from the timerqueue.
+ * Removes the timer node from the timerqueue. Returns true if the queue is
+ * not empty after the remove.
  */
 bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
 {
index 49a5350..25ca2d4 100644 (file)
@@ -15,7 +15,7 @@
  */
 
 #include <linux/module.h>
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 word_type __ucmpdi2(unsigned long long a, unsigned long long b)
 {
index 1746bae..01c3957 100644 (file)
@@ -33,6 +33,8 @@
 #include <linux/uuid.h>
 #include <linux/of.h>
 #include <net/addrconf.h>
+#include <linux/siphash.h>
+#include <linux/compiler.h>
 #ifdef CONFIG_BLOCK
 #include <linux/blkdev.h>
 #endif
@@ -1343,6 +1345,59 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
        return string(buf, end, uuid, spec);
 }
 
+int kptr_restrict __read_mostly;
+
+static noinline_for_stack
+char *restricted_pointer(char *buf, char *end, const void *ptr,
+                        struct printf_spec spec)
+{
+       spec.base = 16;
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = 2 * sizeof(ptr);
+               spec.flags |= ZEROPAD;
+       }
+
+       switch (kptr_restrict) {
+       case 0:
+               /* Always print %pK values */
+               break;
+       case 1: {
+               const struct cred *cred;
+
+               /*
+                * kptr_restrict==1 cannot be used in IRQ context
+                * because its test for CAP_SYSLOG would be meaningless.
+                */
+               if (in_irq() || in_serving_softirq() || in_nmi())
+                       return string(buf, end, "pK-error", spec);
+
+               /*
+                * Only print the real pointer value if the current
+                * process has CAP_SYSLOG and is running with the
+                * same credentials it started with. This is because
+                * access to files is checked at open() time, but %pK
+                * checks permission at read() time. We don't want to
+                * leak pointer values if a binary opens a file using
+                * %pK and then elevates privileges before reading it.
+                */
+               cred = current_cred();
+               if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+                   !uid_eq(cred->euid, cred->uid) ||
+                   !gid_eq(cred->egid, cred->gid))
+                       ptr = NULL;
+               break;
+       }
+       case 2:
+       default:
+               /* Always print 0's for %pK */
+               ptr = NULL;
+               break;
+       }
+
+       return number(buf, end, (unsigned long)ptr, spec);
+}
+
 static noinline_for_stack
 char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
 {
@@ -1591,7 +1646,86 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
        return widen_string(buf, buf - buf_start, end, spec);
 }
 
-int kptr_restrict __read_mostly;
+static noinline_for_stack
+char *pointer_string(char *buf, char *end, const void *ptr,
+                    struct printf_spec spec)
+{
+       spec.base = 16;
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = 2 * sizeof(ptr);
+               spec.flags |= ZEROPAD;
+       }
+
+       return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+static bool have_filled_random_ptr_key __read_mostly;
+static siphash_key_t ptr_key __read_mostly;
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+       get_random_bytes(&ptr_key, sizeof(ptr_key));
+       /*
+        * have_filled_random_ptr_key==true is dependent on get_random_bytes().
+        * ptr_to_id() needs to see have_filled_random_ptr_key==true
+        * after get_random_bytes() returns.
+        */
+       smp_mb();
+       WRITE_ONCE(have_filled_random_ptr_key, true);
+}
+
+static struct random_ready_callback random_ready = {
+       .func = fill_random_ptr_key
+};
+
+static int __init initialize_ptr_random(void)
+{
+       int ret = add_random_ready_callback(&random_ready);
+
+       if (!ret) {
+               return 0;
+       } else if (ret == -EALREADY) {
+               fill_random_ptr_key(&random_ready);
+               return 0;
+       }
+
+       return ret;
+}
+early_initcall(initialize_ptr_random);
+
+/* Maps a pointer to a 32 bit unique identifier. */
+static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
+{
+       unsigned long hashval;
+       const int default_width = 2 * sizeof(ptr);
+
+       if (unlikely(!have_filled_random_ptr_key)) {
+               spec.field_width = default_width;
+               /* string length must be less than default_width */
+               return string(buf, end, "(ptrval)", spec);
+       }
+
+#ifdef CONFIG_64BIT
+       hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+       /*
+        * Mask off the first 32 bits, this makes explicit that we have
+        * modified the address (and 32 bits is plenty for a unique ID).
+        */
+       hashval = hashval & 0xffffffff;
+#else
+       hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = default_width;
+               spec.flags |= ZEROPAD;
+       }
+       spec.base = 16;
+
+       return number(buf, end, hashval, spec);
+}
 
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
@@ -1698,11 +1832,16 @@ int kptr_restrict __read_mostly;
  *                        c major compatible string
  *                        C full compatible string
  *
+ * - 'x' For printing the address. Equivalent to "%lx".
+ *
  * ** Please update also Documentation/printk-formats.txt when making changes **
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
  * pointer to the real address.
+ *
+ * Note: The default behaviour (unadorned %p) is to hash the address,
+ * rendering it useful as a unique identifier.
  */
 static noinline_for_stack
 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
@@ -1792,47 +1931,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                        return buf;
                }
        case 'K':
-               switch (kptr_restrict) {
-               case 0:
-                       /* Always print %pK values */
-                       break;
-               case 1: {
-                       const struct cred *cred;
-
-                       /*
-                        * kptr_restrict==1 cannot be used in IRQ context
-                        * because its test for CAP_SYSLOG would be meaningless.
-                        */
-                       if (in_irq() || in_serving_softirq() || in_nmi()) {
-                               if (spec.field_width == -1)
-                                       spec.field_width = default_width;
-                               return string(buf, end, "pK-error", spec);
-                       }
-
-                       /*
-                        * Only print the real pointer value if the current
-                        * process has CAP_SYSLOG and is running with the
-                        * same credentials it started with. This is because
-                        * access to files is checked at open() time, but %pK
-                        * checks permission at read() time. We don't want to
-                        * leak pointer values if a binary opens a file using
-                        * %pK and then elevates privileges before reading it.
-                        */
-                       cred = current_cred();
-                       if (!has_capability_noaudit(current, CAP_SYSLOG) ||
-                           !uid_eq(cred->euid, cred->uid) ||
-                           !gid_eq(cred->egid, cred->gid))
-                               ptr = NULL;
-                       break;
-               }
-               case 2:
-               default:
-                       /* Always print 0's for %pK */
-                       ptr = NULL;
+               if (!kptr_restrict)
                        break;
-               }
-               break;
-
+               return restricted_pointer(buf, end, ptr, spec);
        case 'N':
                return netdev_bits(buf, end, ptr, fmt);
        case 'a':
@@ -1857,15 +1958,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                case 'F':
                        return device_node_string(buf, end, ptr, spec, fmt + 1);
                }
+       case 'x':
+               return pointer_string(buf, end, ptr, spec);
        }
-       spec.flags |= SMALL;
-       if (spec.field_width == -1) {
-               spec.field_width = default_width;
-               spec.flags |= ZEROPAD;
-       }
-       spec.base = 16;
 
-       return number(buf, end, (unsigned long) ptr, spec);
+       /* default is to _not_ leak addresses, hash before printing */
+       return ptr_to_id(buf, end, ptr, spec);
 }
 
 /*
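
Taken together, the vsprintf changes above give three behaviours for pointer output: plain %p now prints a per-boot hashed identifier via ptr_to_id(), the new %px prints the raw value, and %pK keeps following the kptr_restrict sysctl through restricted_pointer(). A small hypothetical debug helper showing the difference:

#include <linux/kernel.h>
#include <linux/printk.h>

static void debug_print_pointer(const void *obj)
{
	/* Hashed 32-bit unique ID -- the safe default, no address leak. */
	pr_info("id:   %p\n", obj);

	/* Raw address -- only where leaking the pointer is acceptable. */
	pr_info("addr: %px\n", obj);

	/* Subject to kptr_restrict (0 = raw, 1 = CAP_SYSLOG only, 2 = zeros). */
	pr_info("kptr: %pK\n", obj);
}
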
index 74b52df..b5f940c 100644 (file)
@@ -113,11 +113,23 @@ static const struct file_operations bdi_debug_stats_fops = {
        .release        = single_release,
 };
 
-static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
+static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 {
+       if (!bdi_debug_root)
+               return -ENOMEM;
+
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
+       if (!bdi->debug_dir)
+               return -ENOMEM;
+
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
+       if (!bdi->debug_stats) {
+               debugfs_remove(bdi->debug_dir);
+               return -ENOMEM;
+       }
+
+       return 0;
 }
 
 static void bdi_debug_unregister(struct backing_dev_info *bdi)
@@ -129,9 +141,10 @@ static void bdi_debug_unregister(struct backing_dev_info *bdi)
 static inline void bdi_debug_init(void)
 {
 }
-static inline void bdi_debug_register(struct backing_dev_info *bdi,
+static inline int bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
 {
+       return 0;
 }
 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 {
index d947f3e..56e2d91 100644 (file)
@@ -50,7 +50,7 @@ void __dump_page(struct page *page, const char *reason)
         */
        int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
 
-       pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
+       pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
                  page, page_ref_count(page), mapcount,
                  page->mapping, page_to_pgoff(page));
        if (PageCompound(page))
@@ -69,7 +69,7 @@ void __dump_page(struct page *page, const char *reason)
 
 #ifdef CONFIG_MEMCG
        if (page->mem_cgroup)
-               pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
+               pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
 #endif
 }
 
@@ -84,10 +84,10 @@ EXPORT_SYMBOL(dump_page);
 
 void dump_vma(const struct vm_area_struct *vma)
 {
-       pr_emerg("vma %p start %p end %p\n"
-               "next %p prev %p mm %p\n"
-               "prot %lx anon_vma %p vm_ops %p\n"
-               "pgoff %lx file %p private_data %p\n"
+       pr_emerg("vma %px start %px end %px\n"
+               "next %px prev %px mm %px\n"
+               "prot %lx anon_vma %px vm_ops %px\n"
+               "pgoff %lx file %px private_data %px\n"
                "flags: %#lx(%pGv)\n",
                vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
                vma->vm_prev, vma->vm_mm,
@@ -100,27 +100,27 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-       pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+       pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
 #ifdef CONFIG_MMU
-               "get_unmapped_area %p\n"
+               "get_unmapped_area %px\n"
 #endif
                "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
-               "pgd %p mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
+               "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
                "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
                "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
                "start_code %lx end_code %lx start_data %lx end_data %lx\n"
                "start_brk %lx brk %lx start_stack %lx\n"
                "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
-               "binfmt %p flags %lx core_state %p\n"
+               "binfmt %px flags %lx core_state %px\n"
 #ifdef CONFIG_AIO
-               "ioctx_table %p\n"
+               "ioctx_table %px\n"
 #endif
 #ifdef CONFIG_MEMCG
-               "owner %p "
+               "owner %px "
 #endif
-               "exe_file %p\n"
+               "exe_file %px\n"
 #ifdef CONFIG_MMU_NOTIFIER
-               "mmu_notifier_mm %p\n"
+               "mmu_notifier_mm %px\n"
 #endif
 #ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
index d04ac1e..1826f19 100644 (file)
@@ -111,7 +111,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
        enum fixed_addresses idx;
        int i, slot;
 
-       WARN_ON(system_state != SYSTEM_BOOTING);
+       WARN_ON(system_state >= SYSTEM_RUNNING);
 
        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
index 2f98df0..c64dca6 100644 (file)
@@ -53,6 +53,20 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
                ret = -EFAULT;
                goto out;
        }
+
+       /*
+        * While get_vaddr_frames() could be used for transient (kernel
+        * controlled lifetime) pinning of memory pages all current
+        * users establish long term (userspace controlled lifetime)
+        * page pinning. Treat get_vaddr_frames() like
+        * get_user_pages_longterm() and disallow it for filesystem-dax
+        * mappings.
+        */
+       if (vma_is_fsdax(vma)) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
                vec->got_ref = true;
                vec->is_pfns = false;
index dfcde13..e0d82b6 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1095,6 +1095,70 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
 }
 EXPORT_SYMBOL(get_user_pages);
 
+#ifdef CONFIG_FS_DAX
+/*
+ * This is the same as get_user_pages() in that it assumes we are
+ * operating on the current task's mm, but it goes further to validate
+ * that the vmas associated with the address range are suitable for
+ * longterm elevated page reference counts. For example, filesystem-dax
+ * mappings are subject to the lifetime enforced by the filesystem and
+ * we need guarantees that longterm users like RDMA and V4L2 only
+ * establish mappings that have a kernel enforced revocation mechanism.
+ *
+ * "longterm" == userspace controlled elevated page count lifetime.
+ * Contrast this to iov_iter_get_pages() usages which are transient.
+ */
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+               unsigned int gup_flags, struct page **pages,
+               struct vm_area_struct **vmas_arg)
+{
+       struct vm_area_struct **vmas = vmas_arg;
+       struct vm_area_struct *vma_prev = NULL;
+       long rc, i;
+
+       if (!pages)
+               return -EINVAL;
+
+       if (!vmas) {
+               vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
+                              GFP_KERNEL);
+               if (!vmas)
+                       return -ENOMEM;
+       }
+
+       rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+
+       for (i = 0; i < rc; i++) {
+               struct vm_area_struct *vma = vmas[i];
+
+               if (vma == vma_prev)
+                       continue;
+
+               vma_prev = vma;
+
+               if (vma_is_fsdax(vma))
+                       break;
+       }
+
+       /*
+        * Either get_user_pages() failed, or the vma validation
+        * succeeded, in either case we don't need to put_page() before
+        * returning.
+        */
+       if (i >= rc)
+               goto out;
+
+       for (i = 0; i < rc; i++)
+               put_page(pages[i]);
+       rc = -EOPNOTSUPP;
+out:
+       if (vmas != vmas_arg)
+               kfree(vmas);
+       return rc;
+}
+EXPORT_SYMBOL(get_user_pages_longterm);
+#endif /* CONFIG_FS_DAX */
+
 /**
  * populate_vma_page_range() -  populate a range of pages in the vma.
  * @vma:   target vma
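
The comment block above spells out the calling convention: get_user_pages_longterm() behaves like get_user_pages() but additionally rejects filesystem-DAX VMAs with -EOPNOTSUPP, since those mappings have no kernel-enforced revocation. A hypothetical long-term pinning path (sketch only; real users such as RDMA and V4L2 are converted elsewhere) would look roughly like:

#include <linux/mm.h>
#include <linux/sched.h>

static long pin_user_buffer(unsigned long start, unsigned long nr_pages,
			    struct page **pages)
{
	long pinned;

	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages_longterm(start, nr_pages, FOLL_WRITE,
					 pages, NULL);
	up_read(&current->mm->mmap_sem);

	/* Number of pages pinned (caller must put_page() each), or -errno. */
	return pinned;
}
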
index 86fe697..0e7ded9 100644 (file)
@@ -842,20 +842,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd)
+               pmd_t *pmd, int flags)
 {
        pmd_t _pmd;
 
-       /*
-        * We should set the dirty bit only for FOLL_WRITE but for now
-        * the dirty bit in the pmd is meaningless.  And if the dirty
-        * bit will become meaningful and we'll only set it with
-        * FOLL_WRITE, an atomic set_bit will be required on the pmd to
-        * set the young bit, instead of the current set_pmd_at.
-        */
-       _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+       _pmd = pmd_mkyoung(*pmd);
+       if (flags & FOLL_WRITE)
+               _pmd = pmd_mkdirty(_pmd);
        if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-                               pmd, _pmd,  1))
+                               pmd, _pmd, flags & FOLL_WRITE))
                update_mmu_cache_pmd(vma, addr, pmd);
 }
 
@@ -884,7 +879,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                return NULL;
 
        if (flags & FOLL_TOUCH)
-               touch_pmd(vma, addr, pmd);
+               touch_pmd(vma, addr, pmd, flags);
 
        /*
         * device mapped pages can only be returned if the
@@ -995,20 +990,15 @@ out:
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
-               pud_t *pud)
+               pud_t *pud, int flags)
 {
        pud_t _pud;
 
-       /*
-        * We should set the dirty bit only for FOLL_WRITE but for now
-        * the dirty bit in the pud is meaningless.  And if the dirty
-        * bit will become meaningful and we'll only set it with
-        * FOLL_WRITE, an atomic set_bit will be required on the pud to
-        * set the young bit, instead of the current set_pud_at.
-        */
-       _pud = pud_mkyoung(pud_mkdirty(*pud));
+       _pud = pud_mkyoung(*pud);
+       if (flags & FOLL_WRITE)
+               _pud = pud_mkdirty(_pud);
        if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
-                               pud, _pud,  1))
+                               pud, _pud, flags & FOLL_WRITE))
                update_mmu_cache_pud(vma, addr, pud);
 }
 
@@ -1031,7 +1021,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                return NULL;
 
        if (flags & FOLL_TOUCH)
-               touch_pud(vma, addr, pud);
+               touch_pud(vma, addr, pud, flags);
 
        /*
         * device mapped pages can only be returned if the
@@ -1424,7 +1414,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        page = pmd_page(*pmd);
        VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
        if (flags & FOLL_TOUCH)
-               touch_pmd(vma, addr, pmd);
+               touch_pmd(vma, addr, pmd, flags);
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                /*
                 * We don't mlock() pte-mapped THPs. This way we can avoid
index 681b300..9a334f5 100644 (file)
@@ -3125,6 +3125,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
        }
 }
 
+static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
+{
+       if (addr & ~(huge_page_mask(hstate_vma(vma))))
+               return -EINVAL;
+       return 0;
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3141,6 +3148,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,
        .open = hugetlb_vm_op_open,
        .close = hugetlb_vm_op_close,
+       .split = hugetlb_vm_op_split,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
@@ -4627,7 +4635,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, addr);
-       p4d = p4d_offset(pgd, addr);
+       p4d = p4d_alloc(mm, pgd, addr);
+       if (!p4d)
+               return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (pud) {
                if (sz == PUD_SIZE) {
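
The new vm_operations_struct ->split hook (wired up in the mm/mmap.c hunk further down, where __split_vma() now asks the mapping before splitting) lets any VMA with its own alignment rules veto a split, not just hugetlb. A hypothetical driver mapping fixed 2 MB windows could reuse the same pattern:

#include <linux/mm.h>
#include <linux/sizes.h>

#define EXAMPLE_WINDOW_SIZE	SZ_2M	/* illustrative granule, not from this series */

/* Refuse munmap()/mremap() splits that would cut a window in half. */
static int example_vm_split(struct vm_area_struct *vma, unsigned long addr)
{
	if (addr & (EXAMPLE_WINDOW_SIZE - 1))
		return -EINVAL;
	return 0;
}

static const struct vm_operations_struct example_vm_ops = {
	.split	= example_vm_split,
};
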
index 6bcfb01..410c823 100644 (file)
@@ -134,7 +134,7 @@ static void print_error_description(struct kasan_access_info *info)
 
        pr_err("BUG: KASAN: %s in %pS\n",
                bug_type, (void *)info->ip);
-       pr_err("%s of size %zu at addr %p by task %s/%d\n",
+       pr_err("%s of size %zu at addr %px by task %s/%d\n",
                info->is_write ? "Write" : "Read", info->access_size,
                info->access_addr, current->comm, task_pid_nr(current));
 }
@@ -206,7 +206,7 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
        const char *rel_type;
        int rel_bytes;
 
-       pr_err("The buggy address belongs to the object at %p\n"
+       pr_err("The buggy address belongs to the object at %px\n"
               " which belongs to the cache %s of size %d\n",
                object, cache->name, cache->object_size);
 
@@ -225,7 +225,7 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
        }
 
        pr_err("The buggy address is located %d bytes %s of\n"
-              " %d-byte region [%p, %p)\n",
+              " %d-byte region [%px, %px)\n",
                rel_bytes, rel_type, cache->object_size, (void *)object_addr,
                (void *)(object_addr + cache->object_size));
 }
@@ -302,7 +302,7 @@ static void print_shadow_for_address(const void *addr)
                char shadow_buf[SHADOW_BYTES_PER_ROW];
 
                snprintf(buffer, sizeof(buffer),
-                       (i == 0) ? ">%p: " : " %p: ", kaddr);
+                       (i == 0) ? ">%px: " : " %px: ", kaddr);
                /*
                 * We should not pass a shadow pointer to generic
                 * function, because generic functions may try to
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
deleted file mode 100644 (file)
index cec5940..0000000
+++ /dev/null
@@ -1 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
index e4738d5..f656ca2 100644 (file)
 /* GFP bitmask for kmemleak internal allocations */
 #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-                                __GFP_NOWARN)
+                                __GFP_NOWARN | __GFP_NOFAIL)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
@@ -1523,6 +1523,8 @@ static void kmemleak_scan(void)
                        if (page_count(page) == 0)
                                continue;
                        scan_block(page, page + 1, NULL);
+                       if (!(pfn & 63))
+                               cond_resched();
                }
        }
        put_online_mems();
index 375cf32..751e97a 100644 (file)
@@ -276,15 +276,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
 {
        struct file *file = vma->vm_file;
 
+       *prev = vma;
 #ifdef CONFIG_SWAP
        if (!file) {
-               *prev = vma;
                force_swapin_readahead(vma, start, end);
                return 0;
        }
 
        if (shmem_mapping(file->f_mapping)) {
-               *prev = vma;
                force_shm_swapin_readahead(vma, start, end,
                                        file->f_mapping);
                return 0;
@@ -299,7 +298,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
                return 0;
        }
 
-       *prev = vma;
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
index 50e6906..ac2ffd5 100644 (file)
@@ -6044,7 +6044,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        memcg_check_events(memcg, page);
 
        if (!mem_cgroup_is_root(memcg))
-               css_put(&memcg->css);
+               css_put_many(&memcg->css, nr_entries);
 }
 
 /**
index 85e7a87..7930046 100644 (file)
@@ -2857,8 +2857,11 @@ int do_swap_page(struct vm_fault *vmf)
        int ret = 0;
        bool vma_readahead = swap_use_vma_readahead();
 
-       if (vma_readahead)
+       if (vma_readahead) {
                page = swap_readahead_detect(vmf, &swap_ra);
+               swapcache = page;
+       }
+
        if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
                if (page)
                        put_page(page);
@@ -2889,9 +2892,12 @@ int do_swap_page(struct vm_fault *vmf)
 
 
        delayacct_set_flag(DELAYACCT_PF_SWAPIN);
-       if (!page)
+       if (!page) {
                page = lookup_swap_cache(entry, vma_readahead ? vma : NULL,
                                         vmf->address);
+               swapcache = page;
+       }
+
        if (!page) {
                struct swap_info_struct *si = swp_swap_info(entry);
 
@@ -3831,7 +3837,8 @@ static inline int create_huge_pmd(struct vm_fault *vmf)
        return VM_FAULT_FALLBACK;
 }
 
-static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
+/* `inline' is required to avoid gcc 4.1.2 build error */
+static inline int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
 {
        if (vma_is_anonymous(vmf->vma))
                return do_huge_pmd_wp_page(vmf, orig_pmd);
index 924839f..9efdc02 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2555,9 +2555,11 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        struct vm_area_struct *new;
        int err;
 
-       if (is_vm_hugetlb_page(vma) && (addr &
-                                       ~(huge_page_mask(hstate_vma(vma)))))
-               return -EINVAL;
+       if (vma->vm_ops && vma->vm_ops->split) {
+               err = vma->vm_ops->split(vma, addr);
+               if (err)
+                       return err;
+       }
 
        new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
@@ -3017,20 +3019,20 @@ void exit_mmap(struct mm_struct *mm)
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        unmap_vmas(&tlb, vma, 0, -1);
 
-       set_bit(MMF_OOM_SKIP, &mm->flags);
-       if (unlikely(tsk_is_oom_victim(current))) {
+       if (unlikely(mm_is_oom_victim(mm))) {
                /*
                 * Wait for oom_reap_task() to stop working on this
                 * mm. Because MMF_OOM_SKIP is already set before
                 * calling down_read(), oom_reap_task() will not run
                 * on this "mm" post up_write().
                 *
-                * tsk_is_oom_victim() cannot be set from under us
-                * either because current->mm is already set to NULL
+                * mm_is_oom_victim() cannot be set from under us
+                * either because victim->mm is already set to NULL
                 * under task_lock before calling mmput and oom_mm is
-                * set not NULL by the OOM killer only if current->mm
+                * set not NULL by the OOM killer only if victim->mm
                 * is found not NULL while holding the task_lock.
                 */
+               set_bit(MMF_OOM_SKIP, &mm->flags);
                down_write(&mm->mmap_sem);
                up_write(&mm->mmap_sem);
        }
index ec39f73..58b629b 100644 (file)
@@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                next = pmd_addr_end(addr, end);
                if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
                                && pmd_none_or_clear_bad(pmd))
-                       continue;
+                       goto next;
 
                /* invoke the mmu notifier if the pmd is populated */
                if (!mni_start) {
@@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                                        }
 
                                        /* huge pmd was handled */
-                                       continue;
+                                       goto next;
                                }
                        }
                        /* fall through, the trans huge pmd just split */
@@ -196,6 +196,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                this_pages = change_pte_range(vma, pmd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
                pages += this_pages;
+next:
+               cond_resched();
        } while (pmd++, addr = next, addr != end);
 
        if (mni_start)
index c86fbd1..29f8555 100644 (file)
@@ -550,7 +550,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
         */
        set_bit(MMF_UNSTABLE, &mm->flags);
 
-       tlb_gather_mmu(&tlb, mm, 0, -1);
        for (vma = mm->mmap ; vma; vma = vma->vm_next) {
                if (!can_madv_dontneed_vma(vma))
                        continue;
@@ -565,11 +564,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
                 * we do not want to block exit_mmap by keeping mm ref
                 * count elevated without a good reason.
                 */
-               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
+               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+                       tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
                        unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
                                         NULL);
+                       tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
+               }
        }
-       tlb_finish_mmu(&tlb, 0, -1);
        pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                        task_pid_nr(tsk), tsk->comm,
                        K(get_mm_counter(mm, MM_ANONPAGES)),
@@ -682,8 +683,10 @@ static void mark_oom_victim(struct task_struct *tsk)
                return;
 
        /* oom_mm is bound to the signal struct life time. */
-       if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+       if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
                mmgrab(tsk->signal->oom_mm);
+               set_bit(MMF_OOM_VICTIM, &mm->flags);
+       }
 
        /*
         * Make sure that the task is woken up from uninterruptible sleep
index e709503..586f312 100644 (file)
@@ -433,11 +433,8 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
        else
                bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
-       if (unlikely(bg_thresh >= thresh)) {
-               pr_warn("vm direct limit must be set greater than background limit.\n");
+       if (bg_thresh >= thresh)
                bg_thresh = thresh / 2;
-       }
-
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
index d4096f4..76c9688 100644 (file)
@@ -2507,10 +2507,6 @@ void drain_all_pages(struct zone *zone)
        if (WARN_ON_ONCE(!mm_percpu_wq))
                return;
 
-       /* Workqueues cannot recurse */
-       if (current->flags & PF_WQ_WORKER)
-               return;
-
        /*
         * Do not drain if one is already in progress unless it's specific to
         * a zone. Such callers are primarily CMA and memory hotplug and need
@@ -2688,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
 {
        struct page *page, *next;
        unsigned long flags, pfn;
+       int batch_count = 0;
 
        /* Prepare pages for freeing */
        list_for_each_entry_safe(page, next, list, lru) {
@@ -2704,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
                set_page_private(page, 0);
                trace_mm_page_free_batched(page);
                free_unref_page_commit(page, pfn);
+
+               /*
+                * Guard against excessive IRQ disabled times when we get
+                * a large list of pages to free.
+                */
+               if (++batch_count == SWAP_CLUSTER_MAX) {
+                       local_irq_restore(flags);
+                       batch_count = 0;
+                       local_irq_save(flags);
+               }
        }
        local_irq_restore(flags);
 }
@@ -6253,6 +6260,8 @@ void __paginginit zero_resv_unavail(void)
        pgcnt = 0;
        for_each_resv_unavail_range(i, &start, &end) {
                for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
+                       if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
+                               continue;
                        mm_zero_struct_page(pfn_to_page(pfn));
                        pgcnt++;
                }
@@ -7656,11 +7665,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /*
         * In case of -EBUSY, we'd like to know which page causes problem.
-        * So, just fall through. We will check it in test_pages_isolated().
+        * So, just fall through. test_pages_isolated() has a tracepoint
+        * which will report the busy page.
+        *
+        * It is possible that busy pages could become available before
+        * the call to test_pages_isolated, and the range will actually be
+        * allocated.  So, if we fall through be sure to clear ret so that
+        * -EBUSY is not accidentally used or returned to caller.
         */
        ret = __alloc_contig_migrate_range(&cc, start, end);
        if (ret && ret != -EBUSY)
                goto done;
+       ret = 0;

 
        /*
         * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
index 8592543..270a821 100644 (file)
@@ -616,7 +616,6 @@ static void init_early_allocated_pages(void)
 {
        pg_data_t *pgdat;
 
-       drain_all_pages(NULL);
        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
 }
index d22b843..ae3c2a3 100644 (file)
@@ -30,10 +30,37 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
        return true;
 }
 
+static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
+{
+       unsigned long hpage_pfn = page_to_pfn(hpage);
+
+       /* THP can be referenced by any subpage */
+       return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
+}
+
+/**
+ * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
+ *
+ * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
+ * mapped. check_pte() has to validate this.
+ *
+ * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
+ * page.
+ *
+ * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
+ * entry that points to @pvmw->page or any subpage in case of THP.
+ *
+ * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
+ * @pvmw->page or any subpage in case of THP.
+ *
+ * Otherwise, return false.
+ *
+ */
 static bool check_pte(struct page_vma_mapped_walk *pvmw)
 {
+       unsigned long pfn;
+
        if (pvmw->flags & PVMW_MIGRATION) {
-#ifdef CONFIG_MIGRATION
                swp_entry_t entry;
                if (!is_swap_pte(*pvmw->pte))
                        return false;
@@ -41,38 +68,25 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
 
                if (!is_migration_entry(entry))
                        return false;
-               if (migration_entry_to_page(entry) - pvmw->page >=
-                               hpage_nr_pages(pvmw->page)) {
-                       return false;
-               }
-               if (migration_entry_to_page(entry) < pvmw->page)
-                       return false;
-#else
-               WARN_ON_ONCE(1);
-#endif
-       } else {
-               if (is_swap_pte(*pvmw->pte)) {
-                       swp_entry_t entry;
 
-                       entry = pte_to_swp_entry(*pvmw->pte);
-                       if (is_device_private_entry(entry) &&
-                           device_private_entry_to_page(entry) == pvmw->page)
-                               return true;
-               }
+               pfn = migration_entry_to_pfn(entry);
+       } else if (is_swap_pte(*pvmw->pte)) {
+               swp_entry_t entry;
 
-               if (!pte_present(*pvmw->pte))
+               /* Handle un-addressable ZONE_DEVICE memory */
+               entry = pte_to_swp_entry(*pvmw->pte);
+               if (!is_device_private_entry(entry))
                        return false;
 
-               /* THP can be referenced by any subpage */
-               if (pte_page(*pvmw->pte) - pvmw->page >=
-                               hpage_nr_pages(pvmw->page)) {
-                       return false;
-               }
-               if (pte_page(*pvmw->pte) < pvmw->page)
+               pfn = device_private_entry_to_pfn(entry);
+       } else {
+               if (!pte_present(*pvmw->pte))
                        return false;
+
+               pfn = pte_pfn(*pvmw->pte);
        }
 
-       return true;
+       return pfn_in_hpage(pvmw->page, pfn);
 }
 
 /**
index 79e3549..50e7fdf 100644 (file)
@@ -2719,7 +2719,11 @@ void __init setup_per_cpu_areas(void)
 
        if (pcpu_setup_first_chunk(ai, fc) < 0)
                panic("Failed to initialize percpu areas.");
+#ifdef CONFIG_CRIS
+#warning "the CRIS architecture has physical and virtual addresses confused"
+#else
        pcpu_free_alloc_info(ai);
+#endif
 }
 
 #endif /* CONFIG_SMP */
index 4aa9307..7fbe67b 100644 (file)
@@ -3776,7 +3776,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
         * tmpfs instance, limiting inodes to one per page of lowmem;
         * but the internal instance is left unlimited.
         */
-       if (!(sb->s_flags & MS_KERNMOUNT)) {
+       if (!(sb->s_flags & SB_KERNMOUNT)) {
                sbinfo->max_blocks = shmem_default_max_blocks();
                sbinfo->max_inodes = shmem_default_max_inodes();
                if (shmem_parse_options(data, sbinfo, false)) {
@@ -3784,12 +3784,12 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed;
                }
        } else {
-               sb->s_flags |= MS_NOUSER;
+               sb->s_flags |= SB_NOUSER;
        }
        sb->s_export_op = &shmem_export_ops;
-       sb->s_flags |= MS_NOSEC;
+       sb->s_flags |= SB_NOSEC;
 #else
-       sb->s_flags |= MS_NOUSER;
+       sb->s_flags |= SB_NOUSER;
 #endif
 
        spin_lock_init(&sbinfo->stat_lock);
@@ -3809,7 +3809,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_xattr = shmem_xattr_handlers;
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
        uuid_gen(&sb->s_uuid);
 
index 183e996..4e51ef9 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1584,11 +1584,8 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
                       *dbg_redzone2(cachep, objp));
        }
 
-       if (cachep->flags & SLAB_STORE_USER) {
-               pr_err("Last user: [<%p>](%pSR)\n",
-                      *dbg_userword(cachep, objp),
-                      *dbg_userword(cachep, objp));
-       }
+       if (cachep->flags & SLAB_STORE_USER)
+               pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
        realobj = (char *)objp + obj_offset(cachep);
        size = cachep->object_size;
        for (i = 0; i < size && lines; i += 16, lines--) {
@@ -1621,7 +1618,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
                        /* Mismatch ! */
                        /* Print header */
                        if (lines == 0) {
-                               pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
+                               pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
                                       print_tainted(), cachep->name,
                                       realobj, size);
                                print_objinfo(cachep, objp, 0);
@@ -1650,13 +1647,13 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
                if (objnr) {
                        objp = index_to_obj(cachep, page, objnr - 1);
                        realobj = (char *)objp + obj_offset(cachep);
-                       pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
+                       pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
                        print_objinfo(cachep, objp, 2);
                }
                if (objnr + 1 < cachep->num) {
                        objp = index_to_obj(cachep, page, objnr + 1);
                        realobj = (char *)objp + obj_offset(cachep);
-                       pr_err("Next obj: start=%p, len=%d\n", realobj, size);
+                       pr_err("Next obj: start=%px, len=%d\n", realobj, size);
                        print_objinfo(cachep, objp, 2);
                }
        }
@@ -2608,7 +2605,7 @@ static void slab_put_obj(struct kmem_cache *cachep,
        /* Verify double free bug */
        for (i = page->active; i < cachep->num; i++) {
                if (get_free_obj(page, i) == objnr) {
-                       pr_err("slab: double free detected in cache '%s', objp %p\n",
+                       pr_err("slab: double free detected in cache '%s', objp %px\n",
                               cachep->name, objp);
                        BUG();
                }
@@ -2772,7 +2769,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
        else
                slab_error(cache, "memory outside object was overwritten");
 
-       pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+       pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
               obj, redzone1, redzone2);
 }
 
@@ -3078,7 +3075,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
                                *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
                        slab_error(cachep, "double free, or memory outside object was overwritten");
-                       pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+                       pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
                               objp, *dbg_redzone1(cachep, objp),
                               *dbg_redzone2(cachep, objp));
                }
@@ -3091,7 +3088,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                cachep->ctor(objp);
        if (ARCH_SLAB_MINALIGN &&
            ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
-               pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+               pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
                       objp, (int)ARCH_SLAB_MINALIGN);
        }
        return objp;
@@ -4283,7 +4280,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
                return;
        }
 #endif
-       seq_printf(m, "%p", (void *)address);
+       seq_printf(m, "%px", (void *)address);
 }
 
 static int leaks_show(struct seq_file *m, void *p)
index 7a5daca..2609aba 100644 (file)
@@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
        if (unlikely(!mem_section)) {
                unsigned long size, align;
 
-               size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
+               size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
                align = 1 << (INTERNODE_CACHE_SHIFT);
                mem_section = memblock_virt_alloc(size, align);
        }
index c02c850..47d5ced 100644 (file)
@@ -297,10 +297,13 @@ EXPORT_SYMBOL(register_shrinker);
  */
 void unregister_shrinker(struct shrinker *shrinker)
 {
+       if (!shrinker->nr_deferred)
+               return;
        down_write(&shrinker_rwsem);
        list_del(&shrinker->list);
        up_write(&shrinker_rwsem);
        kfree(shrinker->nr_deferred);
+       shrinker->nr_deferred = NULL;
 }
 EXPORT_SYMBOL(unregister_shrinker);
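
With the NULL check on ->nr_deferred and the reset afterwards, unregister_shrinker() is now safe to call even when register_shrinker() failed or was never reached, which simplifies teardown and error paths. A hypothetical sketch:

#include <linux/shrinker.h>

static unsigned long example_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;		/* nothing to reclaim in this sketch */
}

static unsigned long example_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,
};

static void example_teardown(void)
{
	/* Harmless even when register_shrinker(&example_shrinker) never ran. */
	unregister_shrinker(&example_shrinker);
}
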
 
index 685049a..683c065 100644 (file)
@@ -53,6 +53,7 @@
 #include <linux/mount.h>
 #include <linux/migrate.h>
 #include <linux/pagemap.h>
+#include <linux/fs.h>
 
 #define ZSPAGE_MAGIC   0x58
 
index 8dfdd94..bad01b1 100644 (file)
@@ -111,12 +111,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
                vlan_gvrp_uninit_applicant(real_dev);
        }
 
-       /* Take it out of our own structures, but be sure to interlock with
-        * HW accelerating devices or SW vlan input packet processing if
-        * VLAN is not 0 (leave it there for 802.1p).
-        */
-       if (vlan_id)
-               vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
+       vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
 
        /* Get rid of the vlan's reference to real_dev */
        dev_put(real_dev);
index 985046a..80f5c79 100644 (file)
@@ -839,7 +839,6 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
        if (IS_ERR(file)) {
                pr_err("%s (%d): failed to map fd\n",
                       __func__, task_pid_nr(current));
-               sock_release(csocket);
                kfree(p);
                return PTR_ERR(file);
        }
index 325c560..086a4ab 100644 (file)
@@ -543,3 +543,7 @@ static void p9_trans_xen_exit(void)
        return xenbus_unregister_driver(&xen_9pfs_front_driver);
 }
 module_exit(p9_trans_xen_exit);
+
+MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
+MODULE_DESCRIPTION("Xen Transport for 9P");
+MODULE_LICENSE("GPL");
index 1b659ab..bbe8414 100644 (file)
@@ -1214,7 +1214,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        orig_node->last_seen = jiffies;
 
        /* find packet count of corresponding one hop neighbor */
-       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+       spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
        if_num = if_incoming->if_num;
        orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
        neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
@@ -1224,7 +1224,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        } else {
                neigh_rq_count = 0;
        }
-       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+       spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
 
        /* pay attention to not get a value bigger than 100 % */
        if (orig_eq_count > neigh_rq_count)
index 341ceab..e0e2bfc 100644 (file)
@@ -814,7 +814,7 @@ static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv,
        }
 
        orig_gw = batadv_gw_node_get(bat_priv, orig_node);
-       if (!orig_node)
+       if (!orig_gw)
                goto out;
 
        if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0)
index a98cf11..ebe6e38 100644 (file)
@@ -499,6 +499,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
         */
        if (skb->priority >= 256 && skb->priority <= 263)
                frag_header.priority = skb->priority - 256;
+       else
+               frag_header.priority = 0;
 
        ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
        ether_addr_copy(frag_header.dest, orig_node->orig);
index 15cd213..ebc4e22 100644 (file)
@@ -482,7 +482,7 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
 
 /**
  * batadv_tp_sender_timeout - timer that fires in case of packet loss
- * @arg: address of the related tp_vars
+ * @t: address to timer_list inside tp_vars
  *
  * If fired it means that there was packet loss.
  * Switch to Slow Start, set the ss_threshold to half of the current cwnd and
@@ -1106,7 +1106,7 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
 /**
  * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is
  *  reached without received ack
- * @arg: address of the related tp_vars
+ * @t: address to timer_list inside tp_vars
  */
 static void batadv_tp_receiver_shutdown(struct timer_list *t)
 {
index 43ba91c..fc6615d 100644 (file)
@@ -3363,9 +3363,10 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
                        break;
 
                case L2CAP_CONF_EFS:
-                       remote_efs = 1;
-                       if (olen == sizeof(efs))
+                       if (olen == sizeof(efs)) {
+                               remote_efs = 1;
                                memcpy(&efs, (void *) val, olen);
+                       }
                        break;
 
                case L2CAP_CONF_EWS:
@@ -3584,16 +3585,17 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
                        break;
 
                case L2CAP_CONF_EFS:
-                       if (olen == sizeof(efs))
+                       if (olen == sizeof(efs)) {
                                memcpy(&efs, (void *)val, olen);
 
-                       if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
-                           efs.stype != L2CAP_SERV_NOTRAFIC &&
-                           efs.stype != chan->local_stype)
-                               return -ECONNREFUSED;
+                               if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+                                   efs.stype != L2CAP_SERV_NOTRAFIC &&
+                                   efs.stype != chan->local_stype)
+                                       return -ECONNREFUSED;
 
-                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
-                                          (unsigned long) &efs, endptr - ptr);
+                               l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+                                                  (unsigned long) &efs, endptr - ptr);
+                       }
                        break;
 
                case L2CAP_CONF_FCS:
index d0ef0a8..015f465 100644 (file)
@@ -1262,19 +1262,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
        struct net_bridge *br = netdev_priv(dev);
        int err;
 
+       err = register_netdevice(dev);
+       if (err)
+               return err;
+
        if (tb[IFLA_ADDRESS]) {
                spin_lock_bh(&br->lock);
                br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
                spin_unlock_bh(&br->lock);
        }
 
-       err = register_netdevice(dev);
-       if (err)
-               return err;
-
        err = br_changelink(dev, tb, data, extack);
        if (err)
-               unregister_netdevice(dev);
+               br_dev_delete(dev, NULL);
+
        return err;
 }
 
index 2d38b6e..e0adcd1 100644 (file)
@@ -334,9 +334,8 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        mutex_lock(&caifdevs->lock);
        list_add_rcu(&caifd->list, &caifdevs->list);
 
-       strncpy(caifd->layer.name, dev->name,
-               sizeof(caifd->layer.name) - 1);
-       caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+       strlcpy(caifd->layer.name, dev->name,
+               sizeof(caifd->layer.name));
        caifd->layer.transmit = transmit;
        cfcnfg_add_phy_layer(cfg,
                                dev,
index 5cd44f0..1a082a9 100644 (file)
@@ -176,9 +176,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
                dev_add_pack(&caif_usb_type);
        pack_added = true;
 
-       strncpy(layer->name, dev->name,
-                       sizeof(layer->name) - 1);
-       layer->name[sizeof(layer->name) - 1] = 0;
+       strlcpy(layer->name, dev->name, sizeof(layer->name));
 
        return 0;
 }
index 273cb07..8f00bea 100644 (file)
@@ -268,17 +268,15 @@ static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
        case CAIFPROTO_RFM:
                l->linktype = CFCTRL_SRV_RFM;
                l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
-               strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
-                       sizeof(l->u.rfm.volume)-1);
-               l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
+               strlcpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
+                       sizeof(l->u.rfm.volume));
                break;
        case CAIFPROTO_UTIL:
                l->linktype = CFCTRL_SRV_UTIL;
                l->endpoint = 0x00;
                l->chtype = 0x00;
-               strncpy(l->u.utility.name, s->sockaddr.u.util.service,
-                       sizeof(l->u.utility.name)-1);
-               l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
+               strlcpy(l->u.utility.name, s->sockaddr.u.util.service,
+                       sizeof(l->u.utility.name));
                caif_assert(sizeof(l->u.utility.name) > 10);
                l->u.utility.paramlen = s->param.size;
                if (l->u.utility.paramlen > sizeof(l->u.utility.params))
index f5afda1..655ed70 100644 (file)
@@ -258,8 +258,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
                tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
                cfpkt_add_body(pkt, &tmp16, 2);
                memset(utility_name, 0, sizeof(utility_name));
-               strncpy(utility_name, param->u.utility.name,
-                       UTILITY_NAME_LENGTH - 1);
+               strlcpy(utility_name, param->u.utility.name,
+                       UTILITY_NAME_LENGTH);
                cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
                tmp8 = param->u.utility.paramlen;
                cfpkt_add_body(pkt, &tmp8, 1);
index 003b2d6..4d7f988 100644 (file)
@@ -721,20 +721,16 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
 {
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
 
-       if (WARN_ONCE(dev->type != ARPHRD_CAN ||
-                     skb->len != CAN_MTU ||
-                     cfd->len > CAN_MAX_DLEN,
-                     "PF_CAN: dropped non conform CAN skbuf: "
-                     "dev type %d, len %d, datalen %d\n",
-                     dev->type, skb->len, cfd->len))
-               goto drop;
+       if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
+                    cfd->len > CAN_MAX_DLEN)) {
+               pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
+                            dev->type, skb->len, cfd->len);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
 
        can_receive(skb, dev);
        return NET_RX_SUCCESS;
-
-drop:
-       kfree_skb(skb);
-       return NET_RX_DROP;
 }
 
 static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -742,20 +738,16 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
 {
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
 
-       if (WARN_ONCE(dev->type != ARPHRD_CAN ||
-                     skb->len != CANFD_MTU ||
-                     cfd->len > CANFD_MAX_DLEN,
-                     "PF_CAN: dropped non conform CAN FD skbuf: "
-                     "dev type %d, len %d, datalen %d\n",
-                     dev->type, skb->len, cfd->len))
-               goto drop;
+       if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
+                    cfd->len > CANFD_MAX_DLEN)) {
+               pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
+                            dev->type, skb->len, cfd->len);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
 
        can_receive(skb, dev);
        return NET_RX_SUCCESS;
-
-drop:
-       kfree_skb(skb);
-       return NET_RX_DROP;
 }
 
 /*
index 07ed21d..613fb40 100644 (file)
@@ -1106,7 +1106,7 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
-       return p ? -ENFILE : -EEXIST;
+       return -ENFILE;
 }
 
 static int dev_alloc_name_ns(struct net *net,
@@ -1146,7 +1146,19 @@ EXPORT_SYMBOL(dev_alloc_name);
 int dev_get_valid_name(struct net *net, struct net_device *dev,
                       const char *name)
 {
-       return dev_alloc_name_ns(net, dev, name);
+       BUG_ON(!net);
+
+       if (!dev_valid_name(name))
+               return -EINVAL;
+
+       if (strchr(name, '%'))
+               return dev_alloc_name_ns(net, dev, name);
+       else if (__dev_get_by_name(net, name))
+               return -EEXIST;
+       else if (dev->name != name)
+               strlcpy(dev->name, name, IFNAMSIZ);
+
+       return 0;
 }
 EXPORT_SYMBOL(dev_get_valid_name);
 
@@ -3139,10 +3151,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
                hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
 
                /* + transport layer */
-               if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-                       hdr_len += tcp_hdrlen(skb);
-               else
-                       hdr_len += sizeof(struct udphdr);
+               if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+                       const struct tcphdr *th;
+                       struct tcphdr _tcphdr;
+
+                       th = skb_header_pointer(skb, skb_transport_offset(skb),
+                                               sizeof(_tcphdr), &_tcphdr);
+                       if (likely(th))
+                               hdr_len += __tcp_hdrlen(th);
+               } else {
+                       struct udphdr _udphdr;
+
+                       if (skb_header_pointer(skb, skb_transport_offset(skb),
+                                              sizeof(_udphdr), &_udphdr))
+                               hdr_len += sizeof(struct udphdr);
+               }
 
                if (shinfo->gso_type & SKB_GSO_DODGY)
                        gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
@@ -3904,7 +3927,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
                                     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
                                     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
                        goto do_drop;
-               if (troom > 0 && __skb_linearize(skb))
+               if (skb_linearize(skb))
                        goto do_drop;
        }
 
index f8fcf45..8225416 100644 (file)
@@ -770,15 +770,6 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
        return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
 }
 
-static void
-warn_incomplete_ethtool_legacy_settings_conversion(const char *details)
-{
-       char name[sizeof(current->comm)];
-
-       pr_info_once("warning: `%s' uses legacy ethtool link settings API, %s\n",
-                    get_task_comm(name, current), details);
-}
-
 /* Query device for its ethtool_cmd settings.
  *
  * Backward compatibility note: for compatibility with legacy ethtool,
@@ -805,10 +796,8 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
                                                           &link_ksettings);
                if (err < 0)
                        return err;
-               if (!convert_link_ksettings_to_legacy_settings(&cmd,
-                                                              &link_ksettings))
-                       warn_incomplete_ethtool_legacy_settings_conversion(
-                               "link modes are only partially reported");
+               convert_link_ksettings_to_legacy_settings(&cmd,
+                                                         &link_ksettings);
 
                /* send a sensible cmd tag back to user */
                cmd.cmd = ETHTOOL_GSET;
index 6a85e67..1c0eb43 100644 (file)
@@ -458,6 +458,10 @@ do_pass:
                            convert_bpf_extensions(fp, &insn))
                                break;
 
+                       if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
+                           fp->code == (BPF_ALU | BPF_MOD | BPF_X))
+                               *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
+
                        *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
                        break;
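
The inserted BPF_MOV32_REG(BPF_REG_X, BPF_REG_X) truncates the divisor to 32 bits before a cBPF division or modulo by X, so stale upper bits in the 64-bit eBPF register cannot defeat the divide-by-zero handling or skew the result. A hedged sketch of the 32-bit semantics being preserved; mapping a zero divisor to a zero result here is only so the sketch can never trap, the real zero-divisor handling lives in the BPF runtime:

#include <stdint.h>
#include <stdio.h>

/* cBPF ALU_DIV/ALU_MOD by register X only ever sees a 32-bit divisor. */
static uint32_t cbpf_div_x(uint32_t a, uint64_t x_reg)
{
        uint32_t x = (uint32_t)x_reg;   /* the BPF_MOV32_REG truncation */

        return x ? a / x : 0;
}

static uint32_t cbpf_mod_x(uint32_t a, uint64_t x_reg)
{
        uint32_t x = (uint32_t)x_reg;

        return x ? a % x : 0;
}

int main(void)
{
        /* The upper half of X holds garbage; it must not change the result. */
        uint64_t x = 0xdeadbeef00000004ull;

        printf("100 / X   = %u\n", cbpf_div_x(100, x));     /* 25 */
        printf("100 mod X = %u\n", cbpf_mod_x(100, x));     /* 0 */
        printf("100 / 0   = %u\n", cbpf_div_x(100, 0));     /* 0, no trap */
        return 0;
}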
 
@@ -1054,11 +1058,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
                 */
                goto out_err_free;
 
-       /* We are guaranteed to never error here with cBPF to eBPF
-        * transitions, since there's no issue with type compatibility
-        * checks on program arrays.
-        */
        fp = bpf_prog_select_runtime(fp, &err);
+       if (err)
+               goto out_err_free;
 
        kfree(old_prog);
        return fp;
index 15ce300..544bddf 100644 (file)
@@ -976,8 +976,8 @@ ip_proto_again:
 out_good:
        ret = true;
 
-       key_control->thoff = (u16)nhoff;
 out:
+       key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
        key_basic->n_proto = proto;
        key_basic->ip_proto = ip_proto;
 
@@ -985,7 +985,6 @@ out:
 
 out_bad:
        ret = false;
-       key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
        goto out;
 }
 EXPORT_SYMBOL(__skb_flow_dissect);
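
Moving the clamp to the common out: label means key_control->thoff is bounded by the data actually present on the successful path as well, not only when dissection fails. A tiny sketch of the clamp itself; the surrounding parsing is omitted:

#include <stdio.h>

/* Mirror min_t(u16, nhoff, available): a malformed packet can claim a
 * transport offset far beyond its own length, and consumers of thoff must
 * never index past the buffer.
 */
static unsigned short clamp_thoff(unsigned int nhoff, unsigned int avail)
{
        return (unsigned short)(nhoff < avail ? nhoff : avail);
}

int main(void)
{
        printf("%u\n", clamp_thoff(34, 1500));   /* normal packet: 34 */
        printf("%u\n", clamp_thoff(60000, 60));  /* bogus header: clamped to 60 */
        return 0;
}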
index d1f5fe9..7f83171 100644 (file)
@@ -532,7 +532,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
        if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
                nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 
-       hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+       hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 
        if (n->parms->dead) {
                rc = ERR_PTR(-EINVAL);
@@ -544,7 +544,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
             n1 != NULL;
             n1 = rcu_dereference_protected(n1->next,
                        lockdep_is_held(&tbl->lock))) {
-               if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
+               if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
                        if (want_ref)
                                neigh_hold(n1);
                        rc = n1;
index b797832..60a71be 100644 (file)
@@ -267,7 +267,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
        spin_lock_bh(&net->nsid_lock);
        peer = idr_find(&net->netns_ids, id);
        if (peer)
-               get_net(peer);
+               peer = maybe_get_net(peer);
        spin_unlock_bh(&net->nsid_lock);
        rcu_read_unlock();
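
Using maybe_get_net() means the id lookup only takes a reference when the namespace's refcount is still non-zero, so a netns that has already started dying cannot be revived by a late lookup. A hedged sketch of the underlying "increment unless zero" idiom with C11 atomics; the type and helper names are invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
        atomic_int refcount;
};

/* Take a reference only if the object is still live (refcount > 0). */
static bool maybe_get(struct object *obj)
{
        int old = atomic_load(&obj->refcount);

        while (old > 0) {
                if (atomic_compare_exchange_weak(&obj->refcount, &old, old + 1))
                        return true;    /* got a reference */
                /* old was reloaded by the failed CAS; retry */
        }
        return false;                   /* object already dying, leave it alone */
}

int main(void)
{
        struct object live  = { 2 };
        struct object dying = { 0 };

        printf("live:  %s\n", maybe_get(&live)  ? "ref taken" : "skipped");
        printf("dying: %s\n", maybe_get(&dying) ? "ref taken" : "skipped");
        return 0;
}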
 
index 1c48109..b905747 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/module.h>
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
index dabba2a..778d7f0 100644 (file)
@@ -1681,18 +1681,18 @@ static bool link_dump_filtered(struct net_device *dev,
        return false;
 }
 
-static struct net *get_target_net(struct sk_buff *skb, int netnsid)
+static struct net *get_target_net(struct sock *sk, int netnsid)
 {
        struct net *net;
 
-       net = get_net_ns_by_id(sock_net(skb->sk), netnsid);
+       net = get_net_ns_by_id(sock_net(sk), netnsid);
        if (!net)
                return ERR_PTR(-EINVAL);
 
        /* For now, the caller is required to have CAP_NET_ADMIN in
         * the user namespace owning the target net ns.
         */
-       if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+       if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
                put_net(net);
                return ERR_PTR(-EACCES);
        }
@@ -1733,7 +1733,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                        ifla_policy, NULL) >= 0) {
                if (tb[IFLA_IF_NETNSID]) {
                        netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
-                       tgt_net = get_target_net(skb, netnsid);
+                       tgt_net = get_target_net(skb->sk, netnsid);
                        if (IS_ERR(tgt_net)) {
                                tgt_net = net;
                                netnsid = -1;
@@ -2883,7 +2883,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        if (tb[IFLA_IF_NETNSID]) {
                netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
-               tgt_net = get_target_net(skb, netnsid);
+               tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
                if (IS_ERR(tgt_net))
                        return PTR_ERR(tgt_net);
        }
index 6b0ff39..08f5740 100644 (file)
@@ -1177,12 +1177,12 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
        int i, new_frags;
        u32 d_off;
 
-       if (!num_frags)
-               return 0;
-
        if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
                return -EINVAL;
 
+       if (!num_frags)
+               goto release;
+
        new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        for (i = 0; i < new_frags; i++) {
                page = alloc_page(gfp_mask);
@@ -1238,6 +1238,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
        __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
        skb_shinfo(skb)->nr_frags = new_frags;
 
+release:
        skb_zcopy_clear(skb, false);
        return 0;
 }
@@ -3654,8 +3655,6 @@ normal:
 
                skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
                                              SKBTX_SHARED_FRAG;
-               if (skb_zerocopy_clone(nskb, head_skb, GFP_ATOMIC))
-                       goto err;
 
                while (pos < offset + len) {
                        if (i >= nfrags) {
@@ -3681,6 +3680,8 @@ normal:
 
                        if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
                                goto err;
+                       if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+                               goto err;
 
                        *nskb_frag = *frag;
                        __skb_frag_ref(nskb_frag);
@@ -4293,7 +4294,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
        struct sock *sk = skb->sk;
 
        if (!skb_may_tx_timestamp(sk, false))
-               return;
+               goto err;
 
        /* Take a reference to prevent skb_orphan() from freeing the socket,
         * but only if the socket refcount is not zero.
@@ -4302,7 +4303,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
                *skb_hwtstamps(skb) = *hwtstamps;
                __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
                sock_put(sk);
+               return;
        }
+
+err:
+       kfree_skb(skb);
 }
 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
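
skb_complete_tx_timestamp() now funnels every bail-out through a path that frees the skb, instead of leaking it when timestamping is not permitted or the socket is already being torn down. A minimal sketch of that "free on every early exit" shape; the permission check is a stub and the buffer type is invented:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer {
        const char *data;
};

static bool may_timestamp(void)
{
        return false;   /* stub: pretend timestamping is not permitted */
}

static void deliver(struct buffer *b)
{
        printf("delivered: %s\n", b->data);
        free(b);
}

/* Every exit that does not hand the buffer onwards must free it itself. */
static void complete_timestamp(struct buffer *b)
{
        if (!may_timestamp())
                goto err;

        deliver(b);
        return;

err:
        free(b);
}

int main(void)
{
        struct buffer *b = malloc(sizeof(*b));

        b->data = "tx timestamp";
        complete_timestamp(b);          /* denied path frees the buffer instead of leaking it */
        puts("done, nothing leaked");
        return 0;
}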
 
index 217f4e3..146b50e 100644 (file)
@@ -288,7 +288,7 @@ static int sock_diag_bind(struct net *net, int group)
        case SKNLGRP_INET6_UDP_DESTROY:
                if (!sock_diag_handlers[AF_INET6])
                        request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-                                      NETLINK_SOCK_DIAG, AF_INET);
+                                      NETLINK_SOCK_DIAG, AF_INET6);
                break;
        }
        return 0;
index cbc3dde..a47ad6c 100644 (file)
@@ -325,7 +325,13 @@ static struct ctl_table net_core_table[] = {
                .data           = &bpf_jit_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
                .proc_handler   = proc_dointvec
+#else
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
+               .extra2         = &one,
+#endif
        },
 # ifdef CONFIG_HAVE_EBPF_JIT
        {
index 1c75cd1..92d016e 100644 (file)
@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t)
 
        ccid2_pr_debug("RTO_EXPIRE\n");
 
+       if (sk->sk_state == DCCP_CLOSED)
+               goto out;
+
        /* back-off timer */
        hc->tx_rto <<= 1;
        if (hc->tx_rto > DCCP_RTO_MAX)
index abd07a4..178bb98 100644 (file)
@@ -57,10 +57,16 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
                if (state == DCCP_TIME_WAIT)
                        timeo = DCCP_TIMEWAIT_LEN;
 
+               /* tw_timer is pinned, so we need to make sure BH are disabled
+                * in following section, otherwise timer handler could run before
+                * we complete the initialization.
+                */
+               local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
                inet_twsk_put(tw);
+               local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
index b68168f..9d43c1f 100644 (file)
@@ -259,6 +259,7 @@ int dccp_disconnect(struct sock *sk, int flags)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
+       struct dccp_sock *dp = dccp_sk(sk);
        int err = 0;
        const int old_state = sk->sk_state;
 
@@ -278,6 +279,10 @@ int dccp_disconnect(struct sock *sk, int flags)
                sk->sk_err = ECONNRESET;
 
        dccp_clear_xmit_timers(sk);
+       ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
+       ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+       dp->dccps_hc_rx_ccid = NULL;
+       dp->dccps_hc_tx_ccid = NULL;
 
        __skb_queue_purge(&sk->sk_receive_queue);
        __skb_queue_purge(&sk->sk_write_queue);
index 44e3fb7..1e28742 100644 (file)
@@ -51,9 +51,7 @@ static struct dsa_switch_tree *dsa_tree_alloc(int index)
        INIT_LIST_HEAD(&dst->list);
        list_add_tail(&dsa_tree_list, &dst->list);
 
-       /* Initialize the reference counter to the number of switches, not 1 */
        kref_init(&dst->refcount);
-       refcount_set(&dst->refcount.refcount, 0);
 
        return dst;
 }
@@ -64,20 +62,23 @@ static void dsa_tree_free(struct dsa_switch_tree *dst)
        kfree(dst);
 }
 
-static struct dsa_switch_tree *dsa_tree_touch(int index)
+static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
 {
-       struct dsa_switch_tree *dst;
-
-       dst = dsa_tree_find(index);
-       if (!dst)
-               dst = dsa_tree_alloc(index);
+       if (dst)
+               kref_get(&dst->refcount);
 
        return dst;
 }
 
-static void dsa_tree_get(struct dsa_switch_tree *dst)
+static struct dsa_switch_tree *dsa_tree_touch(int index)
 {
-       kref_get(&dst->refcount);
+       struct dsa_switch_tree *dst;
+
+       dst = dsa_tree_find(index);
+       if (dst)
+               return dsa_tree_get(dst);
+       else
+               return dsa_tree_alloc(index);
 }
 
 static void dsa_tree_release(struct kref *ref)
@@ -91,7 +92,8 @@ static void dsa_tree_release(struct kref *ref)
 
 static void dsa_tree_put(struct dsa_switch_tree *dst)
 {
-       kref_put(&dst->refcount, dsa_tree_release);
+       if (dst)
+               kref_put(&dst->refcount, dsa_tree_release);
 }
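
After the rework, dsa_tree_touch() either takes a reference on an existing tree or allocates one whose kref starts at 1, and dsa_tree_get()/dsa_tree_put() tolerate NULL, with dsa_register_switch() dropping its temporary reference once probing finishes. A rough sketch of that shape with a plain counter standing in for struct kref; all names below are illustrative, not the DSA API:

#include <stdio.h>
#include <stdlib.h>

struct tree {
        int index;
        unsigned int refcount;
};

static struct tree *tree_get(struct tree *t)
{
        if (t)
                t->refcount++;          /* kref_get() behind a NULL-tolerant wrapper */
        return t;
}

static void tree_release(struct tree *t)
{
        printf("releasing tree %d\n", t->index);
        free(t);
}

static void tree_put(struct tree *t)
{
        if (t && --t->refcount == 0)    /* kref_put(), also NULL-tolerant */
                tree_release(t);
}

/* Find an existing tree (stubbed out here) or allocate one owned by the caller. */
static struct tree *tree_touch(int index)
{
        struct tree *existing = NULL;   /* stands in for a real lookup */

        if (existing)
                return tree_get(existing);

        struct tree *t = calloc(1, sizeof(*t));
        if (t) {
                t->index = index;
                t->refcount = 1;        /* kref_init(): first user holds the reference */
        }
        return t;
}

int main(void)
{
        struct tree *t = tree_touch(0);

        tree_get(t);    /* second user */
        tree_put(t);
        tree_put(t);    /* last reference: released here */
        tree_put(NULL); /* harmless, like the reworked put */
        return 0;
}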
 
 static bool dsa_port_is_dsa(struct dsa_port *port)
@@ -765,6 +767,7 @@ int dsa_register_switch(struct dsa_switch *ds)
 
        mutex_lock(&dsa2_mutex);
        err = dsa_switch_probe(ds);
+       dsa_tree_put(ds->dst);
        mutex_unlock(&dsa2_mutex);
 
        return err;
index d6e7a64..a95a55f 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/of_net.h>
 #include <linux/of_mdio.h>
 #include <linux/mdio.h>
-#include <linux/list.h>
 #include <net/rtnetlink.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_mirred.h>
index a8d7c5a..6c231b4 100644 (file)
@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
 
 static int arp_constructor(struct neighbour *neigh)
 {
-       __be32 addr = *(__be32 *)neigh->primary_key;
+       __be32 addr;
        struct net_device *dev = neigh->dev;
        struct in_device *in_dev;
        struct neigh_parms *parms;
+       u32 inaddr_any = INADDR_ANY;
 
+       if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+               memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
+
+       addr = *(__be32 *)neigh->primary_key;
        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
        if (!in_dev) {
index a4573bc..7a93359 100644 (file)
@@ -1428,7 +1428,7 @@ skip:
 
 static bool inetdev_valid_mtu(unsigned int mtu)
 {
-       return mtu >= 68;
+       return mtu >= IPV4_MIN_MTU;
 }
 
 static void inetdev_send_gratuitous_arp(struct net_device *dev,
index d57aa64..61fe6e4 100644 (file)
@@ -981,6 +981,7 @@ static int esp_init_state(struct xfrm_state *x)
 
                switch (encap->encap_type) {
                default:
+                       err = -EINVAL;
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
index f8b918c..29b333a 100644 (file)
@@ -38,7 +38,8 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
        __be32 spi;
        int err;
 
-       skb_pull(skb, offset);
+       if (!pskb_pull(skb, offset))
+               return NULL;
 
        if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
                goto out;
@@ -121,6 +122,9 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
        if (!xo)
                goto out;
 
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
+               goto out;
+
        seq = xo->seq.low;
 
        x = skb->sp->xvec[skb->sp->len - 1];
index f52d27a..08259d0 100644 (file)
@@ -1298,14 +1298,19 @@ err_table_hash_alloc:
 
 static void ip_fib_net_exit(struct net *net)
 {
-       unsigned int i;
+       int i;
 
        rtnl_lock();
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
        RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
 #endif
-       for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
+       /* Destroy the tables in reverse order to guarantee that the
+        * local table, ID 255, is destroyed before the main table, ID
+        * 254. This is necessary as the local table may contain
+        * references to data contained in the main table.
+        */
+       for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
                struct hlist_head *head = &net->ipv4.fib_table_hash[i];
                struct hlist_node *tmp;
                struct fib_table *tb;
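
The new comment spells out the ordering constraint: the local table (ID 255) may reference data owned by the main table (ID 254), so the hash buckets are now walked from the highest index down. A rough sketch of tearing objects down in reverse creation order when later objects depend on earlier ones; the dependency layout below is invented for illustration:

#include <stdio.h>
#include <stdlib.h>

#define NTABLES 4

struct table {
        int id;
        struct table *depends_on;   /* may point at a lower-indexed table */
};

static void destroy(struct table *t)
{
        /* A real destructor might still dereference t->depends_on here,
         * which is why the dependency must outlive this call.
         */
        printf("destroying table %d (depends on %d)\n",
               t->id, t->depends_on ? t->depends_on->id : -1);
        free(t);
}

int main(void)
{
        struct table *tables[NTABLES];

        for (int i = 0; i < NTABLES; i++) {
                tables[i] = calloc(1, sizeof(*tables[i]));
                tables[i]->id = i;
                tables[i]->depends_on = i ? tables[0] : NULL;  /* everyone leans on table 0 */
        }

        /* Tear down in reverse creation order so dependencies die last. */
        for (int i = NTABLES - 1; i >= 0; i--)
                destroy(tables[i]);

        return 0;
}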
index f04d944..c586597 100644 (file)
@@ -698,7 +698,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
 
        nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
                int type = nla_type(nla);
-               u32 val;
+               u32 fi_val, val;
 
                if (!type)
                        continue;
@@ -715,7 +715,11 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
                        val = nla_get_u32(nla);
                }
 
-               if (fi->fib_metrics->metrics[type - 1] != val)
+               fi_val = fi->fib_metrics->metrics[type - 1];
+               if (type == RTAX_FEATURES)
+                       fi_val &= ~DST_FEATURE_ECN_CA;
+
+               if (fi_val != val)
                        return false;
        }
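
fib_metrics_match() now masks DST_FEATURE_ECN_CA out of the stored RTAX_FEATURES word before comparing it with the netlink-supplied value, since that bit is set internally when a congestion-control algorithm is chosen and is never part of the user's configuration. A hedged sketch of comparing a feature word modulo internal-only bits; the bit names and values below are invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_USER_A   0x1u
#define FEATURE_USER_B   0x2u
#define FEATURE_INTERNAL 0x8u   /* set by the stack, never by configuration */

/* Does the stored features word match what the user asked for? */
static bool features_match(uint32_t stored, uint32_t requested)
{
        return (stored & ~FEATURE_INTERNAL) == requested;
}

int main(void)
{
        uint32_t stored = FEATURE_USER_A | FEATURE_INTERNAL;  /* kernel added a bit */

        printf("%s\n", features_match(stored, FEATURE_USER_A) ? "match" : "mismatch");
        printf("%s\n", features_match(stored, FEATURE_USER_B) ? "match" : "mismatch");
        return 0;
}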
 
index d1f8f30..2d49717 100644 (file)
@@ -89,6 +89,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/times.h>
 #include <linux/pkt_sched.h>
+#include <linux/byteorder/generic.h>
 
 #include <net/net_namespace.h>
 #include <net/arp.h>
@@ -321,6 +322,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
        return scount;
 }
 
+/* source address selection per RFC 3376 section 4.2.13 */
+static __be32 igmpv3_get_srcaddr(struct net_device *dev,
+                                const struct flowi4 *fl4)
+{
+       struct in_device *in_dev = __in_dev_get_rcu(dev);
+
+       if (!in_dev)
+               return htonl(INADDR_ANY);
+
+       for_ifa(in_dev) {
+               if (fl4->saddr == ifa->ifa_local)
+                       return fl4->saddr;
+       } endfor_ifa(in_dev);
+
+       return htonl(INADDR_ANY);
+}
+
 static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
 {
        struct sk_buff *skb;
@@ -368,7 +386,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
        pip->frag_off = htons(IP_DF);
        pip->ttl      = 1;
        pip->daddr    = fl4.daddr;
-       pip->saddr    = fl4.saddr;
+       pip->saddr    = igmpv3_get_srcaddr(dev, &fl4);
        pip->protocol = IPPROTO_IGMP;
        pip->tot_len  = 0;      /* filled in later */
        ip_select_ident(net, skb, NULL);
@@ -404,16 +422,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
 }
 
 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
-       int type, struct igmpv3_grec **ppgr)
+       int type, struct igmpv3_grec **ppgr, unsigned int mtu)
 {
        struct net_device *dev = pmc->interface->dev;
        struct igmpv3_report *pih;
        struct igmpv3_grec *pgr;
 
-       if (!skb)
-               skb = igmpv3_newpack(dev, dev->mtu);
-       if (!skb)
-               return NULL;
+       if (!skb) {
+               skb = igmpv3_newpack(dev, mtu);
+               if (!skb)
+                       return NULL;
+       }
        pgr = skb_put(skb, sizeof(struct igmpv3_grec));
        pgr->grec_type = type;
        pgr->grec_auxwords = 0;
@@ -436,12 +455,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
        struct igmpv3_grec *pgr = NULL;
        struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
        int scount, stotal, first, isquery, truncate;
+       unsigned int mtu;
 
        if (pmc->multiaddr == IGMP_ALL_HOSTS)
                return skb;
        if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
                return skb;
 
+       mtu = READ_ONCE(dev->mtu);
+       if (mtu < IPV4_MIN_MTU)
+               return skb;
+
        isquery = type == IGMPV3_MODE_IS_INCLUDE ||
                  type == IGMPV3_MODE_IS_EXCLUDE;
        truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
@@ -462,7 +486,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
                    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
                        if (skb)
                                igmpv3_sendpack(skb);
-                       skb = igmpv3_newpack(dev, dev->mtu);
+                       skb = igmpv3_newpack(dev, mtu);
                }
        }
        first = 1;
@@ -498,12 +522,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
                                pgr->grec_nsrcs = htons(scount);
                        if (skb)
                                igmpv3_sendpack(skb);
-                       skb = igmpv3_newpack(dev, dev->mtu);
+                       skb = igmpv3_newpack(dev, mtu);
                        first = 1;
                        scount = 0;
                }
                if (first) {
-                       skb = add_grhead(skb, pmc, type, &pgr);
+                       skb = add_grhead(skb, pmc, type, &pgr, mtu);
                        first = 0;
                }
                if (!skb)
@@ -538,7 +562,7 @@ empty_source:
                                igmpv3_sendpack(skb);
                                skb = NULL; /* add_grhead will get a new one */
                        }
-                       skb = add_grhead(skb, pmc, type, &pgr);
+                       skb = add_grhead(skb, pmc, type, &pgr, mtu);
                }
        }
        if (pgr)
index c690cd0..b563e0c 100644 (file)
@@ -93,7 +93,7 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
 }
 
 /*
- * Enter the time wait state.
+ * Enter the time wait state. This is called with locally disabled BH.
  * Essentially we whip up a timewait bucket, copy the relevant info into it
  * from the SK, and mess with hash chains and list linkage.
  */
@@ -111,7 +111,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
-       spin_lock_bh(&bhead->lock);
+       spin_lock(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
@@ -137,7 +137,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
-       spin_unlock_bh(lock);
+       spin_unlock(lock);
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
 
index bb62391..45ffd3d 100644 (file)
@@ -266,7 +266,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
        len = gre_hdr_len + sizeof(*ershdr);
 
        if (unlikely(!pskb_may_pull(skb, len)))
-               return -ENOMEM;
+               return PACKET_REJECT;
 
        iph = ip_hdr(skb);
        ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len);
@@ -1310,6 +1310,7 @@ static const struct net_device_ops erspan_netdev_ops = {
 static void ipgre_tap_setup(struct net_device *dev)
 {
        ether_setup(dev);
+       dev->max_mtu = 0;
        dev->netdev_ops = &gre_tap_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
index fe6fee7..6d21068 100644 (file)
@@ -349,8 +349,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
        dev->needed_headroom = t_hlen + hlen;
        mtu -= (dev->hard_header_len + t_hlen);
 
-       if (mtu < 68)
-               mtu = 68;
+       if (mtu < IPV4_MIN_MTU)
+               mtu = IPV4_MIN_MTU;
 
        return mtu;
 }
@@ -520,8 +520,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
        else
                mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
-       if (skb_dst(skb))
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+       skb_dst_update_pmtu(skb, mtu);
 
        if (skb->protocol == htons(ETH_P_IP)) {
                if (!skb_is_gso(skb) &&
index 949f432..51b1669 100644 (file)
@@ -200,7 +200,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 
        mtu = dst_mtu(dst);
        if (skb->len > mtu) {
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+               skb_dst_update_pmtu(skb, mtu);
                if (skb->protocol == htons(ETH_P_IP)) {
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                  htonl(mtu));
index f88221a..0c3c944 100644 (file)
@@ -373,7 +373,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
                                        if (!xt_find_jump_offset(offsets, newpos,
                                                                 newinfo->number))
                                                return 0;
-                                       e = entry0 + newpos;
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
index 4cbe5e8..2e0d339 100644 (file)
@@ -439,7 +439,6 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                        if (!xt_find_jump_offset(offsets, newpos,
                                                                 newinfo->number))
                                                return 0;
-                                       e = entry0 + newpos;
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
index 17b4ca5..69060e3 100644 (file)
@@ -813,12 +813,13 @@ static int clusterip_net_init(struct net *net)
 
 static void clusterip_net_exit(struct net *net)
 {
-#ifdef CONFIG_PROC_FS
        struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+#ifdef CONFIG_PROC_FS
        proc_remove(cn->procdir);
        cn->procdir = NULL;
 #endif
        nf_unregister_net_hook(net, &cip_arp_ops);
+       WARN_ON_ONCE(!list_empty(&cn->configs));
 }
 
 static struct pernet_operations clusterip_net_ops = {
index 33b70bf..5e570aa 100644 (file)
@@ -513,11 +513,18 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        int err;
        struct ip_options_data opt_copy;
        struct raw_frag_vec rfv;
+       int hdrincl;
 
        err = -EMSGSIZE;
        if (len > 0xFFFF)
                goto out;
 
+       /* hdrincl should be READ_ONCE(inet->hdrincl)
+        * but READ_ONCE() doesn't work with bit fields.
+        * Doing this indirectly yields the same result.
+        */
+       hdrincl = inet->hdrincl;
+       hdrincl = READ_ONCE(hdrincl);
        /*
         *      Check the flags.
         */
@@ -593,7 +600,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                /* Linux does not mangle headers on raw sockets,
                 * so that IP options + IP_HDRINCL is non-sense.
                 */
-               if (inet->hdrincl)
+               if (hdrincl)
                        goto done;
                if (ipc.opt->opt.srr) {
                        if (!daddr)
@@ -615,12 +622,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
                           RT_SCOPE_UNIVERSE,
-                          inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+                          hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                           inet_sk_flowi_flags(sk) |
-                           (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
+                           (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
                           daddr, saddr, 0, 0, sk->sk_uid);
 
-       if (!inet->hdrincl) {
+       if (!hdrincl) {
                rfv.msg = msg;
                rfv.hlen = 0;
 
@@ -645,7 +652,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                goto do_confirm;
 back_from_confirm:
 
-       if (inet->hdrincl)
+       if (hdrincl)
                err = raw_send_hdrinc(sk, &fl4, msg, len,
                                      &rt, msg->msg_flags, &ipc.sockc);
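
Because inet->hdrincl is a bit field it cannot be passed to READ_ONCE() directly, so raw_sendmsg() snapshots it into a plain int once and uses that local copy for every later decision; otherwise a concurrent setsockopt() could flip the flag between checks. A hedged userspace sketch of the snapshot idea, with a C11 atomic standing in for the shared socket flag:

#include <stdatomic.h>
#include <stdio.h>

/* A flag another thread may change at any time (stand-in for inet->hdrincl). */
static _Atomic int hdrincl_flag = 1;

static void sendmsg_path(void)
{
        /* Read the racy flag exactly once and use the local copy throughout,
         * so every branch below agrees even if the flag changes mid-call.
         */
        int hdrincl = atomic_load_explicit(&hdrincl_flag, memory_order_relaxed);

        if (hdrincl)
                printf("caller supplies the IP header\n");
        else
                printf("kernel builds the IP header\n");

        /* Later decisions reuse 'hdrincl'; the shared flag is never re-read. */
        printf("checksum strategy: %s\n", hdrincl ? "as provided" : "computed");
}

int main(void)
{
        sendmsg_path();
        return 0;
}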
 
index 43b69af..4e153b2 100644 (file)
@@ -2762,6 +2762,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                if (err == 0 && rt->dst.error)
                        err = -rt->dst.error;
        } else {
+               fl4.flowi4_iif = LOOPBACK_IFINDEX;
                rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
                err = 0;
                if (IS_ERR(rt))
index bf97317..8e053ad 100644 (file)
@@ -2298,6 +2298,9 @@ adjudge_to_death:
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPABORTONMEMORY);
+               } else if (!check_net(sock_net(sk))) {
+                       /* Not possible to send reset; just close */
+                       tcp_set_state(sk, TCP_CLOSE);
                }
        }
 
@@ -2412,6 +2415,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->snd_cwnd_cnt = 0;
        tp->window_clamp = 0;
        tcp_set_ca_state(sk, TCP_CA_Open);
+       tp->is_sack_reneg = 0;
        tcp_clear_retrans(tp);
        inet_csk_delack_init(sk);
        /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
index 69ee877..8322f26 100644 (file)
@@ -110,7 +110,8 @@ struct bbr {
        u32     lt_last_lost;        /* LT intvl start: tp->lost */
        u32     pacing_gain:10, /* current gain for setting pacing rate */
                cwnd_gain:10,   /* current gain for setting cwnd */
-               full_bw_cnt:3,  /* number of rounds without large bw gains */
+               full_bw_reached:1,   /* reached full bw in Startup? */
+               full_bw_cnt:2,  /* number of rounds without large bw gains */
                cycle_idx:3,    /* current index in pacing_gain cycle array */
                has_seen_rtt:1, /* have we seen an RTT sample yet? */
                unused_b:5;
@@ -180,7 +181,7 @@ static bool bbr_full_bw_reached(const struct sock *sk)
 {
        const struct bbr *bbr = inet_csk_ca(sk);
 
-       return bbr->full_bw_cnt >= bbr_full_bw_cnt;
+       return bbr->full_bw_reached;
 }
 
 /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
@@ -717,6 +718,7 @@ static void bbr_check_full_bw_reached(struct sock *sk,
                return;
        }
        ++bbr->full_bw_cnt;
+       bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
 }
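
BBR now latches "full bandwidth reached" in its own flag rather than inferring it from full_bw_cnt, so bbr_undo_cwnd() can clear the counter and restart full-pipe detection without forgetting that Startup already ended. A rough sketch of latching a threshold crossing separately from the counter that detects it; the 25% growth test mirrors BBR's idea but the numbers are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define FULL_BW_ROUNDS 3   /* rounds without bandwidth growth before declaring "full" */

struct estimator {
        unsigned int full_bw;        /* best bandwidth seen so far */
        unsigned int full_bw_cnt;    /* consecutive rounds without ~25% growth */
        bool full_bw_reached;        /* latched: stays set once the pipe looked full */
};

static void check_full_bw(struct estimator *e, unsigned int bw)
{
        if (e->full_bw_reached)
                return;                             /* already latched */

        if (bw >= e->full_bw + e->full_bw / 4) {    /* still growing by >= 25% */
                e->full_bw = bw;
                e->full_bw_cnt = 0;
                return;
        }
        e->full_bw_cnt++;
        e->full_bw_reached = e->full_bw_cnt >= FULL_BW_ROUNDS;
}

int main(void)
{
        struct estimator e = { 0 };
        unsigned int samples[] = { 100, 130, 170, 172, 171, 173 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                check_full_bw(&e, samples[i]);
                printf("bw %u -> cnt %u, reached %d\n",
                       samples[i], e.full_bw_cnt, e.full_bw_reached);
        }
        return 0;
}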
 
 /* If pipe is probably full, drain the queue and then enter steady-state. */
@@ -850,6 +852,7 @@ static void bbr_init(struct sock *sk)
        bbr->restore_cwnd = 0;
        bbr->round_start = 0;
        bbr->idle_restart = 0;
+       bbr->full_bw_reached = 0;
        bbr->full_bw = 0;
        bbr->full_bw_cnt = 0;
        bbr->cycle_mstamp = 0;
@@ -871,6 +874,11 @@ static u32 bbr_sndbuf_expand(struct sock *sk)
  */
 static u32 bbr_undo_cwnd(struct sock *sk)
 {
+       struct bbr *bbr = inet_csk_ca(sk);
+
+       bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
+       bbr->full_bw_cnt = 0;
+       bbr_reset_lt_bw_sampling(sk);
        return tcp_sk(sk)->snd_cwnd;
 }
 
index 734cfc8..45f750e 100644 (file)
@@ -508,9 +508,6 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
        u32 new_sample = tp->rcv_rtt_est.rtt_us;
        long m = sample;
 
-       if (m == 0)
-               m = 1;
-
        if (new_sample != 0) {
                /* If we sample in larger samples in the non-timestamp
                 * case, we could grossly overestimate the RTT especially
@@ -547,6 +544,8 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
        if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
                return;
        delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
+       if (!delta_us)
+               delta_us = 1;
        tcp_rcv_rtt_update(tp, delta_us, 1);
 
 new_measure:
@@ -563,8 +562,11 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
            (TCP_SKB_CB(skb)->end_seq -
             TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
                u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
-               u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+               u32 delta_us;
 
+               if (!delta)
+                       delta = 1;
+               delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
                tcp_rcv_rtt_update(tp, delta_us, 0);
        }
 }
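
tcp_rcv_rtt_update() treats a zero sample as "nothing measured yet", so the measurement paths now clamp a genuinely zero delta to 1 microsecond instead of feeding in the sentinel value. A small sketch of the same convention, with an invented smoothing step standing in for the real estimator:

#include <stdio.h>

/* 0 is reserved to mean "no estimate yet", so real samples are clamped to >= 1. */
static unsigned int rtt_estimate;

static void rtt_update(unsigned int sample_us)
{
        if (!sample_us)
                return;                         /* sentinel, not a measurement */

        if (!rtt_estimate) {
                rtt_estimate = sample_us;       /* first real sample */
        } else {
                long m = (long)sample_us - (long)rtt_estimate;

                rtt_estimate += m / 8;          /* EWMA-style smoothing */
        }
}

static unsigned int measure(unsigned int now_us, unsigned int then_us)
{
        unsigned int delta = now_us - then_us;

        return delta ? delta : 1;               /* a same-tick delta still counts as 1us */
}

int main(void)
{
        rtt_update(measure(1000, 1000));        /* would be 0us; clamped to 1 */
        rtt_update(measure(2500, 2100));        /* 400us */
        printf("estimate: %u us\n", rtt_estimate);
        return 0;
}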
@@ -579,6 +581,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
        int time;
        int copied;
 
+       tcp_mstamp_refresh(tp);
        time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
        if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
                return;
@@ -1941,6 +1944,8 @@ void tcp_enter_loss(struct sock *sk)
        if (is_reneg) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
                tp->sacked_out = 0;
+               /* Mark SACK reneging until we recover from this loss event. */
+               tp->is_sack_reneg = 1;
        }
        tcp_clear_all_retrans_hints(tp);
 
@@ -2326,6 +2331,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
        }
        tp->snd_cwnd_stamp = tcp_jiffies32;
        tp->undo_marker = 0;
+       tp->rack.advanced = 1; /* Force RACK to re-exam losses */
 }
 
 static inline bool tcp_may_undo(const struct tcp_sock *tp)
@@ -2364,6 +2370,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
                return true;
        }
        tcp_set_ca_state(sk, TCP_CA_Open);
+       tp->is_sack_reneg = 0;
        return false;
 }
 
@@ -2397,8 +2404,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
                        NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPSPURIOUSRTOS);
                inet_csk(sk)->icsk_retransmits = 0;
-               if (frto_undo || tcp_is_sack(tp))
+               if (frto_undo || tcp_is_sack(tp)) {
                        tcp_set_ca_state(sk, TCP_CA_Open);
+                       tp->is_sack_reneg = 0;
+               }
                return true;
        }
        return false;
@@ -3495,6 +3504,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        struct tcp_sacktag_state sack_state;
        struct rate_sample rs = { .prior_delivered = 0 };
        u32 prior_snd_una = tp->snd_una;
+       bool is_sack_reneg = tp->is_sack_reneg;
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
        bool is_dupack = false;
@@ -3611,7 +3621,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        delivered = tp->delivered - delivered;  /* freshly ACKed or SACKed */
        lost = tp->lost - lost;                 /* freshly marked lost */
-       tcp_rate_gen(sk, delivered, lost, sack_state.rate);
+       tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
        tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
        tcp_xmit_recovery(sk, rexmit);
        return 1;
index c6bc0c4..94e2835 100644 (file)
@@ -848,7 +848,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                        tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
                        req->ts_recent,
                        0,
-                       tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
+                       tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
                                          AF_INET),
                        inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        ip_hdr(skb)->tos);
@@ -1591,6 +1591,34 @@ int tcp_filter(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_filter);
 
+static void tcp_v4_restore_cb(struct sk_buff *skb)
+{
+       memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
+               sizeof(struct inet_skb_parm));
+}
+
+static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
+                          const struct tcphdr *th)
+{
+       /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
+        * barrier() makes sure compiler wont play fool^Waliasing games.
+        */
+       memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
+               sizeof(struct inet_skb_parm));
+       barrier();
+
+       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+                                   skb->len - th->doff * 4);
+       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
+       TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
+       TCP_SKB_CB(skb)->sacked  = 0;
+       TCP_SKB_CB(skb)->has_rxtstamp =
+                       skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
+}
+
 /*
  *     From tcp_input.c
  */
@@ -1631,24 +1659,6 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
        th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
-       /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
-        * barrier() makes sure compiler wont play fool^Waliasing games.
-        */
-       memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
-               sizeof(struct inet_skb_parm));
-       barrier();
-
-       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
-       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
-                                   skb->len - th->doff * 4);
-       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
-       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
-       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
-       TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
-       TCP_SKB_CB(skb)->sacked  = 0;
-       TCP_SKB_CB(skb)->has_rxtstamp =
-                       skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
-
 lookup:
        sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
                               th->dest, sdif, &refcounted);
@@ -1679,14 +1689,19 @@ process:
                sock_hold(sk);
                refcounted = true;
                nsk = NULL;
-               if (!tcp_filter(sk, skb))
+               if (!tcp_filter(sk, skb)) {
+                       th = (const struct tcphdr *)skb->data;
+                       iph = ip_hdr(skb);
+                       tcp_v4_fill_cb(skb, iph, th);
                        nsk = tcp_check_req(sk, skb, req, false);
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_and_relse;
                }
                if (nsk == sk) {
                        reqsk_put(req);
+                       tcp_v4_restore_cb(skb);
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v4_send_reset(nsk, skb);
                        goto discard_and_relse;
@@ -1712,6 +1727,7 @@ process:
                goto discard_and_relse;
        th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
+       tcp_v4_fill_cb(skb, iph, th);
 
        skb->dev = NULL;
 
@@ -1742,6 +1758,8 @@ no_tcp_socket:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
 
+       tcp_v4_fill_cb(skb, iph, th);
+
        if (tcp_checksum_complete(skb)) {
 csum_error:
                __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
@@ -1768,6 +1786,8 @@ do_time_wait:
                goto discard_it;
        }
 
+       tcp_v4_fill_cb(skb, iph, th);
+
        if (tcp_checksum_complete(skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto csum_error;
@@ -1784,6 +1804,7 @@ do_time_wait:
                if (sk2) {
                        inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
+                       tcp_v4_restore_cb(skb);
                        refcounted = false;
                        goto process;
                }
index e36eff0..b079b61 100644 (file)
@@ -310,10 +310,16 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;
 
+               /* tw_timer is pinned, so we need to make sure BH are disabled
+                * in following section, otherwise timer handler could run before
+                * we complete the initialization.
+                */
+               local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
                inet_twsk_put(tw);
+               local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
index b6a2aa1..4d58e2c 100644 (file)
@@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
 {
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
+               return ERR_PTR(-EINVAL);
+
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                return ERR_PTR(-EINVAL);
 
index 3330a37..c61240e 100644 (file)
@@ -106,7 +106,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 
 /* Update the connection delivery information and generate a rate sample. */
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
-                 struct rate_sample *rs)
+                 bool is_sack_reneg, struct rate_sample *rs)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        u32 snd_us, ack_us;
@@ -124,8 +124,12 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 
        rs->acked_sacked = delivered;   /* freshly ACKed or SACKed */
        rs->losses = lost;              /* freshly marked lost */
-       /* Return an invalid sample if no timing information is available. */
-       if (!rs->prior_mstamp) {
+       /* Return an invalid sample if no timing information is available or
+        * in recovery from loss with SACK reneging. Rate samples taken during
+        * a SACK reneging event may overestimate bw by including packets that
+        * were SACKed before the reneg.
+        */
+       if (!rs->prior_mstamp || is_sack_reneg) {
                rs->delivered = -1;
                rs->interval_us = -1;
                return;
index d3ea890..3a81720 100644 (file)
@@ -55,7 +55,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
         * to queuing or delayed ACKs.
         */
        reo_wnd = 1000;
-       if ((tp->rack.reord || !tp->lost_out) && min_rtt != ~0U) {
+       if ((tp->rack.reord || inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery) &&
+           min_rtt != ~0U) {
                reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
                reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
        }
@@ -79,12 +80,12 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
                 */
                remaining = tp->rack.rtt_us + reo_wnd -
                            tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
-               if (remaining < 0) {
+               if (remaining <= 0) {
                        tcp_rack_mark_skb_lost(sk, skb);
                        list_del_init(&skb->tcp_tsorted_anchor);
                } else {
-                       /* Record maximum wait time (+1 to avoid 0) */
-                       *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);
+                       /* Record maximum wait time */
+                       *reo_timeout = max_t(u32, *reo_timeout, remaining);
                }
        }
 }
@@ -116,13 +117,8 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
 {
        u32 rtt_us;
 
-       if (tp->rack.mstamp &&
-           !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
-                                end_seq, tp->rack.end_seq))
-               return;
-
        rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
-       if (sacked & TCPCB_RETRANS) {
+       if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or the prior
                 * retransmission) was sacked.
@@ -133,13 +129,15 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                 * so it's at least one RTT (i.e., retransmission is at least
                 * an RTT later).
                 */
-               if (rtt_us < tcp_min_rtt(tp))
-                       return;
+               return;
        }
-       tp->rack.rtt_us = rtt_us;
-       tp->rack.mstamp = xmit_time;
-       tp->rack.end_seq = end_seq;
        tp->rack.advanced = 1;
+       tp->rack.rtt_us = rtt_us;
+       if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
+                               end_seq, tp->rack.end_seq)) {
+               tp->rack.mstamp = xmit_time;
+               tp->rack.end_seq = end_seq;
+       }
 }
 
 /* We have waited long enough to accommodate reordering. Mark the expired
index 16df6dd..388158c 100644 (file)
@@ -48,11 +48,19 @@ static void tcp_write_err(struct sock *sk)
  *  to prevent DoS attacks. It is called when a retransmission timeout
  *  or zero probe timeout occurs on orphaned socket.
  *
+ *  Also close if our net namespace is exiting; in that case there is no
+ *  hope of ever communicating again since all netns interfaces are already
+ *  down (or about to be down), and we need to release our dst references,
+ *  which have been moved to the netns loopback interface, so the namespace
+ *  can finish exiting.  This condition is only possible if we are a kernel
+ *  socket, as those do not hold references to the namespace.
+ *
  *  Criteria is still not confirmed experimentally and may change.
  *  We kill the socket, if:
  *  1. If number of orphaned sockets exceeds an administratively configured
  *     limit.
  *  2. If we have strong memory pressure.
+ *  3. If our net namespace is exiting.
  */
 static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 {
@@ -81,6 +89,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
                return 1;
        }
+
+       if (!check_net(sock_net(sk))) {
+               /* Not possible to send reset; just close */
+               tcp_done(sk);
+               return 1;
+       }
+
        return 0;
 }
 
@@ -264,6 +279,7 @@ void tcp_delack_timer_handler(struct sock *sk)
                        icsk->icsk_ack.pingpong = 0;
                        icsk->icsk_ack.ato      = TCP_ATO_MIN;
                }
+               tcp_mstamp_refresh(tcp_sk(sk));
                tcp_send_ack(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
@@ -632,6 +648,7 @@ static void tcp_keepalive_timer (struct timer_list *t)
                goto out;
        }
 
+       tcp_mstamp_refresh(tp);
        if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
                if (tp->linger2 >= 0) {
                        const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
index 01801b7..ea6e6e7 100644 (file)
@@ -203,6 +203,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                goto out;
        }
 
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
+               goto out;
+
        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                goto out;
 
index e50b7fe..bcfc00e 100644 (file)
@@ -23,6 +23,12 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
        return xfrm4_extract_header(skb);
 }
 
+static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk,
+                                  struct sk_buff *skb)
+{
+       return dst_input(skb);
+}
+
 static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
                                         struct sk_buff *skb)
 {
@@ -33,7 +39,11 @@ static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
                                         iph->tos, skb->dev))
                        goto drop;
        }
-       return dst_input(skb);
+
+       if (xfrm_trans_queue(skb, xfrm4_rcv_encap_finish2))
+               goto drop;
+
+       return 0;
 drop:
        kfree_skb(skb);
        return NET_RX_DROP;
index e6265e2..20ca486 100644 (file)
@@ -92,6 +92,7 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 
        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);
+       eth_hdr(skb)->h_proto = skb->protocol;
 
        err = 0;
 
index c26f712..c9441ca 100644 (file)
@@ -210,7 +210,6 @@ lookup_protocol:
        np->mcast_hops  = IPV6_DEFAULT_MCASTHOPS;
        np->mc_loop     = 1;
        np->pmtudisc    = IPV6_PMTUDISC_WANT;
-       np->autoflowlabel = ip6_default_np_autolabel(net);
        np->repflow     = net->ipv6.sysctl.flowlabel_reflect;
        sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
 
index a902ff8..1a7f00c 100644 (file)
@@ -890,13 +890,12 @@ static int esp6_init_state(struct xfrm_state *x)
                        x->props.header_len += IPV4_BEET_PHMAXLEN +
                                               (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
                break;
+       default:
        case XFRM_MODE_TRANSPORT:
                break;
        case XFRM_MODE_TUNNEL:
                x->props.header_len += sizeof(struct ipv6hdr);
                break;
-       default:
-               goto error;
        }
 
        align = ALIGN(crypto_aead_blocksize(aead), 4);
index 333a478..f52c314 100644 (file)
@@ -60,7 +60,8 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
        int nhoff;
        int err;
 
-       skb_pull(skb, offset);
+       if (!pskb_pull(skb, offset))
+               return NULL;
 
        if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
                goto out;
@@ -148,6 +149,9 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
        if (!xo)
                goto out;
 
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
+               goto out;
+
        seq = xo->seq.low;
 
        x = skb->sp->xvec[skb->sp->len - 1];
index 83bd757..bc68eb6 100644 (file)
@@ -925,6 +925,15 @@ static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
        sr_phdr->segments[0] = **addr_p;
        *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];
 
+       if (sr_ihdr->hdrlen > hops * 2) {
+               int tlvs_offset, tlvs_length;
+
+               tlvs_offset = (1 + hops * 2) << 3;
+               tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
+               memcpy((char *)sr_phdr + tlvs_offset,
+                      (char *)sr_ihdr + tlvs_offset, tlvs_length);
+       }
+
 #ifdef CONFIG_IPV6_SEG6_HMAC
        if (sr_has_hmac(sr_phdr)) {
                struct net *net = NULL;
index f5285f4..217683d 100644 (file)
@@ -640,6 +640,11 @@ static struct fib6_node *fib6_add_1(struct net *net,
                        if (!(fn->fn_flags & RTN_RTINFO)) {
                                RCU_INIT_POINTER(fn->leaf, NULL);
                                rt6_release(leaf);
+                       /* remove null_entry in the root node */
+                       } else if (fn->fn_flags & RTN_TL_ROOT &&
+                                  rcu_access_pointer(fn->leaf) ==
+                                  net->ipv6.ip6_null_entry) {
+                               RCU_INIT_POINTER(fn->leaf, NULL);
                        }
 
                        return fn;
@@ -1221,8 +1226,14 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
                }
 
                if (!rcu_access_pointer(fn->leaf)) {
-                       atomic_inc(&rt->rt6i_ref);
-                       rcu_assign_pointer(fn->leaf, rt);
+                       if (fn->fn_flags & RTN_TL_ROOT) {
+                               /* put back null_entry for root node */
+                               rcu_assign_pointer(fn->leaf,
+                                           info->nl_net->ipv6.ip6_null_entry);
+                       } else {
+                               atomic_inc(&rt->rt6i_ref);
+                               rcu_assign_pointer(fn->leaf, rt);
+                       }
                }
                fn = sn;
        }
@@ -1241,23 +1252,28 @@ out:
                 * If fib6_add_1 has cleared the old leaf pointer in the
                 * super-tree leaf node we have to find a new one for it.
                 */
-               struct rt6_info *pn_leaf = rcu_dereference_protected(pn->leaf,
-                                           lockdep_is_held(&table->tb6_lock));
-               if (pn != fn && pn_leaf == rt) {
-                       pn_leaf = NULL;
-                       RCU_INIT_POINTER(pn->leaf, NULL);
-                       atomic_dec(&rt->rt6i_ref);
-               }
-               if (pn != fn && !pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
-                       pn_leaf = fib6_find_prefix(info->nl_net, table, pn);
-#if RT6_DEBUG >= 2
-                       if (!pn_leaf) {
-                               WARN_ON(!pn_leaf);
-                               pn_leaf = info->nl_net->ipv6.ip6_null_entry;
+               if (pn != fn) {
+                       struct rt6_info *pn_leaf =
+                               rcu_dereference_protected(pn->leaf,
+                                   lockdep_is_held(&table->tb6_lock));
+                       if (pn_leaf == rt) {
+                               pn_leaf = NULL;
+                               RCU_INIT_POINTER(pn->leaf, NULL);
+                               atomic_dec(&rt->rt6i_ref);
                        }
+                       if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
+                               pn_leaf = fib6_find_prefix(info->nl_net, table,
+                                                          pn);
+#if RT6_DEBUG >= 2
+                               if (!pn_leaf) {
+                                       WARN_ON(!pn_leaf);
+                                       pn_leaf =
+                                           info->nl_net->ipv6.ip6_null_entry;
+                               }
 #endif
-                       atomic_inc(&pn_leaf->rt6i_ref);
-                       rcu_assign_pointer(pn->leaf, pn_leaf);
+                               atomic_inc(&pn_leaf->rt6i_ref);
+                               rcu_assign_pointer(pn->leaf, pn_leaf);
+                       }
                }
 #endif
                goto failure;
@@ -1265,13 +1281,17 @@ out:
        return err;
 
 failure:
-       /* fn->leaf could be NULL if fn is an intermediate node and we
-        * failed to add the new route to it in both subtree creation
-        * failure and fib6_add_rt2node() failure case.
-        * In both cases, fib6_repair_tree() should be called to fix
-        * fn->leaf.
+       /* fn->leaf could be NULL and fib6_repair_tree() needs to be called if:
+        * 1. fn is an intermediate node and we failed to add the new
+        * route to it in both subtree creation failure and fib6_add_rt2node()
+        * failure case.
+        * 2. fn is the root node in the table and we fail to add the first
+        * default route to it.
         */
-       if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
+       if (fn &&
+           (!(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)) ||
+            (fn->fn_flags & RTN_TL_ROOT &&
+             !rcu_access_pointer(fn->leaf))))
                fib6_repair_tree(info->nl_net, table, fn);
        /* Always release dst as dst->__refcnt is guaranteed
         * to be taken before entering this function
@@ -1526,6 +1546,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
        struct fib6_walker *w;
        int iter = 0;
 
+       /* Set fn->leaf to null_entry for root node. */
+       if (fn->fn_flags & RTN_TL_ROOT) {
+               rcu_assign_pointer(fn->leaf, net->ipv6.ip6_null_entry);
+               return fn;
+       }
+
        for (;;) {
                struct fib6_node *fn_r = rcu_dereference_protected(fn->right,
                                            lockdep_is_held(&table->tb6_lock));
@@ -1680,10 +1706,15 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
        }
        read_unlock(&net->ipv6.fib6_walker_lock);
 
-       /* If it was last route, expunge its radix tree node */
+       /* If it was last route, call fib6_repair_tree() to:
+        * 1. For the root node, put back null_entry as when the table was created.
+        * 2. For other nodes, expunge its radix tree node.
+        */
        if (!rcu_access_pointer(fn->leaf)) {
-               fn->fn_flags &= ~RTN_RTINFO;
-               net->ipv6.rt6_stats->fib_route_nodes--;
+               if (!(fn->fn_flags & RTN_TL_ROOT)) {
+                       fn->fn_flags &= ~RTN_RTINFO;
+                       net->ipv6.rt6_stats->fib_route_nodes--;
+               }
                fn = fib6_repair_tree(net, table, fn);
        }
 
index 4cfd8e0..8735492 100644 (file)
@@ -337,11 +337,12 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
 
        nt->dev = dev;
        nt->net = dev_net(dev);
-       ip6gre_tnl_link_config(nt, 1);
 
        if (register_netdevice(dev) < 0)
                goto failed_free;
 
+       ip6gre_tnl_link_config(nt, 1);
+
        /* Can use a lockless transmit, unless we generate output sequences */
        if (!(nt->parms.o_flags & TUNNEL_SEQ))
                dev->features |= NETIF_F_LLTX;
@@ -1014,6 +1015,36 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
        eth_random_addr(dev->perm_addr);
 }
 
+#define GRE6_FEATURES (NETIF_F_SG |            \
+                      NETIF_F_FRAGLIST |       \
+                      NETIF_F_HIGHDMA |        \
+                      NETIF_F_HW_CSUM)
+
+static void ip6gre_tnl_init_features(struct net_device *dev)
+{
+       struct ip6_tnl *nt = netdev_priv(dev);
+
+       dev->features           |= GRE6_FEATURES;
+       dev->hw_features        |= GRE6_FEATURES;
+
+       if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
+               /* TCP offload with GRE SEQ is not supported, nor
+                * can we support 2 levels of outer headers requiring
+                * an update.
+                */
+               if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
+                   nt->encap.type == TUNNEL_ENCAP_NONE) {
+                       dev->features    |= NETIF_F_GSO_SOFTWARE;
+                       dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+               }
+
+               /* Can use a lockless transmit, unless we generate
+                * output sequences
+                */
+               dev->features |= NETIF_F_LLTX;
+       }
+}
+
 static int ip6gre_tunnel_init_common(struct net_device *dev)
 {
        struct ip6_tnl *tunnel;
@@ -1048,6 +1079,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
        if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                dev->mtu -= 8;
 
+       ip6gre_tnl_init_features(dev);
+
        return 0;
 }
 
@@ -1271,7 +1304,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
 
 static int ip6gre_tap_init(struct net_device *dev)
 {
-       struct ip6_tnl *tunnel;
        int ret;
 
        ret = ip6gre_tunnel_init_common(dev);
@@ -1280,10 +1312,6 @@ static int ip6gre_tap_init(struct net_device *dev)
 
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
-       tunnel = netdev_priv(dev);
-
-       ip6gre_tnl_link_config(tunnel, 1);
-
        return 0;
 }
 
@@ -1298,16 +1326,12 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
        .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
-#define GRE6_FEATURES (NETIF_F_SG |            \
-                      NETIF_F_FRAGLIST |       \
-                      NETIF_F_HIGHDMA |                \
-                      NETIF_F_HW_CSUM)
-
 static void ip6gre_tap_setup(struct net_device *dev)
 {
 
        ether_setup(dev);
 
+       dev->max_mtu = 0;
        dev->netdev_ops = &ip6gre_tap_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
@@ -1380,32 +1404,16 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
 
        nt->dev = dev;
        nt->net = dev_net(dev);
-       ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
-
-       dev->features           |= GRE6_FEATURES;
-       dev->hw_features        |= GRE6_FEATURES;
-
-       if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
-               /* TCP offload with GRE SEQ is not supported, nor
-                * can we support 2 levels of outer headers requiring
-                * an update.
-                */
-               if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
-                   (nt->encap.type == TUNNEL_ENCAP_NONE)) {
-                       dev->features    |= NETIF_F_GSO_SOFTWARE;
-                       dev->hw_features |= NETIF_F_GSO_SOFTWARE;
-               }
-
-               /* Can use a lockless transmit, unless we generate
-                * output sequences
-                */
-               dev->features |= NETIF_F_LLTX;
-       }
 
        err = register_netdevice(dev);
        if (err)
                goto out;
 
+       ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+
+       if (tb[IFLA_MTU])
+               ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+
        dev_hold(dev);
        ip6gre_tunnel_link(ign, nt);
 
index 5110a41..3763dc0 100644 (file)
@@ -166,6 +166,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
+bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+{
+       if (!np->autoflowlabel_set)
+               return ip6_default_np_autolabel(net);
+       else
+               return np->autoflowlabel;
+}
+
 /*
  * xmit an sk_buff (used by TCP, SCTP and DCCP)
  * Note : socket lock is not held for SYNACK packets, but might be modified
@@ -230,7 +238,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
                hlimit = ip6_dst_hoplimit(dst);
 
        ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                                    np->autoflowlabel, fl6));
+                               ip6_autoflowlabel(net, np), fl6));
 
        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
@@ -1198,14 +1206,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
        v6_cork->tclass = ipc6->tclass;
        if (rt->dst.flags & DST_XFRM_TUNNEL)
                mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
-                     rt->dst.dev->mtu : dst_mtu(&rt->dst);
+                     READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
        else
                mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
-                     rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+                     READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
        if (np->frag_size < mtu) {
                if (np->frag_size)
                        mtu = np->frag_size;
        }
+       if (mtu < IPV6_MIN_MTU)
+               return -EINVAL;
        cork->base.fragsize = mtu;
        if (dst_allfrag(rt->dst.path))
                cork->base.flags |= IPCORK_ALLFRAG;
@@ -1626,7 +1636,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
 
        ip6_flow_hdr(hdr, v6_cork->tclass,
                     ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                       np->autoflowlabel, fl6));
+                                       ip6_autoflowlabel(net, np), fl6));
        hdr->hop_limit = v6_cork->hop_limit;
        hdr->nexthdr = proto;
        hdr->saddr = fl6->saddr;
@@ -1725,11 +1735,13 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
        cork.base.flags = 0;
        cork.base.addr = 0;
        cork.base.opt = NULL;
+       cork.base.dst = NULL;
        v6_cork.opt = NULL;
        err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
-       if (err)
+       if (err) {
+               ip6_cork_release(&cork, &v6_cork);
                return ERR_PTR(err);
-
+       }
        if (ipc6->dontfrag < 0)
                ipc6->dontfrag = inet6_sk(sk)->dontfrag;
 
index 3d3092a..1ee5584 100644 (file)
@@ -642,8 +642,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                if (rel_info > dst_mtu(skb_dst(skb2)))
                        goto out;
 
-               skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2,
-                                               rel_info);
+               skb_dst_update_pmtu(skb2, rel_info);
        }
 
        icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -904,7 +903,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
                if (t->parms.collect_md) {
                        tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
                        if (!tun_dst)
-                               return 0;
+                               goto drop;
                }
                ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
                                    log_ecn_error);
@@ -1074,10 +1073,11 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
                        memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
                        neigh_release(neigh);
                }
-       } else if (!(t->parms.flags &
-                    (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
-               /* enable the cache only only if the routing decision does
-                * not depend on the current inner header value
+       } else if (t->parms.proto != 0 && !(t->parms.flags &
+                                           (IP6_TNL_F_USE_ORIG_TCLASS |
+                                            IP6_TNL_F_USE_ORIG_FWMARK))) {
+               /* enable the cache only if neither the outer protocol nor the
+                * routing decision depends on the current inner header value
                 */
                use_cache = true;
        }
@@ -1123,10 +1123,14 @@ route_lookup:
                max_headroom += 8;
                mtu -= 8;
        }
-       if (mtu < IPV6_MIN_MTU)
-               mtu = IPV6_MIN_MTU;
-       if (skb_dst(skb) && !t->parms.collect_md)
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+       if (skb->protocol == htons(ETH_P_IPV6)) {
+               if (mtu < IPV6_MIN_MTU)
+                       mtu = IPV6_MIN_MTU;
+       } else if (mtu < 576) {
+               mtu = 576;
+       }
+
+       skb_dst_update_pmtu(skb, mtu);
        if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
                *pmtu = mtu;
                err = -EMSGSIZE;
@@ -1671,11 +1675,11 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct ip6_tnl *tnl = netdev_priv(dev);
 
-       if (tnl->parms.proto == IPPROTO_IPIP) {
-               if (new_mtu < ETH_MIN_MTU)
+       if (tnl->parms.proto == IPPROTO_IPV6) {
+               if (new_mtu < IPV6_MIN_MTU)
                        return -EINVAL;
        } else {
-               if (new_mtu < IPV6_MIN_MTU)
+               if (new_mtu < ETH_MIN_MTU)
                        return -EINVAL;
        }
        if (new_mtu > 0xFFF8 - dev->hard_header_len)
index dbb74f3..8c184f8 100644 (file)
@@ -483,7 +483,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
        mtu = dst_mtu(dst);
        if (!skb->ignore_df && skb->len > mtu) {
-               skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
+               skb_dst_update_pmtu(skb, mtu);
 
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        if (mtu < IPV6_MIN_MTU)
index b9404fe..e8ffb5b 100644 (file)
@@ -886,6 +886,7 @@ pref_skip_coa:
                break;
        case IPV6_AUTOFLOWLABEL:
                np->autoflowlabel = valbool;
+               np->autoflowlabel_set = 1;
                retv = 0;
                break;
        case IPV6_RECVFRAGSIZE:
@@ -1335,7 +1336,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                break;
 
        case IPV6_AUTOFLOWLABEL:
-               val = np->autoflowlabel;
+               val = ip6_autoflowlabel(sock_net(sk), np);
                break;
 
        case IPV6_RECVFRAGSIZE:
index fc6d7d1..8446426 100644 (file)
@@ -1682,16 +1682,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
 }
 
 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
-       int type, struct mld2_grec **ppgr)
+       int type, struct mld2_grec **ppgr, unsigned int mtu)
 {
-       struct net_device *dev = pmc->idev->dev;
        struct mld2_report *pmr;
        struct mld2_grec *pgr;
 
-       if (!skb)
-               skb = mld_newpack(pmc->idev, dev->mtu);
-       if (!skb)
-               return NULL;
+       if (!skb) {
+               skb = mld_newpack(pmc->idev, mtu);
+               if (!skb)
+                       return NULL;
+       }
        pgr = skb_put(skb, sizeof(struct mld2_grec));
        pgr->grec_type = type;
        pgr->grec_auxwords = 0;
@@ -1714,10 +1714,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
        struct mld2_grec *pgr = NULL;
        struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
        int scount, stotal, first, isquery, truncate;
+       unsigned int mtu;
 
        if (pmc->mca_flags & MAF_NOREPORT)
                return skb;
 
+       mtu = READ_ONCE(dev->mtu);
+       if (mtu < IPV6_MIN_MTU)
+               return skb;
+
        isquery = type == MLD2_MODE_IS_INCLUDE ||
                  type == MLD2_MODE_IS_EXCLUDE;
        truncate = type == MLD2_MODE_IS_EXCLUDE ||
@@ -1738,7 +1743,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
                    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
                        if (skb)
                                mld_sendpack(skb);
-                       skb = mld_newpack(idev, dev->mtu);
+                       skb = mld_newpack(idev, mtu);
                }
        }
        first = 1;
@@ -1774,12 +1779,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
                                pgr->grec_nsrcs = htons(scount);
                        if (skb)
                                mld_sendpack(skb);
-                       skb = mld_newpack(idev, dev->mtu);
+                       skb = mld_newpack(idev, mtu);
                        first = 1;
                        scount = 0;
                }
                if (first) {
-                       skb = add_grhead(skb, pmc, type, &pgr);
+                       skb = add_grhead(skb, pmc, type, &pgr, mtu);
                        first = 0;
                }
                if (!skb)
@@ -1814,7 +1819,7 @@ empty_source:
                                mld_sendpack(skb);
                                skb = NULL; /* add_grhead will get a new one */
                        }
-                       skb = add_grhead(skb, pmc, type, &pgr);
+                       skb = add_grhead(skb, pmc, type, &pgr, mtu);
                }
        }
        if (pgr)
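The mld changes above snapshot dev->mtu once with READ_ONCE(), validate it, and reuse that single value for every packet in a report, instead of re-reading a field another CPU may change mid-build. A hedged userspace analogue of the snapshot-and-validate pattern, using C11 atomics as a stand-in for READ_ONCE():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int dev_mtu = 1500;	/* updated by another thread */

static unsigned int build_report(void)
{
	/* single load; every size computed below uses the same snapshot */
	unsigned int mtu = atomic_load_explicit(&dev_mtu, memory_order_relaxed);

	if (mtu < 1280)		/* IPV6_MIN_MTU: refuse undersized packets */
		return 0;

	return mtu;
}

int main(void)
{
	printf("report built for mtu %u\n", build_report());
	return 0;
}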
index f06e250..1d7ae93 100644 (file)
@@ -458,7 +458,6 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                        if (!xt_find_jump_offset(offsets, newpos,
                                                                 newinfo->number))
                                                return 0;
-                                       e = entry0 + newpos;
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
index 2b1a158..92c0047 100644 (file)
@@ -33,13 +33,19 @@ static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
 
        if (range->flags & NF_NAT_RANGE_MAP_IPS)
                return -EINVAL;
-       return 0;
+       return nf_ct_netns_get(par->net, par->family);
+}
+
+static void masquerade_tg6_destroy(const struct xt_tgdtor_param *par)
+{
+       nf_ct_netns_put(par->net, par->family);
 }
 
 static struct xt_target masquerade_tg6_reg __read_mostly = {
        .name           = "MASQUERADE",
        .family         = NFPROTO_IPV6,
        .checkentry     = masquerade_tg6_checkentry,
+       .destroy        = masquerade_tg6_destroy,
        .target         = masquerade_tg6,
        .targetsize     = sizeof(struct nf_nat_range),
        .table          = "nat",
index 7a8d150..0458b76 100644 (file)
@@ -2336,6 +2336,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        }
 
        rt->dst.flags |= DST_HOST;
+       rt->dst.input = ip6_input;
        rt->dst.output  = ip6_output;
        rt->rt6i_gateway  = fl6->daddr;
        rt->rt6i_dst.addr = fl6->daddr;
@@ -4297,19 +4298,13 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                if (!ipv6_addr_any(&fl6.saddr))
                        flags |= RT6_LOOKUP_F_HAS_SADDR;
 
-               if (!fibmatch)
-                       dst = ip6_route_input_lookup(net, dev, &fl6, flags);
-               else
-                       dst = ip6_route_lookup(net, &fl6, 0);
+               dst = ip6_route_input_lookup(net, dev, &fl6, flags);
 
                rcu_read_unlock();
        } else {
                fl6.flowi6_oif = oif;
 
-               if (!fibmatch)
-                       dst = ip6_route_output(net, NULL, &fl6);
-               else
-                       dst = ip6_route_lookup(net, &fl6, 0);
+               dst = ip6_route_output(net, NULL, &fl6);
        }
 
 
@@ -4326,6 +4321,15 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                goto errout;
        }
 
+       if (fibmatch && rt->dst.from) {
+               struct rt6_info *ort = container_of(rt->dst.from,
+                                                   struct rt6_info, dst);
+
+               dst_hold(&ort->dst);
+               ip6_rt_put(rt);
+               rt = ort;
+       }
+
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb) {
                ip6_rt_put(rt);
index d60ddcb..3873d38 100644 (file)
@@ -934,8 +934,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                        df = 0;
                }
 
-               if (tunnel->parms.iph.daddr && skb_dst(skb))
-                       skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+               if (tunnel->parms.iph.daddr)
+                       skb_dst_update_pmtu(skb, mtu);
 
                if (skb->len > mtu && !skb_is_gso(skb)) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -1098,6 +1098,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
        ipip6_tunnel_link(sitn, t);
        t->parms.iph.ttl = p->iph.ttl;
        t->parms.iph.tos = p->iph.tos;
+       t->parms.iph.frag_off = p->iph.frag_off;
        if (t->parms.link != p->link || t->fwmark != fwmark) {
                t->parms.link = p->link;
                t->fwmark = fwmark;
index 6bb98c9..7178476 100644 (file)
@@ -994,7 +994,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                        req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
                        tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
                        req->ts_recent, sk->sk_bound_dev_if,
-                       tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
+                       tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
                        0, 0);
 }
 
@@ -1454,7 +1454,6 @@ process:
                struct sock *nsk;
 
                sk = req->rsk_listener;
-               tcp_v6_fill_cb(skb, hdr, th);
                if (tcp_v6_inbound_md5_hash(sk, skb)) {
                        sk_drops_add(sk, skb);
                        reqsk_put(req);
@@ -1467,8 +1466,12 @@ process:
                sock_hold(sk);
                refcounted = true;
                nsk = NULL;
-               if (!tcp_filter(sk, skb))
+               if (!tcp_filter(sk, skb)) {
+                       th = (const struct tcphdr *)skb->data;
+                       hdr = ipv6_hdr(skb);
+                       tcp_v6_fill_cb(skb, hdr, th);
                        nsk = tcp_check_req(sk, skb, req, false);
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_and_relse;
@@ -1492,8 +1495,6 @@ process:
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
 
-       tcp_v6_fill_cb(skb, hdr, th);
-
        if (tcp_v6_inbound_md5_hash(sk, skb))
                goto discard_and_relse;
 
@@ -1501,6 +1502,7 @@ process:
                goto discard_and_relse;
        th = (const struct tcphdr *)skb->data;
        hdr = ipv6_hdr(skb);
+       tcp_v6_fill_cb(skb, hdr, th);
 
        skb->dev = NULL;
 
@@ -1590,7 +1592,6 @@ do_time_wait:
                tcp_v6_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
-               tcp_v6_restore_cb(skb);
                tcp_v6_send_reset(sk, skb);
                inet_twsk_deschedule_put(inet_twsk(sk));
                goto discard_it;
index d883c92..278e49c 100644 (file)
@@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
 {
        struct tcphdr *th;
 
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
+               return ERR_PTR(-EINVAL);
+
        if (!pskb_may_pull(skb, sizeof(*th)))
                return ERR_PTR(-EINVAL);
 
index a0f89ad..2a04dc9 100644 (file)
@@ -42,6 +42,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                const struct ipv6hdr *ipv6h;
                struct udphdr *uh;
 
+               if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
+                       goto out;
+
                if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                        goto out;
 
index fe04e23..841f4a0 100644 (file)
@@ -32,6 +32,14 @@ int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
 }
 EXPORT_SYMBOL(xfrm6_rcv_spi);
 
+static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
+                                  struct sk_buff *skb)
+{
+       if (xfrm_trans_queue(skb, ip6_rcv_finish))
+               __kfree_skb(skb);
+       return -1;
+}
+
 int xfrm6_transport_finish(struct sk_buff *skb, int async)
 {
        struct xfrm_offload *xo = xfrm_offload(skb);
@@ -56,7 +64,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 
        NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
                dev_net(skb->dev), NULL, skb, skb->dev, NULL,
-               ip6_rcv_finish);
+               xfrm6_transport_finish2);
        return -1;
 }
 
index 02556e3..dc93002 100644 (file)
@@ -92,6 +92,7 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 
        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);
+       eth_hdr(skb)->h_proto = skb->protocol;
 
        err = 0;
 
index 0b750a2..4a8d407 100644 (file)
@@ -1387,8 +1387,13 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
        if (!csk)
                return -EINVAL;
 
-       /* We must prevent loops or risk deadlock ! */
-       if (csk->sk_family == PF_KCM)
+       /* Only allow TCP sockets to be attached for now */
+       if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
+           csk->sk_protocol != IPPROTO_TCP)
+               return -EOPNOTSUPP;
+
+       /* Don't allow listeners or closed sockets */
+       if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
                return -EOPNOTSUPP;
 
        psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
@@ -1405,9 +1410,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
                return err;
        }
 
-       sock_hold(csk);
-
        write_lock_bh(&csk->sk_callback_lock);
+
+       /* Check if sk_user_data is already in use by KCM or someone else.
+        * Must be done under lock to prevent race conditions.
+        */
+       if (csk->sk_user_data) {
+               write_unlock_bh(&csk->sk_callback_lock);
+               strp_done(&psock->strp);
+               kmem_cache_free(kcm_psockp, psock);
+               return -EALREADY;
+       }
+
        psock->save_data_ready = csk->sk_data_ready;
        psock->save_write_space = csk->sk_write_space;
        psock->save_state_change = csk->sk_state_change;
@@ -1415,8 +1429,11 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
        csk->sk_data_ready = psock_data_ready;
        csk->sk_write_space = psock_write_space;
        csk->sk_state_change = psock_state_change;
+
        write_unlock_bh(&csk->sk_callback_lock);
 
+       sock_hold(csk);
+
        /* Finished initialization, now add the psock to the MUX. */
        spin_lock_bh(&mux->lock);
        head = &mux->psocks;
@@ -1625,60 +1642,30 @@ static struct proto kcm_proto = {
 };
 
 /* Clone a kcm socket. */
-static int kcm_clone(struct socket *osock, struct kcm_clone *info,
-                    struct socket **newsockp)
+static struct file *kcm_clone(struct socket *osock)
 {
        struct socket *newsock;
        struct sock *newsk;
-       struct file *newfile;
-       int err, newfd;
 
-       err = -ENFILE;
        newsock = sock_alloc();
        if (!newsock)
-               goto out;
+               return ERR_PTR(-ENFILE);
 
        newsock->type = osock->type;
        newsock->ops = osock->ops;
 
        __module_get(newsock->ops->owner);
 
-       newfd = get_unused_fd_flags(0);
-       if (unlikely(newfd < 0)) {
-               err = newfd;
-               goto out_fd_fail;
-       }
-
-       newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
-       if (IS_ERR(newfile)) {
-               err = PTR_ERR(newfile);
-               goto out_sock_alloc_fail;
-       }
-
        newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
                         &kcm_proto, true);
        if (!newsk) {
-               err = -ENOMEM;
-               goto out_sk_alloc_fail;
+               sock_release(newsock);
+               return ERR_PTR(-ENOMEM);
        }
-
        sock_init_data(newsock, newsk);
        init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
 
-       fd_install(newfd, newfile);
-       *newsockp = newsock;
-       info->fd = newfd;
-
-       return 0;
-
-out_sk_alloc_fail:
-       fput(newfile);
-out_sock_alloc_fail:
-       put_unused_fd(newfd);
-out_fd_fail:
-       sock_release(newsock);
-out:
-       return err;
+       return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
 }
 
 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -1708,17 +1695,25 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        }
        case SIOCKCMCLONE: {
                struct kcm_clone info;
-               struct socket *newsock = NULL;
-
-               err = kcm_clone(sock, &info, &newsock);
-               if (!err) {
-                       if (copy_to_user((void __user *)arg, &info,
-                                        sizeof(info))) {
-                               err = -EFAULT;
-                               sys_close(info.fd);
-                       }
-               }
+               struct file *file;
 
+               info.fd = get_unused_fd_flags(0);
+               if (unlikely(info.fd < 0))
+                       return info.fd;
+
+               file = kcm_clone(sock);
+               if (IS_ERR(file)) {
+                       put_unused_fd(info.fd);
+                       return PTR_ERR(file);
+               }
+               if (copy_to_user((void __user *)arg, &info,
+                                sizeof(info))) {
+                       put_unused_fd(info.fd);
+                       fput(file);
+                       return -EFAULT;
+               }
+               fd_install(info.fd, file);
+               err = 0;
                break;
        }
        default:
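For reference, a hedged userspace sketch of how SIOCKCMCLONE is consumed; with the rework above, info.fd is only installed once the copy back to userspace has succeeded, so a failing ioctl can no longer leave a stray descriptor behind. Error handling is trimmed and the AF_KCM value is guarded as an assumption:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/kcm.h>

#ifndef AF_KCM
#define AF_KCM 41
#endif

int main(void)
{
	struct kcm_clone info = { 0 };
	int kcm = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);

	if (kcm < 0 || ioctl(kcm, SIOCKCMCLONE, &info) < 0) {
		perror("kcm clone");
		return 1;
	}
	printf("cloned kcm socket on fd %d\n", info.fd);
	return 0;
}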
index 3dffb89..7e2e718 100644 (file)
@@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
 #endif
        int len;
 
+       if (sp->sadb_address_len <
+           DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
+                        sizeof(uint64_t)))
+               return -EINVAL;
+
        switch (addr->sa_family) {
        case AF_INET:
                len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
                uint16_t ext_type;
                int ext_len;
 
+               if (len < sizeof(*ehdr))
+                       return -EINVAL;
+
                ext_len  = ehdr->sadb_ext_len;
                ext_len *= sizeof(uint64_t);
                ext_type = ehdr->sadb_ext_type;
@@ -2194,8 +2202,10 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
                return PTR_ERR(out_skb);
 
        err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(out_skb);
                return err;
+       }
 
        out_hdr = (struct sadb_msg *) out_skb->data;
        out_hdr->sadb_msg_version = PF_KEY_V2;
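The new verify_address_len() check rejects messages whose sadb_address_len cannot even cover the extension header plus the sockaddr's sa_family field before that field is read. A worked sketch of that minimum, assuming the usual 8-byte sadb_address header and 2-byte sa_family (sizes below are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	size_t hdr = 8;		/* sizeof(struct sadb_address), assumed */
	size_t fam_end = 2;	/* offsetofend(struct sockaddr, sa_family), assumed */
	size_t min_units = DIV_ROUND_UP(hdr + fam_end, sizeof(uint64_t));

	/* sadb_address_len counts 64-bit words, so anything below this is bogus */
	printf("minimum sadb_address_len: %zu 64-bit units\n", min_units);
	return 0;
}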
index 41f5e48..1621b6a 100644 (file)
@@ -291,13 +291,14 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
        int i;
 
        mutex_lock(&sta->ampdu_mlme.mtx);
-       for (i = 0; i <  IEEE80211_NUM_TIDS; i++) {
-               ___ieee80211_stop_tx_ba_session(sta, i, reason);
+       for (i = 0; i <  IEEE80211_NUM_TIDS; i++)
                ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
                                                WLAN_REASON_QSTA_LEAVE_QBSS,
                                                reason != AGG_STOP_DESTROY_STA &&
                                                reason != AGG_STOP_PEER_REQUEST);
-       }
+
+       for (i = 0; i <  IEEE80211_NUM_TIDS; i++)
+               ___ieee80211_stop_tx_ba_session(sta, i, reason);
        mutex_unlock(&sta->ampdu_mlme.mtx);
 
        /* stopping might queue the work again - so cancel only afterwards */
index 4f7826d..4394463 100644 (file)
@@ -797,7 +797,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
        struct mesh_path *mpath;
        u8 ttl, flags, hopcount;
        const u8 *orig_addr;
-       u32 orig_sn, metric, metric_txsta, interval;
+       u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
        bool root_is_gate;
 
        ttl = rann->rann_ttl;
@@ -808,7 +808,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
        interval = le32_to_cpu(rann->rann_interval);
        hopcount = rann->rann_hopcount;
        hopcount++;
-       metric = le32_to_cpu(rann->rann_metric);
+       orig_metric = le32_to_cpu(rann->rann_metric);
 
        /*  Ignore our own RANNs */
        if (ether_addr_equal(orig_addr, sdata->vif.addr))
@@ -825,7 +825,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
                return;
        }
 
-       metric_txsta = airtime_link_metric_get(local, sta);
+       last_hop_metric = airtime_link_metric_get(local, sta);
+       new_metric = orig_metric + last_hop_metric;
+       if (new_metric < orig_metric)
+               new_metric = MAX_METRIC;
 
        mpath = mesh_path_lookup(sdata, orig_addr);
        if (!mpath) {
@@ -838,7 +841,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
        }
 
        if (!(SN_LT(mpath->sn, orig_sn)) &&
-           !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
+           !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
                rcu_read_unlock();
                return;
        }
@@ -856,7 +859,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
        }
 
        mpath->sn = orig_sn;
-       mpath->rann_metric = metric + metric_txsta;
+       mpath->rann_metric = new_metric;
        mpath->is_root = true;
        /* Recording RANNs sender address to send individually
         * addressed PREQs destined for root mesh STA */
@@ -876,7 +879,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
                mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
                                       orig_sn, 0, NULL, 0, broadcast_addr,
                                       hopcount, ttl, interval,
-                                      metric + metric_txsta, 0, sdata);
+                                      new_metric, 0, sdata);
        }
 
        rcu_read_unlock();
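The metric handling above clamps on unsigned wraparound: if the 32-bit sum comes out smaller than either operand it must have overflowed. A small self-contained illustration (the MAX_METRIC value is assumed to match mesh_hwmp.c):

#include <stdio.h>

#define MAX_METRIC 0xffffffffU	/* assumption: mesh code caps metrics here */

int main(void)
{
	unsigned int orig_metric = 0xfffffff0U;
	unsigned int last_hop_metric = 0x20U;
	unsigned int new_metric = orig_metric + last_hop_metric;	/* wraps */

	if (new_metric < orig_metric)	/* cheap overflow test */
		new_metric = MAX_METRIC;

	printf("clamped metric: 0x%x\n", new_metric);
	return 0;
}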
index 0446044..c244691 100644 (file)
@@ -895,7 +895,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
        struct ieee80211_hdr_3addr *nullfunc;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-       skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
+       skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true);
        if (!skb)
                return;
 
index 70e9d2c..4daafb0 100644 (file)
@@ -3632,6 +3632,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
                }
                return true;
        case NL80211_IFTYPE_MESH_POINT:
+               if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
+                       return false;
                if (multicast)
                        return true;
                return ether_addr_equal(sdata->vif.addr, hdr->addr1);
index 7b81544..3160954 100644 (file)
@@ -4438,13 +4438,15 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
 EXPORT_SYMBOL(ieee80211_pspoll_get);
 
 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
-                                      struct ieee80211_vif *vif)
+                                      struct ieee80211_vif *vif,
+                                      bool qos_ok)
 {
        struct ieee80211_hdr_3addr *nullfunc;
        struct ieee80211_sub_if_data *sdata;
        struct ieee80211_if_managed *ifmgd;
        struct ieee80211_local *local;
        struct sk_buff *skb;
+       bool qos = false;
 
        if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
                return NULL;
@@ -4453,7 +4455,17 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
        ifmgd = &sdata->u.mgd;
        local = sdata->local;
 
-       skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
+       if (qos_ok) {
+               struct sta_info *sta;
+
+               rcu_read_lock();
+               sta = sta_info_get(sdata, ifmgd->bssid);
+               qos = sta && sta->sta.wme;
+               rcu_read_unlock();
+       }
+
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+                           sizeof(*nullfunc) + 2);
        if (!skb)
                return NULL;
 
@@ -4463,6 +4475,19 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
        nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                              IEEE80211_STYPE_NULLFUNC |
                                              IEEE80211_FCTL_TODS);
+       if (qos) {
+               __le16 qos = cpu_to_le16(7);
+
+               BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
+                             IEEE80211_STYPE_NULLFUNC) !=
+                            IEEE80211_STYPE_QOS_NULLFUNC);
+               nullfunc->frame_control |=
+                       cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
+               skb->priority = 7;
+               skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+               skb_put_data(skb, &qos, sizeof(qos));
+       }
+
        memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
        memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
        memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
index cf1bf26..dc63473 100644 (file)
@@ -103,7 +103,6 @@ struct bitstr {
 #define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;}
 #define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;}
 #define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;}
-#define CHECK_BOUND(bs,n) if((bs)->cur+(n)>(bs)->end)return(H323_ERROR_BOUND)
 static unsigned int get_len(struct bitstr *bs);
 static unsigned int get_bit(struct bitstr *bs);
 static unsigned int get_bits(struct bitstr *bs, unsigned int b);
@@ -165,6 +164,19 @@ static unsigned int get_len(struct bitstr *bs)
        return v;
 }
 
+static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits)
+{
+       bits += bs->bit;
+       bytes += bits / BITS_PER_BYTE;
+       if (bits % BITS_PER_BYTE > 0)
+               bytes++;
+
+       if (*bs->cur + bytes > *bs->end)
+               return 1;
+
+       return 0;
+}
+
 /****************************************************************************/
 static unsigned int get_bit(struct bitstr *bs)
 {
@@ -279,8 +291,8 @@ static int decode_bool(struct bitstr *bs, const struct field_t *f,
        PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
 
        INC_BIT(bs);
-
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -293,11 +305,14 @@ static int decode_oid(struct bitstr *bs, const struct field_t *f,
        PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
 
        BYTE_ALIGN(bs);
-       CHECK_BOUND(bs, 1);
+       if (nf_h323_error_boundary(bs, 1, 0))
+               return H323_ERROR_BOUND;
+
        len = *bs->cur++;
        bs->cur += len;
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
 
-       CHECK_BOUND(bs, 0);
        return H323_ERROR_NONE;
 }
 
@@ -319,6 +334,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
                bs->cur += 2;
                break;
        case CONS:              /* 64K < Range < 4G */
+               if (nf_h323_error_boundary(bs, 0, 2))
+                       return H323_ERROR_BOUND;
                len = get_bits(bs, 2) + 1;
                BYTE_ALIGN(bs);
                if (base && (f->attr & DECODE)) {       /* timeToLive */
@@ -330,7 +347,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
                break;
        case UNCO:
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs);
                bs->cur += len;
                break;
@@ -341,7 +359,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
 
        PRINT("\n");
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -357,7 +376,8 @@ static int decode_enum(struct bitstr *bs, const struct field_t *f,
                INC_BITS(bs, f->sz);
        }
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -375,12 +395,14 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
                len = f->lb;
                break;
        case WORD:              /* 2-byte length */
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = (*bs->cur++) << 8;
                len += (*bs->cur++) + f->lb;
                break;
        case SEMI:
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs);
                break;
        default:
@@ -391,7 +413,8 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
        bs->cur += len >> 3;
        bs->bit = len & 7;
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -404,12 +427,15 @@ static int decode_numstr(struct bitstr *bs, const struct field_t *f,
        PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
 
        /* 2 <= Range <= 255 */
+       if (nf_h323_error_boundary(bs, 0, f->sz))
+               return H323_ERROR_BOUND;
        len = get_bits(bs, f->sz) + f->lb;
 
        BYTE_ALIGN(bs);
        INC_BITS(bs, (len << 2));
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -440,15 +466,19 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f,
                break;
        case BYTE:              /* Range == 256 */
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 1);
+               if (nf_h323_error_boundary(bs, 1, 0))
+                       return H323_ERROR_BOUND;
                len = (*bs->cur++) + f->lb;
                break;
        case SEMI:
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs) + f->lb;
                break;
        default:                /* 2 <= Range <= 255 */
+               if (nf_h323_error_boundary(bs, 0, f->sz))
+                       return H323_ERROR_BOUND;
                len = get_bits(bs, f->sz) + f->lb;
                BYTE_ALIGN(bs);
                break;
@@ -458,7 +488,8 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f,
 
        PRINT("\n");
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -473,10 +504,13 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
        switch (f->sz) {
        case BYTE:              /* Range == 256 */
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 1);
+               if (nf_h323_error_boundary(bs, 1, 0))
+                       return H323_ERROR_BOUND;
                len = (*bs->cur++) + f->lb;
                break;
        default:                /* 2 <= Range <= 255 */
+               if (nf_h323_error_boundary(bs, 0, f->sz))
+                       return H323_ERROR_BOUND;
                len = get_bits(bs, f->sz) + f->lb;
                BYTE_ALIGN(bs);
                break;
@@ -484,7 +518,8 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
 
        bs->cur += len << 1;
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -503,9 +538,13 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
        base = (base && (f->attr & DECODE)) ? base + f->offset : NULL;
 
        /* Extensible? */
+       if (nf_h323_error_boundary(bs, 0, 1))
+               return H323_ERROR_BOUND;
        ext = (f->attr & EXT) ? get_bit(bs) : 0;
 
        /* Get fields bitmap */
+       if (nf_h323_error_boundary(bs, 0, f->sz))
+               return H323_ERROR_BOUND;
        bmp = get_bitmap(bs, f->sz);
        if (base)
                *(unsigned int *)base = bmp;
@@ -525,9 +564,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
 
                /* Decode */
                if (son->attr & OPEN) { /* Open field */
-                       CHECK_BOUND(bs, 2);
+                       if (nf_h323_error_boundary(bs, 2, 0))
+                               return H323_ERROR_BOUND;
                        len = get_len(bs);
-                       CHECK_BOUND(bs, len);
+                       if (nf_h323_error_boundary(bs, len, 0))
+                               return H323_ERROR_BOUND;
                        if (!base || !(son->attr & DECODE)) {
                                PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
                                      " ", son->name);
@@ -555,8 +596,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
                return H323_ERROR_NONE;
 
        /* Get the extension bitmap */
+       if (nf_h323_error_boundary(bs, 0, 7))
+               return H323_ERROR_BOUND;
        bmp2_len = get_bits(bs, 7) + 1;
-       CHECK_BOUND(bs, (bmp2_len + 7) >> 3);
+       if (nf_h323_error_boundary(bs, 0, bmp2_len))
+               return H323_ERROR_BOUND;
        bmp2 = get_bitmap(bs, bmp2_len);
        bmp |= bmp2 >> f->sz;
        if (base)
@@ -567,9 +611,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
        for (opt = 0; opt < bmp2_len; opt++, i++, son++) {
                /* Check Range */
                if (i >= f->ub) {       /* Newer Version? */
-                       CHECK_BOUND(bs, 2);
+                       if (nf_h323_error_boundary(bs, 2, 0))
+                               return H323_ERROR_BOUND;
                        len = get_len(bs);
-                       CHECK_BOUND(bs, len);
+                       if (nf_h323_error_boundary(bs, len, 0))
+                               return H323_ERROR_BOUND;
                        bs->cur += len;
                        continue;
                }
@@ -583,9 +629,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
                if (!((0x80000000 >> opt) & bmp2))      /* Not present */
                        continue;
 
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs);
-               CHECK_BOUND(bs, len);
+               if (nf_h323_error_boundary(bs, len, 0))
+                       return H323_ERROR_BOUND;
                if (!base || !(son->attr & DECODE)) {
                        PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
                              son->name);
@@ -623,22 +671,27 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f,
        switch (f->sz) {
        case BYTE:
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 1);
+               if (nf_h323_error_boundary(bs, 1, 0))
+                       return H323_ERROR_BOUND;
                count = *bs->cur++;
                break;
        case WORD:
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                count = *bs->cur++;
                count <<= 8;
                count += *bs->cur++;
                break;
        case SEMI:
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                count = get_len(bs);
                break;
        default:
+               if (nf_h323_error_boundary(bs, 0, f->sz))
+                       return H323_ERROR_BOUND;
                count = get_bits(bs, f->sz);
                break;
        }
@@ -658,8 +711,11 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f,
        for (i = 0; i < count; i++) {
                if (son->attr & OPEN) {
                        BYTE_ALIGN(bs);
+                       if (nf_h323_error_boundary(bs, 2, 0))
+                               return H323_ERROR_BOUND;
                        len = get_len(bs);
-                       CHECK_BOUND(bs, len);
+                       if (nf_h323_error_boundary(bs, len, 0))
+                               return H323_ERROR_BOUND;
                        if (!base || !(son->attr & DECODE)) {
                                PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
                                      " ", son->name);
@@ -710,11 +766,17 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
        base = (base && (f->attr & DECODE)) ? base + f->offset : NULL;
 
        /* Decode the choice index number */
+       if (nf_h323_error_boundary(bs, 0, 1))
+               return H323_ERROR_BOUND;
        if ((f->attr & EXT) && get_bit(bs)) {
                ext = 1;
+               if (nf_h323_error_boundary(bs, 0, 7))
+                       return H323_ERROR_BOUND;
                type = get_bits(bs, 7) + f->lb;
        } else {
                ext = 0;
+               if (nf_h323_error_boundary(bs, 0, f->sz))
+                       return H323_ERROR_BOUND;
                type = get_bits(bs, f->sz);
                if (type >= f->lb)
                        return H323_ERROR_RANGE;
@@ -727,8 +789,11 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
        /* Check Range */
        if (type >= f->ub) {    /* Newer version? */
                BYTE_ALIGN(bs);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs);
-               CHECK_BOUND(bs, len);
+               if (nf_h323_error_boundary(bs, len, 0))
+                       return H323_ERROR_BOUND;
                bs->cur += len;
                return H323_ERROR_NONE;
        }
@@ -742,8 +807,11 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
 
        if (ext || (son->attr & OPEN)) {
                BYTE_ALIGN(bs);
+               if (nf_h323_error_boundary(bs, len, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs);
-               CHECK_BOUND(bs, len);
+               if (nf_h323_error_boundary(bs, len, 0))
+                       return H323_ERROR_BOUND;
                if (!base || !(son->attr & DECODE)) {
                        PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
                              son->name);
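nf_h323_error_boundary() rounds a byte-plus-bit request up to whole bytes, starting from the current bit offset, before comparing against the end of the buffer. A hedged worked example of that rounding, with the helper reduced to plain arithmetic:

#include <stdio.h>

#define BITS_PER_BYTE 8

static size_t bytes_needed(size_t cur_bit, size_t bytes, size_t bits)
{
	bits += cur_bit;			/* bits already consumed in this byte */
	bytes += bits / BITS_PER_BYTE;
	if (bits % BITS_PER_BYTE > 0)
		bytes++;			/* round the partial byte up */
	return bytes;
}

int main(void)
{
	/* 2 whole bytes plus 7 bits at bit offset 3 -> 2 + ceil(10/8) = 4 bytes */
	printf("%zu\n", bytes_needed(3, 2, 7));
	return 0;
}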
index 59c0899..382d497 100644 (file)
@@ -45,7 +45,6 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/nf_conntrack_timestamp.h>
 #include <net/netfilter/nf_conntrack_labels.h>
-#include <net/netfilter/nf_conntrack_seqadj.h>
 #include <net/netfilter/nf_conntrack_synproxy.h>
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
@@ -1566,9 +1565,11 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
 static int ctnetlink_change_timeout(struct nf_conn *ct,
                                    const struct nlattr * const cda[])
 {
-       u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
+       u64 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
 
-       ct->timeout = nfct_time_stamp + timeout * HZ;
+       if (timeout > INT_MAX)
+               timeout = INT_MAX;
+       ct->timeout = nfct_time_stamp + (u32)timeout;
 
        if (test_bit(IPS_DYING_BIT, &ct->status))
                return -ETIME;
@@ -1768,6 +1769,7 @@ ctnetlink_create_conntrack(struct net *net,
        int err = -EINVAL;
        struct nf_conntrack_helper *helper;
        struct nf_conn_tstamp *tstamp;
+       u64 timeout;
 
        ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
        if (IS_ERR(ct))
@@ -1776,7 +1778,10 @@ ctnetlink_create_conntrack(struct net *net,
        if (!cda[CTA_TIMEOUT])
                goto err1;
 
-       ct->timeout = nfct_time_stamp + ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
+       timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
+       if (timeout > INT_MAX)
+               timeout = INT_MAX;
+       ct->timeout = (u32)timeout + nfct_time_stamp;
 
        rcu_read_lock();
        if (cda[CTA_HELP]) {
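Both timeout hunks above widen the jiffies conversion to 64 bits and clamp at INT_MAX, because ntohl(timeout) * HZ wraps a 32-bit value for large userspace timeouts. A small illustration of the failure mode, assuming HZ = 1000:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define HZ 1000

int main(void)
{
	uint32_t seconds = 5000000;		/* ~58 days, beyond the 32-bit limit */
	uint32_t wrapped = seconds * HZ;	/* truncates to 32 bits */
	uint64_t clamped = (uint64_t)seconds * HZ;

	if (clamped > INT_MAX)
		clamped = INT_MAX;

	printf("wrapped: %u jiffies, clamped: %llu jiffies\n",
	       wrapped, (unsigned long long)clamped);
	return 0;
}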
index b12fc07..37ef35b 100644 (file)
@@ -1039,6 +1039,9 @@ static int tcp_packet(struct nf_conn *ct,
                 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
                 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
                timeout = timeouts[TCP_CONNTRACK_UNACK];
+       else if (ct->proto.tcp.last_win == 0 &&
+                timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
+               timeout = timeouts[TCP_CONNTRACK_RETRANS];
        else
                timeout = timeouts[new_state];
        spin_unlock_bh(&ct->lock);
index d8327b4..07bd413 100644 (file)
@@ -2072,7 +2072,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
                                continue;
 
                        list_for_each_entry_rcu(chain, &table->chains, list) {
-                               if (ctx && ctx->chain[0] &&
+                               if (ctx && ctx->chain &&
                                    strcmp(ctx->chain, chain->name) != 0)
                                        continue;
 
@@ -4665,8 +4665,10 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
 {
        struct nft_obj_filter *filter = cb->data;
 
-       kfree(filter->table);
-       kfree(filter);
+       if (filter) {
+               kfree(filter->table);
+               kfree(filter);
+       }
 
        return 0;
 }
@@ -5847,6 +5849,12 @@ static int __net_init nf_tables_init_net(struct net *net)
        return 0;
 }
 
+static void __net_exit nf_tables_exit_net(struct net *net)
+{
+       WARN_ON_ONCE(!list_empty(&net->nft.af_info));
+       WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
+}
+
 int __nft_release_basechain(struct nft_ctx *ctx)
 {
        struct nft_rule *rule, *nr;
@@ -5917,6 +5925,7 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
 
 static struct pernet_operations nf_tables_net_ops = {
        .init   = nf_tables_init_net,
+       .exit   = nf_tables_exit_net,
 };
 
 static int __init nf_tables_module_init(void)
index 41628b3..d33ce6d 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/errno.h>
+#include <linux/capability.h>
 #include <net/netlink.h>
 #include <net/sock.h>
 
@@ -407,6 +408,9 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
        struct nfnl_cthelper *nlcth;
        int ret = 0;
 
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
        if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
                return -EINVAL;
 
@@ -611,6 +615,9 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
        struct nfnl_cthelper *nlcth;
        bool tuple_set = false;
 
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
                        .dump = nfnl_cthelper_dump_table,
@@ -678,6 +685,9 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
        struct nfnl_cthelper *nlcth, *n;
        int j = 0, ret;
 
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
        if (tb[NFCTH_NAME])
                helper_name = nla_data(tb[NFCTH_NAME]);
 
index e5afab8..e955bec 100644 (file)
@@ -1093,10 +1093,15 @@ static int __net_init nfnl_log_net_init(struct net *net)
 
 static void __net_exit nfnl_log_net_exit(struct net *net)
 {
+       struct nfnl_log_net *log = nfnl_log_pernet(net);
+       unsigned int i;
+
 #ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
 #endif
        nf_log_unset(net, &nfulnl_logger);
+       for (i = 0; i < INSTANCE_BUCKETS; i++)
+               WARN_ON_ONCE(!hlist_empty(&log->instance_table[i]));
 }
 
 static struct pernet_operations nfnl_log_net_ops = {
index a16356c..c09b367 100644 (file)
@@ -1512,10 +1512,15 @@ static int __net_init nfnl_queue_net_init(struct net *net)
 
 static void __net_exit nfnl_queue_net_exit(struct net *net)
 {
+       struct nfnl_queue_net *q = nfnl_queue_pernet(net);
+       unsigned int i;
+
        nf_unregister_queue_handler(net);
 #ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
 #endif
+       for (i = 0; i < INSTANCE_BUCKETS; i++)
+               WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
 }
 
 static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
index a0a93d9..47ec104 100644 (file)
@@ -214,6 +214,8 @@ static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
        [NFTA_EXTHDR_OFFSET]            = { .type = NLA_U32 },
        [NFTA_EXTHDR_LEN]               = { .type = NLA_U32 },
        [NFTA_EXTHDR_FLAGS]             = { .type = NLA_U32 },
+       [NFTA_EXTHDR_OP]                = { .type = NLA_U32 },
+       [NFTA_EXTHDR_SREG]              = { .type = NLA_U32 },
 };
 
 static int nft_exthdr_init(const struct nft_ctx *ctx,
index a77dd51..55802e9 100644 (file)
@@ -1729,8 +1729,17 @@ static int __net_init xt_net_init(struct net *net)
        return 0;
 }
 
+static void __net_exit xt_net_exit(struct net *net)
+{
+       int i;
+
+       for (i = 0; i < NFPROTO_NUMPROTO; i++)
+               WARN_ON_ONCE(!list_empty(&net->xt.tables[i]));
+}
+
 static struct pernet_operations xt_net_ops = {
        .init = xt_net_init,
+       .exit = xt_net_exit,
 };
 
 static int __init xt_init(void)
index 041da0d..06b090d 100644 (file)
@@ -27,6 +27,9 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
 {
        struct sock_fprog_kern program;
 
+       if (len > XT_BPF_MAX_NUM_INSTR)
+               return -EINVAL;
+
        program.len = len;
        program.filter = insns;
 
@@ -52,18 +55,11 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
 
 static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
 {
-       mm_segment_t oldfs = get_fs();
-       int retval, fd;
-
-       set_fs(KERNEL_DS);
-       fd = bpf_obj_get_user(path, 0);
-       set_fs(oldfs);
-       if (fd < 0)
-               return fd;
-
-       retval = __bpf_mt_check_fd(fd, ret);
-       sys_close(fd);
-       return retval;
+       if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
+               return -EINVAL;
+
+       *ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
+       return PTR_ERR_OR_ZERO(*ret);
 }
 
 static int bpf_mt_check(const struct xt_mtchk_param *par)
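
The xt_bpf hunks above cap the byte-code length at XT_BPF_MAX_NUM_INSTR and rework __bpf_mt_check_path() to drop the set_fs(KERNEL_DS) file-descriptor round-trip in favour of resolving the pinned program straight from its path, after first rejecting any path that is not NUL-terminated within XT_BPF_PATH_MAX. A minimal userspace sketch of that bounds check, with PATH_LIMIT and the sample paths as illustrative stand-ins rather than the kernel's values:

#include <stdio.h>
#include <string.h>

#define PATH_LIMIT 512                  /* illustrative stand-in for XT_BPF_PATH_MAX */

/* Reject a user-supplied path unless it is NUL-terminated inside the limit,
 * mirroring the strnlen() guard added to __bpf_mt_check_path(). */
static int check_path_bounded(const char *path)
{
        if (strnlen(path, PATH_LIMIT) == PATH_LIMIT)
                return -22;             /* -EINVAL */
        return 0;
}

int main(void)
{
        char good[PATH_LIMIT] = "/sys/fs/bpf/filter";
        char bad[PATH_LIMIT];

        memset(bad, 'a', sizeof(bad));  /* no terminating NUL inside the limit */
        printf("good: %d\n", check_path_bounded(good));
        printf("bad:  %d\n", check_path_bounded(bad));
        return 0;
}
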
index 36e14b1..a34f314 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 
+#include <linux/capability.h>
 #include <linux/if.h>
 #include <linux/inetdevice.h>
 #include <linux/ip.h>
@@ -70,6 +71,9 @@ static int xt_osf_add_callback(struct net *net, struct sock *ctnl,
        struct xt_osf_finger *kf = NULL, *sf;
        int err = 0;
 
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
        if (!osf_attrs[OSF_ATTR_FINGER])
                return -EINVAL;
 
@@ -115,6 +119,9 @@ static int xt_osf_remove_callback(struct net *net, struct sock *ctnl,
        struct xt_osf_finger *sf;
        int err = -ENOENT;
 
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
        if (!osf_attrs[OSF_ATTR_FINGER])
                return -EINVAL;
 
index b9e0ee4..84a4e4c 100644 (file)
@@ -253,6 +253,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
        struct sock *sk = skb->sk;
        int ret = -ENOMEM;
 
+       if (!net_eq(dev_net(dev), sock_net(sk)))
+               return 0;
+
        dev_hold(dev);
 
        if (is_vmalloc_addr(skb->head))
@@ -2381,13 +2384,14 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
                                                   struct nlmsghdr *,
                                                   struct netlink_ext_ack *))
 {
-       struct netlink_ext_ack extack = {};
+       struct netlink_ext_ack extack;
        struct nlmsghdr *nlh;
        int err;
 
        while (skb->len >= nlmsg_total_size(0)) {
                int msglen;
 
+               memset(&extack, 0, sizeof(extack));
                nlh = nlmsg_hdr(skb);
                err = 0;
 
index 99cfafc..ef38e5a 100644 (file)
@@ -308,7 +308,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info,
                                 uint32_t cutlen)
 {
-       unsigned short gso_type = skb_shinfo(skb)->gso_type;
+       unsigned int gso_type = skb_shinfo(skb)->gso_type;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;
index dbe2379..f039064 100644 (file)
@@ -579,6 +579,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                        return -EINVAL;
 
                skb_reset_network_header(skb);
+               key->eth.type = skb->protocol;
        } else {
                eth = eth_hdr(skb);
                ether_addr_copy(key->eth.src, eth->h_source);
@@ -592,15 +593,23 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                if (unlikely(parse_vlan(skb, key)))
                        return -ENOMEM;
 
-               skb->protocol = parse_ethertype(skb);
-               if (unlikely(skb->protocol == htons(0)))
+               key->eth.type = parse_ethertype(skb);
+               if (unlikely(key->eth.type == htons(0)))
                        return -ENOMEM;
 
+               /* Multiple tagged packets need to retain TPID to satisfy
+                * skb_vlan_pop(), which will later shift the ethertype into
+                * skb->protocol.
+                */
+               if (key->eth.cvlan.tci & htons(VLAN_TAG_PRESENT))
+                       skb->protocol = key->eth.cvlan.tpid;
+               else
+                       skb->protocol = key->eth.type;
+
                skb_reset_network_header(skb);
                __skb_push(skb, skb->data - skb_mac_header(skb));
        }
        skb_reset_mac_len(skb);
-       key->eth.type = skb->protocol;
 
        /* Network layer. */
        if (key->eth.type == htons(ETH_P_IP)) {
index dc42479..f143908 100644 (file)
@@ -49,7 +49,6 @@
 #include <net/mpls.h>
 #include <net/vxlan.h>
 #include <net/tun_proto.h>
-#include <net/erspan.h>
 
 #include "flow_netlink.h"
 
@@ -334,8 +333,7 @@ size_t ovs_tun_key_attr_size(void)
                 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
                 */
                + nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
-               + nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_DST */
-               + nla_total_size(4);   /* OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS */
+               + nla_total_size(2);   /* OVS_TUNNEL_KEY_ATTR_TP_DST */
 }
 
 static size_t ovs_nsh_key_attr_size(void)
@@ -402,7 +400,6 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1]
                                                .next = ovs_vxlan_ext_key_lens },
        [OVS_TUNNEL_KEY_ATTR_IPV6_SRC]      = { .len = sizeof(struct in6_addr) },
        [OVS_TUNNEL_KEY_ATTR_IPV6_DST]      = { .len = sizeof(struct in6_addr) },
-       [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS]   = { .len = sizeof(u32) },
 };
 
 static const struct ovs_len_tbl
@@ -634,33 +631,6 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
        return 0;
 }
 
-static int erspan_tun_opt_from_nlattr(const struct nlattr *attr,
-                                     struct sw_flow_match *match, bool is_mask,
-                                     bool log)
-{
-       unsigned long opt_key_offset;
-       struct erspan_metadata opts;
-
-       BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
-
-       memset(&opts, 0, sizeof(opts));
-       opts.index = nla_get_be32(attr);
-
-       /* Index has only 20-bit */
-       if (ntohl(opts.index) & ~INDEX_MASK) {
-               OVS_NLERR(log, "ERSPAN index number %x too large.",
-                         ntohl(opts.index));
-               return -EINVAL;
-       }
-
-       SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), is_mask);
-       opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
-       SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
-                                 is_mask);
-
-       return 0;
-}
-
 static int ip_tun_from_nlattr(const struct nlattr *attr,
                              struct sw_flow_match *match, bool is_mask,
                              bool log)
@@ -768,19 +738,6 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
                        break;
                case OVS_TUNNEL_KEY_ATTR_PAD:
                        break;
-               case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
-                       if (opts_type) {
-                               OVS_NLERR(log, "Multiple metadata blocks provided");
-                               return -EINVAL;
-                       }
-
-                       err = erspan_tun_opt_from_nlattr(a, match, is_mask, log);
-                       if (err)
-                               return err;
-
-                       tun_flags |= TUNNEL_ERSPAN_OPT;
-                       opts_type = type;
-                       break;
                default:
                        OVS_NLERR(log, "Unknown IP tunnel attribute %d",
                                  type);
@@ -905,10 +862,6 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
                else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
                         vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
                        return -EMSGSIZE;
-               else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
-                        nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
-                                     ((struct erspan_metadata *)tun_opts)->index))
-                       return -EMSGSIZE;
        }
 
        return 0;
@@ -2241,14 +2194,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
 
 #define MAX_ACTIONS_BUFSIZE    (32 * 1024)
 
-static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
+static struct sw_flow_actions *nla_alloc_flow_actions(int size)
 {
        struct sw_flow_actions *sfa;
 
-       if (size > MAX_ACTIONS_BUFSIZE) {
-               OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
-               return ERR_PTR(-EINVAL);
-       }
+       WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
 
        sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
        if (!sfa)
@@ -2321,12 +2271,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
        new_acts_size = ksize(*sfa) * 2;
 
        if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
-               if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
+               if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+                       OVS_NLERR(log, "Flow action size exceeds max %u",
+                                 MAX_ACTIONS_BUFSIZE);
                        return ERR_PTR(-EMSGSIZE);
+               }
                new_acts_size = MAX_ACTIONS_BUFSIZE;
        }
 
-       acts = nla_alloc_flow_actions(new_acts_size, log);
+       acts = nla_alloc_flow_actions(new_acts_size);
        if (IS_ERR(acts))
                return (void *)acts;
 
@@ -2533,8 +2486,6 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
                        break;
                case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
                        break;
-               case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
-                       break;
                }
        };
 
@@ -3059,7 +3010,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 {
        int err;
 
-       *sfa = nla_alloc_flow_actions(nla_len(attr), log);
+       *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
        if (IS_ERR(*sfa))
                return PTR_ERR(*sfa);
 
index 737092c..da215e5 100644 (file)
@@ -1687,7 +1687,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                atomic_long_set(&rollover->num, 0);
                atomic_long_set(&rollover->num_huge, 0);
                atomic_long_set(&rollover->num_failed, 0);
-               po->rollover = rollover;
        }
 
        if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
@@ -1745,6 +1744,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
                        __dev_remove_pack(&po->prot_hook);
                        po->fanout = match;
+                       po->rollover = rollover;
+                       rollover = NULL;
                        refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
                        __fanout_link(sk, po);
                        err = 0;
@@ -1758,10 +1759,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
        }
 
 out:
-       if (err && rollover) {
-               kfree_rcu(rollover, rcu);
-               po->rollover = NULL;
-       }
+       kfree(rollover);
        mutex_unlock(&fanout_mutex);
        return err;
 }
@@ -1785,11 +1783,6 @@ static struct packet_fanout *fanout_release(struct sock *sk)
                        list_del(&f->list);
                else
                        f = NULL;
-
-               if (po->rollover) {
-                       kfree_rcu(po->rollover, rcu);
-                       po->rollover = NULL;
-               }
        }
        mutex_unlock(&fanout_mutex);
 
@@ -3029,6 +3022,7 @@ static int packet_release(struct socket *sock)
        synchronize_net();
 
        if (f) {
+               kfree(po->rollover);
                fanout_release_data(f);
                kfree(f);
        }
@@ -3097,6 +3091,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
        if (need_rehook) {
                if (po->running) {
                        rcu_read_unlock();
+                       /* prevents packet_notifier() from calling
+                        * register_prot_hook()
+                        */
+                       po->num = 0;
                        __unregister_prot_hook(sk, true);
                        rcu_read_lock();
                        dev_curr = po->prot_hook.dev;
@@ -3105,6 +3103,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
                                                                 dev->ifindex);
                }
 
+               BUG_ON(po->running);
                po->num = proto;
                po->prot_hook.type = proto;
 
@@ -3843,7 +3842,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        void *data = &val;
        union tpacket_stats_u st;
        struct tpacket_rollover_stats rstats;
-       struct packet_rollover *rollover;
 
        if (level != SOL_PACKET)
                return -ENOPROTOOPT;
@@ -3922,18 +3920,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
                       0);
                break;
        case PACKET_ROLLOVER_STATS:
-               rcu_read_lock();
-               rollover = rcu_dereference(po->rollover);
-               if (rollover) {
-                       rstats.tp_all = atomic_long_read(&rollover->num);
-                       rstats.tp_huge = atomic_long_read(&rollover->num_huge);
-                       rstats.tp_failed = atomic_long_read(&rollover->num_failed);
-                       data = &rstats;
-                       lv = sizeof(rstats);
-               }
-               rcu_read_unlock();
-               if (!rollover)
+               if (!po->rollover)
                        return -EINVAL;
+               rstats.tp_all = atomic_long_read(&po->rollover->num);
+               rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+               rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+               data = &rstats;
+               lv = sizeof(rstats);
                break;
        case PACKET_TX_HAS_OFF:
                val = po->tp_tx_has_off;
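
The fanout_add() changes above turn rollover handling into an ownership transfer: the freshly allocated packet_rollover is attached to the socket only once the fanout join has definitely succeeded, the local pointer is cleared at that point, and whatever is still held locally at the out label is freed with a plain kfree() in place of the old kfree_rcu() calls. A small userspace sketch of that transfer-on-success pattern, using hypothetical demo types in place of packet_sock and packet_rollover:

#include <stdio.h>
#include <stdlib.h>

struct demo_rollover { long num; };                     /* stand-in for packet_rollover */
struct demo_sock { struct demo_rollover *rollover; };   /* stand-in for packet_sock */

/* Attach the allocation only on success; whatever remains in the local
 * pointer at the end is freed unconditionally, so no error path leaks it
 * and no failed join leaves state attached to the socket. */
static int demo_fanout_add(struct demo_sock *po, int join_ok)
{
        struct demo_rollover *rollover = calloc(1, sizeof(*rollover));
        int err = -1;

        if (!rollover)
                return -12;                             /* -ENOMEM */

        if (join_ok) {
                po->rollover = rollover;                /* ownership moves to the socket */
                rollover = NULL;
                err = 0;
        }

        free(rollover);                                 /* no-op once ownership has moved */
        return err;
}

int main(void)
{
        struct demo_sock po = { 0 };
        int err;

        err = demo_fanout_add(&po, 0);
        printf("failed join: err=%d attached=%d\n", err, po.rollover != NULL);
        err = demo_fanout_add(&po, 1);
        printf("good join:   err=%d attached=%d\n", err, po.rollover != NULL);
        free(po.rollover);
        return 0;
}
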
index 562fbc1..a1d2b23 100644 (file)
@@ -95,7 +95,6 @@ struct packet_fanout {
 
 struct packet_rollover {
        int                     sock;
-       struct rcu_head         rcu;
        atomic_long_t           num;
        atomic_long_t           num_huge;
        atomic_long_t           num_failed;
index 8886f15..634cfcb 100644 (file)
@@ -183,7 +183,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
        long i;
        int ret;
 
-       if (rs->rs_bound_addr == 0) {
+       if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }
@@ -525,6 +525,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
 
        local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
 
+       if (args->nr_local == 0)
+               return -EINVAL;
+
        /* figure out the number of pages in the vector */
        for (i = 0; i < args->nr_local; i++) {
                if (copy_from_user(&vec, &local_vec[i],
@@ -874,6 +877,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
 err:
        if (page)
                put_page(page);
+       rm->atomic.op_active = 0;
        kfree(rm->atomic.op_notifier);
 
        return ret;
index b52cdc8..f72466c 100644 (file)
@@ -1009,6 +1009,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
                        continue;
 
                if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
+                       if (cmsg->cmsg_len <
+                           CMSG_LEN(sizeof(struct rds_rdma_args)))
+                               return -EINVAL;
                        args = CMSG_DATA(cmsg);
                        *rdma_bytes += args->remote_vec.bytes;
                }
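
The rds_rdma_bytes() hunk above refuses to read a struct rds_rdma_args out of an RDS_CMSG_RDMA_ARGS control message whose advertised cmsg_len is too short to actually contain it. The same shape of check can be exercised from userspace with the standard CMSG_* macros; demo_args below is a hypothetical stand-in for rds_rdma_args and the buffer setup follows the usual cmsg(3) pattern:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

struct demo_args { unsigned long remote_bytes; };       /* stand-in for rds_rdma_args */

/* Only dereference CMSG_DATA() after checking that cmsg_len covers the
 * payload we expect, as the added RDS_CMSG_RDMA_ARGS length check does. */
static int sum_rdma_bytes(struct msghdr *msg, unsigned long *total)
{
        struct cmsghdr *cmsg;

        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct demo_args)))
                        return -22;                     /* -EINVAL: truncated payload */
                *total += ((struct demo_args *)CMSG_DATA(cmsg))->remote_bytes;
        }
        return 0;
}

int main(void)
{
        union {
                char buf[CMSG_SPACE(sizeof(struct demo_args))];
                struct cmsghdr align;
        } u;
        struct msghdr msg = { .msg_control = u.buf, .msg_controllen = sizeof(u.buf) };
        struct cmsghdr *cmsg;
        unsigned long total = 0;
        int ret;

        memset(u.buf, 0, sizeof(u.buf));
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_len = CMSG_LEN(sizeof(struct demo_args));
        ((struct demo_args *)CMSG_DATA(cmsg))->remote_bytes = 4096;

        ret = sum_rdma_bytes(&msg, &total);
        printf("well-formed: ret=%d total=%lu\n", ret, total);

        cmsg->cmsg_len = CMSG_LEN(1);                   /* claim a too-short payload */
        ret = sum_rdma_bytes(&msg, &total);
        printf("truncated:   ret=%d\n", ret);
        return 0;
}
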
index 6b7ee71..ab7356e 100644 (file)
@@ -90,9 +90,10 @@ void rds_tcp_nonagle(struct socket *sock)
                              sizeof(val));
 }
 
-u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
+u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
 {
-       return tcp_sk(tc->t_sock->sk)->snd_nxt;
+       /* seq# of the last byte of data in tcp send buffer */
+       return tcp_sk(tc->t_sock->sk)->write_seq;
 }
 
 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
index 1aafbf7..864ca7d 100644 (file)
@@ -54,7 +54,7 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
 void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
 void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc);
-u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
+u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
 u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
 extern struct rds_transport rds_tcp_transport;
index dc860d1..9b76e0f 100644 (file)
@@ -86,7 +86,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
                 * m_ack_seq is set to the sequence number of the last byte of
                 * header and data.  see rds_tcp_is_acked().
                 */
-               tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc);
+               tc->t_last_sent_nxt = rds_tcp_write_seq(tc);
                rm->m_ack_seq = tc->t_last_sent_nxt +
                                sizeof(struct rds_header) +
                                be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
@@ -98,7 +98,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
                        rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
 
                rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
-                        rm, rds_tcp_snd_nxt(tc),
+                        rm, rds_tcp_write_seq(tc),
                         (unsigned long long)rm->m_ack_seq);
        }
 
index 9b5c46b..dcd818f 100644 (file)
@@ -285,6 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
                                           bool upgrade)
 {
        struct rxrpc_conn_parameters cp;
+       struct rxrpc_call_params p;
        struct rxrpc_call *call;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        int ret;
@@ -302,6 +303,10 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
        if (key && !key->payload.data[0])
                key = NULL; /* a no-security key */
 
+       memset(&p, 0, sizeof(p));
+       p.user_call_ID = user_call_ID;
+       p.tx_total_len = tx_total_len;
+
        memset(&cp, 0, sizeof(cp));
        cp.local                = rx->local;
        cp.key                  = key;
@@ -309,8 +314,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
        cp.exclusive            = false;
        cp.upgrade              = upgrade;
        cp.service_id           = srx->srx_service;
-       call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
-                                    gfp);
+       call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp);
        /* The socket has been unlocked. */
        if (!IS_ERR(call)) {
                call->notify_rx = notify_rx;
@@ -856,6 +860,7 @@ static void rxrpc_sock_destructor(struct sock *sk)
 static int rxrpc_release_sock(struct sock *sk)
 {
        struct rxrpc_sock *rx = rxrpc_sk(sk);
+       struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 
        _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
 
@@ -863,6 +868,19 @@ static int rxrpc_release_sock(struct sock *sk)
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;
 
+       /* We want to kill off all connections from a service socket
+        * as fast as possible because we can't share these; client
+        * sockets, on the other hand, can share an endpoint.
+        */
+       switch (sk->sk_state) {
+       case RXRPC_SERVER_BOUND:
+       case RXRPC_SERVER_BOUND2:
+       case RXRPC_SERVER_LISTENING:
+       case RXRPC_SERVER_LISTEN_DISABLED:
+               rx->local->service_closed = true;
+               break;
+       }
+
        spin_lock_bh(&sk->sk_receive_queue.lock);
        sk->sk_state = RXRPC_CLOSE;
        spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -878,6 +896,8 @@ static int rxrpc_release_sock(struct sock *sk)
        rxrpc_release_calls_on_socket(rx);
        flush_workqueue(rxrpc_workqueue);
        rxrpc_purge_queue(&sk->sk_receive_queue);
+       rxrpc_queue_work(&rxnet->service_conn_reaper);
+       rxrpc_queue_work(&rxnet->client_conn_reaper);
 
        rxrpc_put_local(rx->local);
        rx->local = NULL;
index b215199..4166883 100644 (file)
@@ -79,17 +79,20 @@ struct rxrpc_net {
        struct list_head        conn_proc_list; /* List of conns in this namespace for proc */
        struct list_head        service_conns;  /* Service conns in this namespace */
        rwlock_t                conn_lock;      /* Lock for ->conn_proc_list, ->service_conns */
-       struct delayed_work     service_conn_reaper;
+       struct work_struct      service_conn_reaper;
+       struct timer_list       service_conn_reap_timer;
 
        unsigned int            nr_client_conns;
        unsigned int            nr_active_client_conns;
        bool                    kill_all_client_conns;
+       bool                    live;
        spinlock_t              client_conn_cache_lock; /* Lock for ->*_client_conns */
        spinlock_t              client_conn_discard_lock; /* Prevent multiple discarders */
        struct list_head        waiting_client_conns;
        struct list_head        active_client_conns;
        struct list_head        idle_client_conns;
-       struct delayed_work     client_conn_reaper;
+       struct work_struct      client_conn_reaper;
+       struct timer_list       client_conn_reap_timer;
 
        struct list_head        local_endpoints;
        struct mutex            local_mutex;    /* Lock for ->local_endpoints */
@@ -265,6 +268,7 @@ struct rxrpc_local {
        rwlock_t                services_lock;  /* lock for services list */
        int                     debug_id;       /* debug ID for printks */
        bool                    dead;
+       bool                    service_closed; /* Service socket closed */
        struct sockaddr_rxrpc   srx;            /* local address */
 };
 
@@ -338,8 +342,17 @@ enum rxrpc_conn_flag {
        RXRPC_CONN_DONT_REUSE,          /* Don't reuse this connection */
        RXRPC_CONN_COUNTED,             /* Counted by rxrpc_nr_client_conns */
        RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
+       RXRPC_CONN_FINAL_ACK_0,         /* Need final ACK for channel 0 */
+       RXRPC_CONN_FINAL_ACK_1,         /* Need final ACK for channel 1 */
+       RXRPC_CONN_FINAL_ACK_2,         /* Need final ACK for channel 2 */
+       RXRPC_CONN_FINAL_ACK_3,         /* Need final ACK for channel 3 */
 };
 
+#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |   \
+                                  (1UL << RXRPC_CONN_FINAL_ACK_1) |    \
+                                  (1UL << RXRPC_CONN_FINAL_ACK_2) |    \
+                                  (1UL << RXRPC_CONN_FINAL_ACK_3))
+
 /*
  * Events that can be raised upon a connection.
  */
@@ -393,6 +406,7 @@ struct rxrpc_connection {
 #define RXRPC_ACTIVE_CHANS_MASK        ((1 << RXRPC_MAXCALLS) - 1)
        struct list_head        waiting_calls;  /* Calls waiting for channels */
        struct rxrpc_channel {
+               unsigned long           final_ack_at;   /* Time at which to issue final ACK */
                struct rxrpc_call __rcu *call;          /* Active call */
                u32                     call_id;        /* ID of current call */
                u32                     call_counter;   /* Call ID counter */
@@ -404,6 +418,7 @@ struct rxrpc_connection {
                };
        } channels[RXRPC_MAXCALLS];
 
+       struct timer_list       timer;          /* Conn event timer */
        struct work_struct      processor;      /* connection event processor */
        union {
                struct rb_node  client_node;    /* Node in local->client_conns */
@@ -457,9 +472,10 @@ enum rxrpc_call_flag {
 enum rxrpc_call_event {
        RXRPC_CALL_EV_ACK,              /* need to generate ACK */
        RXRPC_CALL_EV_ABORT,            /* need to generate abort */
-       RXRPC_CALL_EV_TIMER,            /* Timer expired */
        RXRPC_CALL_EV_RESEND,           /* Tx resend required */
        RXRPC_CALL_EV_PING,             /* Ping send required */
+       RXRPC_CALL_EV_EXPIRED,          /* Expiry occurred */
+       RXRPC_CALL_EV_ACK_LOST,         /* ACK may be lost, send ping */
 };
 
 /*
@@ -503,10 +519,16 @@ struct rxrpc_call {
        struct rxrpc_peer       *peer;          /* Peer record for remote address */
        struct rxrpc_sock __rcu *socket;        /* socket responsible */
        struct mutex            user_mutex;     /* User access mutex */
-       ktime_t                 ack_at;         /* When deferred ACK needs to happen */
-       ktime_t                 resend_at;      /* When next resend needs to happen */
-       ktime_t                 ping_at;        /* When next to send a ping */
-       ktime_t                 expire_at;      /* When the call times out */
+       unsigned long           ack_at;         /* When deferred ACK needs to happen */
+       unsigned long           ack_lost_at;    /* When ACK is figured as lost */
+       unsigned long           resend_at;      /* When next resend needs to happen */
+       unsigned long           ping_at;        /* When next to send a ping */
+       unsigned long           keepalive_at;   /* When next to send a keepalive ping */
+       unsigned long           expect_rx_by;   /* When we expect to get a packet by */
+       unsigned long           expect_req_by;  /* When we expect to get a request DATA packet by */
+       unsigned long           expect_term_by; /* When we expect call termination by */
+       u32                     next_rx_timo;   /* Timeout for next Rx packet (jif) */
+       u32                     next_req_timo;  /* Timeout for next Rx request packet (jif) */
        struct timer_list       timer;          /* Combined event timer */
        struct work_struct      processor;      /* Event processor */
        rxrpc_notify_rx_t       notify_rx;      /* kernel service Rx notification function */
@@ -609,6 +631,8 @@ struct rxrpc_call {
        ktime_t                 acks_latest_ts; /* Timestamp of latest ACK received */
        rxrpc_serial_t          acks_latest;    /* serial number of latest ACK received */
        rxrpc_seq_t             acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
+       rxrpc_seq_t             acks_lost_top;  /* tx_top at the time lost-ack ping sent */
+       rxrpc_serial_t          acks_lost_ping; /* Serial number of probe ACK */
 };
 
 /*
@@ -632,6 +656,35 @@ struct rxrpc_ack_summary {
        u8                      cumulative_acks;
 };
 
+/*
+ * sendmsg() cmsg-specified parameters.
+ */
+enum rxrpc_command {
+       RXRPC_CMD_SEND_DATA,            /* send data message */
+       RXRPC_CMD_SEND_ABORT,           /* request abort generation */
+       RXRPC_CMD_ACCEPT,               /* [server] accept incoming call */
+       RXRPC_CMD_REJECT_BUSY,          /* [server] reject a call as busy */
+};
+
+struct rxrpc_call_params {
+       s64                     tx_total_len;   /* Total Tx data length (if send data) */
+       unsigned long           user_call_ID;   /* User's call ID */
+       struct {
+               u32             hard;           /* Maximum lifetime (sec) */
+               u32             idle;           /* Max time since last data packet (msec) */
+               u32             normal;         /* Max time since last call packet (msec) */
+       } timeouts;
+       u8                      nr_timeouts;    /* Number of timeouts specified */
+};
+
+struct rxrpc_send_params {
+       struct rxrpc_call_params call;
+       u32                     abort_code;     /* Abort code to Tx (if abort) */
+       enum rxrpc_command      command : 8;    /* The command to implement */
+       bool                    exclusive;      /* Shared or exclusive call */
+       bool                    upgrade;        /* If the connection is upgradeable */
+};
+
 #include <trace/events/rxrpc.h>
 
 /*
@@ -657,12 +710,19 @@ int rxrpc_reject_call(struct rxrpc_sock *);
 /*
  * call_event.c
  */
-void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
-void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
                       enum rxrpc_propose_ack_trace);
 void rxrpc_process_call(struct work_struct *);
 
+static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+                                          unsigned long expire_at,
+                                          unsigned long now,
+                                          enum rxrpc_timer_trace why)
+{
+       trace_rxrpc_timer(call, why, now);
+       timer_reduce(&call->timer, expire_at);
+}
+
 /*
  * call_object.c
  */
@@ -672,11 +732,11 @@ extern unsigned int rxrpc_max_call_lifetime;
 extern struct kmem_cache *rxrpc_call_jar;
 
 struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
-struct rxrpc_call *rxrpc_alloc_call(gfp_t);
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
                                         struct rxrpc_conn_parameters *,
                                         struct sockaddr_rxrpc *,
-                                        unsigned long, s64, gfp_t);
+                                        struct rxrpc_call_params *, gfp_t);
 int rxrpc_retry_client_call(struct rxrpc_sock *,
                            struct rxrpc_call *,
                            struct rxrpc_conn_parameters *,
@@ -803,8 +863,8 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
  */
 extern unsigned int rxrpc_max_client_connections;
 extern unsigned int rxrpc_reap_client_connections;
-extern unsigned int rxrpc_conn_idle_client_expiry;
-extern unsigned int rxrpc_conn_idle_client_fast_expiry;
+extern unsigned long rxrpc_conn_idle_client_expiry;
+extern unsigned long rxrpc_conn_idle_client_fast_expiry;
 extern struct idr rxrpc_client_conn_ids;
 
 void rxrpc_destroy_client_conn_ids(void);
@@ -825,6 +885,7 @@ void rxrpc_process_connection(struct work_struct *);
  * conn_object.c
  */
 extern unsigned int rxrpc_connection_expiry;
+extern unsigned int rxrpc_closed_conn_expiry;
 
 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
@@ -861,6 +922,12 @@ static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
                rxrpc_put_service_conn(conn);
 }
 
+static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
+                                          unsigned long expire_at)
+{
+       timer_reduce(&conn->timer, expire_at);
+}
+
 /*
  * conn_service.c
  */
@@ -930,13 +997,13 @@ static inline void rxrpc_queue_local(struct rxrpc_local *local)
  * misc.c
  */
 extern unsigned int rxrpc_max_backlog __read_mostly;
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned long rxrpc_requested_ack_delay;
+extern unsigned long rxrpc_soft_ack_delay;
+extern unsigned long rxrpc_idle_ack_delay;
 extern unsigned int rxrpc_rx_window_size;
 extern unsigned int rxrpc_rx_mtu;
 extern unsigned int rxrpc_rx_jumbo_max;
-extern unsigned int rxrpc_resend_timeout;
+extern unsigned long rxrpc_resend_timeout;
 
 extern const s8 rxrpc_ack_priority[];
 
@@ -954,7 +1021,7 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
 /*
  * output.c
  */
-int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
+int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
 int rxrpc_send_abort_packet(struct rxrpc_call *);
 int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
 void rxrpc_reject_packets(struct rxrpc_local *);
index cbd1701..3028298 100644 (file)
@@ -94,7 +94,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
        /* Now it gets complicated, because calls get registered with the
         * socket here, particularly if a user ID is preassigned by the user.
         */
-       call = rxrpc_alloc_call(gfp);
+       call = rxrpc_alloc_call(rx, gfp);
        if (!call)
                return -ENOMEM;
        call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
index 3574508..ad2ab11 100644 (file)
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-/*
- * Set the timer
- */
-void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-                      ktime_t now)
-{
-       unsigned long t_j, now_j = jiffies;
-       ktime_t t;
-       bool queue = false;
-
-       if (call->state < RXRPC_CALL_COMPLETE) {
-               t = call->expire_at;
-               if (!ktime_after(t, now)) {
-                       trace_rxrpc_timer(call, why, now, now_j);
-                       queue = true;
-                       goto out;
-               }
-
-               if (!ktime_after(call->resend_at, now)) {
-                       call->resend_at = call->expire_at;
-                       if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
-                               queue = true;
-               } else if (ktime_before(call->resend_at, t)) {
-                       t = call->resend_at;
-               }
-
-               if (!ktime_after(call->ack_at, now)) {
-                       call->ack_at = call->expire_at;
-                       if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
-                               queue = true;
-               } else if (ktime_before(call->ack_at, t)) {
-                       t = call->ack_at;
-               }
-
-               if (!ktime_after(call->ping_at, now)) {
-                       call->ping_at = call->expire_at;
-                       if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
-                               queue = true;
-               } else if (ktime_before(call->ping_at, t)) {
-                       t = call->ping_at;
-               }
-
-               t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
-               t_j += jiffies;
-
-               /* We have to make sure that the calculated jiffies value falls
-                * at or after the nsec value, or we may loop ceaselessly
-                * because the timer times out, but we haven't reached the nsec
-                * timeout yet.
-                */
-               t_j++;
-
-               if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
-                       mod_timer(&call->timer, t_j);
-                       trace_rxrpc_timer(call, why, now, now_j);
-               }
-       }
-
-out:
-       if (queue)
-               rxrpc_queue_call(call);
-}
-
-/*
- * Set the timer
- */
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-                    ktime_t now)
-{
-       read_lock_bh(&call->state_lock);
-       __rxrpc_set_timer(call, why, now);
-       read_unlock_bh(&call->state_lock);
-}
-
 /*
  * Propose a PING ACK be sent.
  */
@@ -106,12 +32,13 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
                    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
                        rxrpc_queue_call(call);
        } else {
-               ktime_t now = ktime_get_real();
-               ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);
+               unsigned long now = jiffies;
+               unsigned long ping_at = now + rxrpc_idle_ack_delay;
 
-               if (ktime_before(ping_at, call->ping_at)) {
-                       call->ping_at = ping_at;
-                       rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
+               if (time_before(ping_at, call->ping_at)) {
+                       WRITE_ONCE(call->ping_at, ping_at);
+                       rxrpc_reduce_call_timer(call, ping_at, now,
+                                               rxrpc_timer_set_for_ping);
                }
        }
 }
@@ -125,8 +52,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                                enum rxrpc_propose_ack_trace why)
 {
        enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
-       unsigned int expiry = rxrpc_soft_ack_delay;
-       ktime_t now, ack_at;
+       unsigned long expiry = rxrpc_soft_ack_delay;
        s8 prior = rxrpc_ack_priority[ack_reason];
 
        /* Pings are handled specially because we don't want to accidentally
@@ -190,11 +116,18 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                    background)
                        rxrpc_queue_call(call);
        } else {
-               now = ktime_get_real();
-               ack_at = ktime_add_ms(now, expiry);
-               if (ktime_before(ack_at, call->ack_at)) {
-                       call->ack_at = ack_at;
-                       rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
+               unsigned long now = jiffies, ack_at;
+
+               if (call->peer->rtt_usage > 0)
+                       ack_at = nsecs_to_jiffies(call->peer->rtt);
+               else
+                       ack_at = expiry;
+
+               ack_at += now;
+               if (time_before(ack_at, call->ack_at)) {
+                       WRITE_ONCE(call->ack_at, ack_at);
+                       rxrpc_reduce_call_timer(call, ack_at, now,
+                                               rxrpc_timer_set_for_ack);
                }
        }
 
@@ -227,18 +160,28 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
 /*
  * Perform retransmission of NAK'd and unack'd packets.
  */
-static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
+static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 {
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
+       unsigned long resend_at;
        rxrpc_seq_t cursor, seq, top;
-       ktime_t max_age, oldest, ack_ts;
+       ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo;
        int ix;
        u8 annotation, anno_type, retrans = 0, unacked = 0;
 
        _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
 
-       max_age = ktime_sub_ms(now, rxrpc_resend_timeout);
+       if (call->peer->rtt_usage > 1)
+               timeout = ns_to_ktime(call->peer->rtt * 3 / 2);
+       else
+               timeout = ms_to_ktime(rxrpc_resend_timeout);
+       min_timeo = ns_to_ktime((1000000000 / HZ) * 4);
+       if (ktime_before(timeout, min_timeo))
+               timeout = min_timeo;
+
+       now = ktime_get_real();
+       max_age = ktime_sub(now, timeout);
 
        spin_lock_bh(&call->lock);
 
@@ -282,7 +225,9 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
                                       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
        }
 
-       call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
+       resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
+       resend_at += jiffies + rxrpc_resend_timeout;
+       WRITE_ONCE(call->resend_at, resend_at);
 
        if (unacked)
                rxrpc_congestion_timeout(call);
@@ -292,14 +237,15 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
         * retransmitting data.
         */
        if (!retrans) {
-               rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+               rxrpc_reduce_call_timer(call, resend_at, now,
+                                       rxrpc_timer_set_for_resend);
                spin_unlock_bh(&call->lock);
                ack_ts = ktime_sub(now, call->acks_latest_ts);
                if (ktime_to_ns(ack_ts) < call->peer->rtt)
                        goto out;
                rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
                                  rxrpc_propose_ack_ping_for_lost_ack);
-               rxrpc_send_ack_packet(call, true);
+               rxrpc_send_ack_packet(call, true, NULL);
                goto out;
        }
 
@@ -364,7 +310,8 @@ void rxrpc_process_call(struct work_struct *work)
 {
        struct rxrpc_call *call =
                container_of(work, struct rxrpc_call, processor);
-       ktime_t now;
+       rxrpc_serial_t *send_ack;
+       unsigned long now, next, t;
 
        rxrpc_see_call(call);
 
@@ -384,22 +331,89 @@ recheck_state:
                goto out_put;
        }
 
-       now = ktime_get_real();
-       if (ktime_before(call->expire_at, now)) {
+       /* Work out if any timeouts tripped */
+       now = jiffies;
+       t = READ_ONCE(call->expect_rx_by);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
+               set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+       }
+
+       t = READ_ONCE(call->expect_req_by);
+       if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
+           time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
+               set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+       }
+
+       t = READ_ONCE(call->expect_term_by);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
+               set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+       }
+
+       t = READ_ONCE(call->ack_at);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
+               cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
+               set_bit(RXRPC_CALL_EV_ACK, &call->events);
+       }
+
+       t = READ_ONCE(call->ack_lost_at);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
+               cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
+               set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
+       }
+
+       t = READ_ONCE(call->keepalive_at);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
+               cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
+               rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+                                 rxrpc_propose_ack_ping_for_keepalive);
+               set_bit(RXRPC_CALL_EV_PING, &call->events);
+       }
+
+       t = READ_ONCE(call->ping_at);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
+               cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
+               set_bit(RXRPC_CALL_EV_PING, &call->events);
+       }
+
+       t = READ_ONCE(call->resend_at);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
+               cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
+               set_bit(RXRPC_CALL_EV_RESEND, &call->events);
+       }
+
+       /* Process events */
+       if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
                rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                goto recheck_state;
        }
 
-       if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
+       send_ack = NULL;
+       if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
+               call->acks_lost_top = call->tx_top;
+               rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+                                 rxrpc_propose_ack_ping_for_lost_ack);
+               send_ack = &call->acks_lost_ping;
+       }
+
+       if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
+           send_ack) {
                if (call->ackr_reason) {
-                       rxrpc_send_ack_packet(call, false);
+                       rxrpc_send_ack_packet(call, false, send_ack);
                        goto recheck_state;
                }
        }
 
        if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
-               rxrpc_send_ack_packet(call, true);
+               rxrpc_send_ack_packet(call, true, NULL);
                goto recheck_state;
        }
 
@@ -408,7 +422,24 @@ recheck_state:
                goto recheck_state;
        }
 
-       rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+       /* Make sure the timer is restarted */
+       next = call->expect_rx_by;
+
+#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+
+       set(call->expect_req_by);
+       set(call->expect_term_by);
+       set(call->ack_at);
+       set(call->ack_lost_at);
+       set(call->resend_at);
+       set(call->keepalive_at);
+       set(call->ping_at);
+
+       now = jiffies;
+       if (time_after_eq(now, next))
+               goto recheck_state;
+
+       rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
 
        /* other events may have been raised since we started checking */
        if (call->events && call->state < RXRPC_CALL_COMPLETE) {
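
The call-event rework above replaces the ktime_t-based timer bookkeeping with a set of jiffies deadlines (ack_at, ack_lost_at, resend_at, ping_at, keepalive_at, expect_rx_by, expect_req_by, expect_term_by) that are tested with time_after_eq() and re-armed through timer_reduce(), so the soonest pending deadline always drives the call timer. The wrap-safe comparison those checks rely on is plain modular arithmetic; a standalone userspace sketch, with the macro body mirroring the kernel's definition minus its type checking:

#include <stdio.h>

/* True when 'a' is at or past 'b' even if the tick counter has wrapped:
 * the unsigned difference is reinterpreted as signed, so a deadline just
 * ahead reads negative and one just behind reads non-negative. */
#define time_after_eq(a, b)     ((long)((a) - (b)) >= 0)

int main(void)
{
        unsigned long now = (unsigned long)-10;         /* 10 ticks before the counter wraps */
        unsigned long deadline = 5;                     /* 15 ticks ahead, after the wrap */

        printf("expired now?      %d\n", time_after_eq(now, deadline));       /* 0: not yet */
        printf("expired 20 later? %d\n", time_after_eq(now + 20, deadline));  /* 1: past it */
        return 0;
}
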
index 994dc2d..0b2db38 100644 (file)
@@ -51,10 +51,14 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
 
        _enter("%d", call->debug_id);
 
-       if (call->state < RXRPC_CALL_COMPLETE)
-               rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
+       if (call->state < RXRPC_CALL_COMPLETE) {
+               trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+               rxrpc_queue_call(call);
+       }
 }
 
+static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
+
 /*
  * find an extant server call
  * - called in process context with IRQs enabled
@@ -95,7 +99,7 @@ found_extant_call:
 /*
  * allocate a new call
  */
-struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
 {
        struct rxrpc_call *call;
 
@@ -114,6 +118,14 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
                goto nomem_2;
 
        mutex_init(&call->user_mutex);
+
+       /* Prevent lockdep reporting a deadlock false positive between the afs
+        * filesystem and sys_sendmsg() via the mmap sem.
+        */
+       if (rx->sk.sk_kern_sock)
+               lockdep_set_class(&call->user_mutex,
+                                 &rxrpc_call_user_mutex_lock_class_key);
+
        timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
@@ -128,6 +140,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
        atomic_set(&call->usage, 1);
        call->debug_id = atomic_inc_return(&rxrpc_debug_id);
        call->tx_total_len = -1;
+       call->next_rx_timo = 20 * HZ;
+       call->next_req_timo = 1 * HZ;
 
        memset(&call->sock_node, 0xed, sizeof(call->sock_node));
 
@@ -150,7 +164,8 @@ nomem:
 /*
  * Allocate a new client call.
  */
-static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
+static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
+                                                 struct sockaddr_rxrpc *srx,
                                                  gfp_t gfp)
 {
        struct rxrpc_call *call;
@@ -158,7 +173,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
 
        _enter("");
 
-       call = rxrpc_alloc_call(gfp);
+       call = rxrpc_alloc_call(rx, gfp);
        if (!call)
                return ERR_PTR(-ENOMEM);
        call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
@@ -177,15 +192,17 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
  */
 static void rxrpc_start_call_timer(struct rxrpc_call *call)
 {
-       ktime_t now = ktime_get_real(), expire_at;
-
-       expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
-       call->expire_at = expire_at;
-       call->ack_at = expire_at;
-       call->ping_at = expire_at;
-       call->resend_at = expire_at;
-       call->timer.expires = jiffies + LONG_MAX / 2;
-       rxrpc_set_timer(call, rxrpc_timer_begin, now);
+       unsigned long now = jiffies;
+       unsigned long j = now + MAX_JIFFY_OFFSET;
+
+       call->ack_at = j;
+       call->ack_lost_at = j;
+       call->resend_at = j;
+       call->ping_at = j;
+       call->expect_rx_by = j;
+       call->expect_req_by = j;
+       call->expect_term_by = j;
+       call->timer.expires = now;
 }
 
 /*
@@ -196,8 +213,7 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call)
 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
-                                        unsigned long user_call_ID,
-                                        s64 tx_total_len,
+                                        struct rxrpc_call_params *p,
                                         gfp_t gfp)
        __releases(&rx->sk.sk_lock.slock)
 {
@@ -207,18 +223,18 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
        const void *here = __builtin_return_address(0);
        int ret;
 
-       _enter("%p,%lx", rx, user_call_ID);
+       _enter("%p,%lx", rx, p->user_call_ID);
 
-       call = rxrpc_alloc_client_call(srx, gfp);
+       call = rxrpc_alloc_client_call(rx, srx, gfp);
        if (IS_ERR(call)) {
                release_sock(&rx->sk);
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }
 
-       call->tx_total_len = tx_total_len;
+       call->tx_total_len = p->tx_total_len;
        trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
-                        here, (const void *)user_call_ID);
+                        here, (const void *)p->user_call_ID);
 
        /* We need to protect a partially set up call against the user as we
         * will be acting outside the socket lock.
@@ -234,16 +250,16 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);
 
-               if (user_call_ID < xcall->user_call_ID)
+               if (p->user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
-               else if (user_call_ID > xcall->user_call_ID)
+               else if (p->user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto error_dup_user_ID;
        }
 
        rcu_assign_pointer(call->socket, rx);
-       call->user_call_ID = user_call_ID;
+       call->user_call_ID = p->user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
index 5f9624b..7f74ca3 100644 (file)
@@ -85,8 +85,8 @@
 
 __read_mostly unsigned int rxrpc_max_client_connections = 1000;
 __read_mostly unsigned int rxrpc_reap_client_connections = 900;
-__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
-__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
 
 /*
  * We use machine-unique IDs for our client connections.
@@ -554,6 +554,11 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
 
        trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
 
+       /* Cancel the final ACK on the previous call if it hasn't been sent yet
+        * as the DATA packet will implicitly ACK it.
+        */
+       clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
+
        write_lock_bh(&call->state_lock);
        if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
                call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
@@ -686,7 +691,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
 
        _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
 
-       rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
+       rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
        rxrpc_cull_active_client_conns(rxnet);
 
        ret = rxrpc_get_client_conn(call, cp, srx, gfp);
@@ -751,6 +756,18 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
        }
 }
 
+/*
+ * Set the reap timer.
+ */
+static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
+{
+       unsigned long now = jiffies;
+       unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
+
+       if (rxnet->live)
+               timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
+}
+
 /*
  * Disconnect a client call.
  */
@@ -813,6 +830,19 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
                goto out_2;
        }
 
+       /* Schedule the final ACK to be transmitted in a short while so that it
+        * can be skipped if we find a follow-on call.  The first DATA packet
+        * of the follow on call will implicitly ACK this call.
+        */
+       if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+               unsigned long final_ack_at = jiffies + 2;
+
+               WRITE_ONCE(chan->final_ack_at, final_ack_at);
+               smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
+               set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
+               rxrpc_reduce_conn_timer(conn, final_ack_at);
+       }
+
        /* Things are more complex and we need the cache lock.  We might be
         * able to simply idle the conn or it might now be lurking on the wait
         * list.  It might even get moved back to the active list whilst we're
@@ -878,9 +908,7 @@ idle_connection:
                list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
                if (rxnet->idle_client_conns.next == &conn->cache_link &&
                    !rxnet->kill_all_client_conns)
-                       queue_delayed_work(rxrpc_workqueue,
-                                          &rxnet->client_conn_reaper,
-                                          rxrpc_conn_idle_client_expiry);
+                       rxrpc_set_client_reap_timer(rxnet);
        } else {
                trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
                conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
@@ -1018,8 +1046,7 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_net *rxnet =
-               container_of(to_delayed_work(work),
-                            struct rxrpc_net, client_conn_reaper);
+               container_of(work, struct rxrpc_net, client_conn_reaper);
        unsigned long expiry, conn_expires_at, now;
        unsigned int nr_conns;
        bool did_discard = false;
@@ -1061,6 +1088,8 @@ next:
                expiry = rxrpc_conn_idle_client_expiry;
                if (nr_conns > rxrpc_reap_client_connections)
                        expiry = rxrpc_conn_idle_client_fast_expiry;
+               if (conn->params.local->service_closed)
+                       expiry = rxrpc_closed_conn_expiry * HZ;
 
                conn_expires_at = conn->idle_timestamp + expiry;
 
@@ -1096,9 +1125,8 @@ not_yet_expired:
         */
        _debug("not yet");
        if (!rxnet->kill_all_client_conns)
-               queue_delayed_work(rxrpc_workqueue,
-                                  &rxnet->client_conn_reaper,
-                                  conn_expires_at - now);
+               timer_reduce(&rxnet->client_conn_reap_timer,
+                            conn_expires_at);
 
 out:
        spin_unlock(&rxnet->client_conn_cache_lock);
@@ -1118,9 +1146,9 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
        rxnet->kill_all_client_conns = true;
        spin_unlock(&rxnet->client_conn_cache_lock);
 
-       cancel_delayed_work(&rxnet->client_conn_reaper);
+       del_timer_sync(&rxnet->client_conn_reap_timer);
 
-       if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
+       if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
                _debug("destroy: queue failed");
 
        _leave("");
index 59a51a5..4ca11be 100644 (file)
  * Retransmit terminal ACK or ABORT of the previous call.
  */
 static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
-                                      struct sk_buff *skb)
+                                      struct sk_buff *skb,
+                                      unsigned int channel)
 {
-       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
        struct rxrpc_channel *chan;
        struct msghdr msg;
-       struct kvec iov;
+       struct kvec iov[3];
        struct {
                struct rxrpc_wire_header whdr;
                union {
-                       struct {
-                               __be32 code;
-                       } abort;
-                       struct {
-                               struct rxrpc_ackpacket ack;
-                               u8 padding[3];
-                               struct rxrpc_ackinfo info;
-                       };
+                       __be32 abort_code;
+                       struct rxrpc_ackpacket ack;
                };
        } __attribute__((packed)) pkt;
+       struct rxrpc_ackinfo ack_info;
        size_t len;
-       u32 serial, mtu, call_id;
+       int ioc;
+       u32 serial, mtu, call_id, padding;
 
        _enter("%d", conn->debug_id);
 
-       chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK];
+       chan = &conn->channels[channel];
 
        /* If the last call got moved on whilst we were waiting to run, just
         * ignore this packet.
@@ -56,7 +53,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        call_id = READ_ONCE(chan->last_call);
        /* Sync with __rxrpc_disconnect_call() */
        smp_rmb();
-       if (call_id != sp->hdr.callNumber)
+       if (skb && call_id != sp->hdr.callNumber)
                return;
 
        msg.msg_name    = &conn->params.peer->srx.transport;
@@ -65,9 +62,16 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        msg.msg_controllen = 0;
        msg.msg_flags   = 0;
 
-       pkt.whdr.epoch          = htonl(sp->hdr.epoch);
-       pkt.whdr.cid            = htonl(sp->hdr.cid);
-       pkt.whdr.callNumber     = htonl(sp->hdr.callNumber);
+       iov[0].iov_base = &pkt;
+       iov[0].iov_len  = sizeof(pkt.whdr);
+       iov[1].iov_base = &padding;
+       iov[1].iov_len  = 3;
+       iov[2].iov_base = &ack_info;
+       iov[2].iov_len  = sizeof(ack_info);
+
+       pkt.whdr.epoch          = htonl(conn->proto.epoch);
+       pkt.whdr.cid            = htonl(conn->proto.cid);
+       pkt.whdr.callNumber     = htonl(call_id);
        pkt.whdr.seq            = 0;
        pkt.whdr.type           = chan->last_type;
        pkt.whdr.flags          = conn->out_clientflag;
@@ -79,27 +83,35 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        len = sizeof(pkt.whdr);
        switch (chan->last_type) {
        case RXRPC_PACKET_TYPE_ABORT:
-               pkt.abort.code  = htonl(chan->last_abort);
-               len += sizeof(pkt.abort);
+               pkt.abort_code  = htonl(chan->last_abort);
+               iov[0].iov_len += sizeof(pkt.abort_code);
+               len += sizeof(pkt.abort_code);
+               ioc = 1;
                break;
 
        case RXRPC_PACKET_TYPE_ACK:
                mtu = conn->params.peer->if_mtu;
                mtu -= conn->params.peer->hdrsize;
                pkt.ack.bufferSpace     = 0;
-               pkt.ack.maxSkew         = htons(skb->priority);
-               pkt.ack.firstPacket     = htonl(chan->last_seq);
-               pkt.ack.previousPacket  = htonl(chan->last_seq - 1);
-               pkt.ack.serial          = htonl(sp->hdr.serial);
-               pkt.ack.reason          = RXRPC_ACK_DUPLICATE;
+               pkt.ack.maxSkew         = htons(skb ? skb->priority : 0);
+               pkt.ack.firstPacket     = htonl(chan->last_seq + 1);
+               pkt.ack.previousPacket  = htonl(chan->last_seq);
+               pkt.ack.serial          = htonl(skb ? sp->hdr.serial : 0);
+               pkt.ack.reason          = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
                pkt.ack.nAcks           = 0;
-               pkt.info.rxMTU          = htonl(rxrpc_rx_mtu);
-               pkt.info.maxMTU         = htonl(mtu);
-               pkt.info.rwind          = htonl(rxrpc_rx_window_size);
-               pkt.info.jumbo_max      = htonl(rxrpc_rx_jumbo_max);
+               ack_info.rxMTU          = htonl(rxrpc_rx_mtu);
+               ack_info.maxMTU         = htonl(mtu);
+               ack_info.rwind          = htonl(rxrpc_rx_window_size);
+               ack_info.jumbo_max      = htonl(rxrpc_rx_jumbo_max);
                pkt.whdr.flags          |= RXRPC_SLOW_START_OK;
-               len += sizeof(pkt.ack) + sizeof(pkt.info);
+               padding                 = 0;
+               iov[0].iov_len += sizeof(pkt.ack);
+               len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
+               ioc = 3;
                break;
+
+       default:
+               return;
        }
 
        /* Resync with __rxrpc_disconnect_call() and check that the last call
@@ -109,9 +121,6 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        if (READ_ONCE(chan->last_call) != call_id)
                return;
 
-       iov.iov_base    = &pkt;
-       iov.iov_len     = len;
-
        serial = atomic_inc_return(&conn->serial);
        pkt.whdr.serial = htonl(serial);
 
@@ -126,7 +135,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
                break;
        }
 
-       kernel_sendmsg(conn->params.local->socket, &msg, &iov, 1, len);
+       kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
        _leave("");
        return;
 }
@@ -272,7 +281,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_DATA:
        case RXRPC_PACKET_TYPE_ACK:
-               rxrpc_conn_retransmit_call(conn, skb);
+               rxrpc_conn_retransmit_call(conn, skb,
+                                          sp->hdr.cid & RXRPC_CHANNELMASK);
                return 0;
 
        case RXRPC_PACKET_TYPE_BUSY:
@@ -378,6 +388,48 @@ abort:
        _leave(" [aborted]");
 }
 
+/*
+ * Process delayed final ACKs that we haven't subsumed into a subsequent call.
+ */
+static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
+{
+       unsigned long j = jiffies, next_j;
+       unsigned int channel;
+       bool set;
+
+again:
+       next_j = j + LONG_MAX;
+       set = false;
+       for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
+               struct rxrpc_channel *chan = &conn->channels[channel];
+               unsigned long ack_at;
+
+               if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
+                       continue;
+
+               smp_rmb(); /* vs rxrpc_disconnect_client_call */
+               ack_at = READ_ONCE(chan->final_ack_at);
+
+               if (time_before(j, ack_at)) {
+                       if (time_before(ack_at, next_j)) {
+                               next_j = ack_at;
+                               set = true;
+                       }
+                       continue;
+               }
+
+               if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
+                                      &conn->flags))
+                       rxrpc_conn_retransmit_call(conn, NULL, channel);
+       }
+
+       j = jiffies;
+       if (time_before_eq(next_j, j))
+               goto again;
+       if (set)
+               rxrpc_reduce_conn_timer(conn, next_j);
+}
+
 /*
  * connection-level event processor
  */
@@ -394,6 +446,10 @@ void rxrpc_process_connection(struct work_struct *work)
        if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
                rxrpc_secure_connection(conn);
 
+       /* Process delayed ACKs whose time has come. */
+       if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
+               rxrpc_process_delayed_final_acks(conn);
+
        /* go through the conn-level event packets, releasing the ref on this
         * connection that each one has when we've finished with it */
        while ((skb = skb_dequeue(&conn->rx_queue))) {
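
rxrpc_process_delayed_final_acks() above follows a common jiffies idiom: scan a fixed set of slots, fire any whose deadline has passed, and rearm the timer for the earliest deadline still pending, using time_before() so the comparisons stay correct across jiffies wrap-around. A generic sketch of that idiom under those assumptions (slot_set and fire_slot are illustrative names, not rxrpc code):

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>

#define NR_SLOTS 4

struct slot_set {
	struct timer_list	timer;
	unsigned long		deadline[NR_SLOTS];	/* absolute jiffies */
	unsigned long		pending;		/* bitmask of armed slots */
};

static void fire_slot(struct slot_set *s, int i)
{
	/* ... transmit whatever slot i was waiting to send ... */
}

static void scan_slots(struct slot_set *s)
{
	unsigned long now = jiffies;
	unsigned long next = now + MAX_JIFFY_OFFSET;	/* "far future" */
	bool rearm = false;
	int i;

	for (i = 0; i < NR_SLOTS; i++) {
		if (!test_bit(i, &s->pending))
			continue;

		if (time_before(now, s->deadline[i])) {
			/* Not due yet: remember the earliest pending deadline. */
			if (time_before(s->deadline[i], next)) {
				next = s->deadline[i];
				rearm = true;
			}
			continue;
		}

		/* Due: atomically claim the slot before firing. */
		if (test_and_clear_bit(i, &s->pending))
			fire_slot(s, i);
	}

	if (rearm)
		timer_reduce(&s->timer, next);
}

The real function additionally re-scans when the earliest remaining deadline has already passed by the time the loop finishes, which this sketch omits.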
index fe57579..c628351 100644 (file)
 /*
  * Time till a connection expires after last use (in seconds).
  */
-unsigned int rxrpc_connection_expiry = 10 * 60;
+unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
+unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
 
 static void rxrpc_destroy_connection(struct rcu_head *);
 
+static void rxrpc_connection_timer(struct timer_list *timer)
+{
+       struct rxrpc_connection *conn =
+               container_of(timer, struct rxrpc_connection, timer);
+
+       rxrpc_queue_conn(conn);
+}
+
 /*
  * allocate a new connection
  */
@@ -38,6 +47,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
                INIT_LIST_HEAD(&conn->cache_link);
                spin_lock_init(&conn->channel_lock);
                INIT_LIST_HEAD(&conn->waiting_calls);
+               timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
                INIT_WORK(&conn->processor, &rxrpc_process_connection);
                INIT_LIST_HEAD(&conn->proc_link);
                INIT_LIST_HEAD(&conn->link);
@@ -300,22 +310,30 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
        return conn;
 }
 
+/*
+ * Set the service connection reap timer.
+ */
+static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
+                                        unsigned long reap_at)
+{
+       if (rxnet->live)
+               timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
+}
+
 /*
  * Release a service connection
  */
 void rxrpc_put_service_conn(struct rxrpc_connection *conn)
 {
-       struct rxrpc_net *rxnet;
        const void *here = __builtin_return_address(0);
        int n;
 
        n = atomic_dec_return(&conn->usage);
        trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
        ASSERTCMP(n, >=, 0);
-       if (n == 0) {
-               rxnet = conn->params.local->rxnet;
-               rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
-       }
+       if (n == 1)
+               rxrpc_set_service_reap_timer(conn->params.local->rxnet,
+                                            jiffies + rxrpc_connection_expiry);
 }
 
 /*
@@ -332,6 +350,7 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
 
        _net("DESTROY CONN %d", conn->debug_id);
 
+       del_timer_sync(&conn->timer);
        rxrpc_purge_queue(&conn->rx_queue);
 
        conn->security->clear(conn);
@@ -351,17 +370,15 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 {
        struct rxrpc_connection *conn, *_p;
        struct rxrpc_net *rxnet =
-               container_of(to_delayed_work(work),
-                            struct rxrpc_net, service_conn_reaper);
-       unsigned long reap_older_than, earliest, idle_timestamp, now;
+               container_of(work, struct rxrpc_net, service_conn_reaper);
+       unsigned long expire_at, earliest, idle_timestamp, now;
 
        LIST_HEAD(graveyard);
 
        _enter("");
 
        now = jiffies;
-       reap_older_than = now - rxrpc_connection_expiry * HZ;
-       earliest = ULONG_MAX;
+       earliest = now + MAX_JIFFY_OFFSET;
 
        write_lock(&rxnet->conn_lock);
        list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
@@ -371,15 +388,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
                if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
                        continue;
 
-               idle_timestamp = READ_ONCE(conn->idle_timestamp);
-               _debug("reap CONN %d { u=%d,t=%ld }",
-                      conn->debug_id, atomic_read(&conn->usage),
-                      (long)reap_older_than - (long)idle_timestamp);
-
-               if (time_after(idle_timestamp, reap_older_than)) {
-                       if (time_before(idle_timestamp, earliest))
-                               earliest = idle_timestamp;
-                       continue;
+               if (rxnet->live) {
+                       idle_timestamp = READ_ONCE(conn->idle_timestamp);
+                       expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
+                       if (conn->params.local->service_closed)
+                               expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
+
+                       _debug("reap CONN %d { u=%d,t=%ld }",
+                              conn->debug_id, atomic_read(&conn->usage),
+                              (long)expire_at - (long)now);
+
+                       if (time_before(now, expire_at)) {
+                               if (time_before(expire_at, earliest))
+                                       earliest = expire_at;
+                               continue;
+                       }
                }
 
                /* The usage count sits at 1 whilst the object is unused on the
@@ -387,6 +410,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
                 */
                if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
                        continue;
+               trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
 
                if (rxrpc_conn_is_client(conn))
                        BUG();
@@ -397,11 +421,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
        }
        write_unlock(&rxnet->conn_lock);
 
-       if (earliest != ULONG_MAX) {
-               _debug("reschedule reaper %ld", (long) earliest - now);
+       if (earliest != now + MAX_JIFFY_OFFSET) {
+               _debug("reschedule reaper %ld", (long)earliest - (long)now);
                ASSERT(time_after(earliest, now));
-               rxrpc_queue_delayed_work(&rxnet->client_conn_reaper,
-                                        earliest - now);
+               rxrpc_set_service_reap_timer(rxnet, earliest);
        }
 
        while (!list_empty(&graveyard)) {
@@ -429,9 +452,8 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
 
        rxrpc_destroy_all_client_connections(rxnet);
 
-       rxrpc_connection_expiry = 0;
-       cancel_delayed_work(&rxnet->client_conn_reaper);
-       rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0);
+       del_timer_sync(&rxnet->service_conn_reap_timer);
+       rxrpc_queue_work(&rxnet->service_conn_reaper);
        flush_workqueue(rxrpc_workqueue);
 
        write_lock(&rxnet->conn_lock);
index 1b59207..6fc6140 100644 (file)
@@ -318,16 +318,18 @@ bad_state:
 static bool rxrpc_receiving_reply(struct rxrpc_call *call)
 {
        struct rxrpc_ack_summary summary = { 0 };
+       unsigned long now, timo;
        rxrpc_seq_t top = READ_ONCE(call->tx_top);
 
        if (call->ackr_reason) {
                spin_lock_bh(&call->lock);
                call->ackr_reason = 0;
-               call->resend_at = call->expire_at;
-               call->ack_at = call->expire_at;
                spin_unlock_bh(&call->lock);
-               rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
-                               ktime_get_real());
+               now = jiffies;
+               timo = now + MAX_JIFFY_OFFSET;
+               WRITE_ONCE(call->resend_at, timo);
+               WRITE_ONCE(call->ack_at, timo);
+               trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
        }
 
        if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
@@ -437,6 +439,19 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
        if (state >= RXRPC_CALL_COMPLETE)
                return;
 
+       if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+               unsigned long timo = READ_ONCE(call->next_req_timo);
+               unsigned long now, expect_req_by;
+
+               if (timo) {
+                       now = jiffies;
+                       expect_req_by = now + timo;
+                       WRITE_ONCE(call->expect_req_by, expect_req_by);
+                       rxrpc_reduce_call_timer(call, expect_req_by, now,
+                                               rxrpc_timer_set_for_idle);
+               }
+       }
+
        /* Received data implicitly ACKs all of the request packets we sent
         * when we're acting as a client.
         */
@@ -615,6 +630,43 @@ found:
                           orig_serial, ack_serial, sent_at, resp_time);
 }
 
+/*
+ * Process the response to a ping that we sent to find out if we lost an ACK.
+ *
+ * If we got back a ping response that indicates a lower tx_top than what we
+ * had at the time of the ping transmission, we adjudge all the DATA packets
+ * sent between the response tx_top and the ping-time tx_top to have been lost.
+ */
+static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
+{
+       rxrpc_seq_t top, bottom, seq;
+       bool resend = false;
+
+       spin_lock_bh(&call->lock);
+
+       bottom = call->tx_hard_ack + 1;
+       top = call->acks_lost_top;
+       if (before(bottom, top)) {
+               for (seq = bottom; before_eq(seq, top); seq++) {
+                       int ix = seq & RXRPC_RXTX_BUFF_MASK;
+                       u8 annotation = call->rxtx_annotations[ix];
+                       u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;
+
+                       if (anno_type != RXRPC_TX_ANNO_UNACK)
+                               continue;
+                       annotation &= ~RXRPC_TX_ANNO_MASK;
+                       annotation |= RXRPC_TX_ANNO_RETRANS;
+                       call->rxtx_annotations[ix] = annotation;
+                       resend = true;
+               }
+       }
+
+       spin_unlock_bh(&call->lock);
+
+       if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+               rxrpc_queue_call(call);
+}
+
 /*
  * Process a ping response.
  */
@@ -630,6 +682,9 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
        smp_rmb();
        ping_serial = call->ping_serial;
 
+       if (orig_serial == call->acks_lost_ping)
+               rxrpc_input_check_for_lost_ack(call);
+
        if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
            before(orig_serial, ping_serial))
                return;
@@ -908,9 +963,20 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
                                    struct sk_buff *skb, u16 skew)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       unsigned long timo;
 
        _enter("%p,%p", call, skb);
 
+       timo = READ_ONCE(call->next_rx_timo);
+       if (timo) {
+               unsigned long now = jiffies, expect_rx_by;
+
+               expect_rx_by = jiffies + timo;
+               WRITE_ONCE(call->expect_rx_by, expect_rx_by);
+               rxrpc_reduce_call_timer(call, expect_rx_by, now,
+                                       rxrpc_timer_set_for_normal);
+       }
+
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_DATA:
                rxrpc_input_data(call, skb, skew);
@@ -1147,7 +1213,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
                                goto reupgrade;
                        conn->service_id = sp->hdr.serviceId;
                }
-               
+
                if (sp->hdr.callNumber == 0) {
                        /* Connection-level packet */
                        _debug("CONN %p {%d}", conn, conn->debug_id);
index 1a2d4b1..c1d9e7f 100644 (file)
  */
 unsigned int rxrpc_max_backlog __read_mostly = 10;
 
-/*
- * Maximum lifetime of a call (in ms).
- */
-unsigned int rxrpc_max_call_lifetime = 60 * 1000;
-
 /*
  * How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in ms).
+ * packet with RXRPC_REQUEST_ACK set (in jiffies).
  */
-unsigned int rxrpc_requested_ack_delay = 1;
+unsigned long rxrpc_requested_ack_delay = 1;
 
 /*
- * How long to wait before scheduling an ACK with subtype DELAY (in ms).
+ * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
  *
  * We use this when we've received new data packets.  If those packets aren't
  * all consumed within this time we will send a DELAY ACK if an ACK was not
  * requested to let the sender know it doesn't need to resend.
  */
-unsigned int rxrpc_soft_ack_delay = 1 * 1000;
+unsigned long rxrpc_soft_ack_delay = HZ;
 
 /*
- * How long to wait before scheduling an ACK with subtype IDLE (in ms).
+ * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
  *
  * We use this when we've consumed some previously soft-ACK'd packets when
  * further packets aren't immediately received to decide when to send an IDLE
 * ACK to let the other end know that it can free up its Tx buffer space.
  */
-unsigned int rxrpc_idle_ack_delay = 0.5 * 1000;
+unsigned long rxrpc_idle_ack_delay = HZ / 2;
 
 /*
  * Receive window size in packets.  This indicates the maximum number of
@@ -75,7 +70,7 @@ unsigned int rxrpc_rx_jumbo_max = 4;
 /*
  * Time till packet resend (in milliseconds).
  */
-unsigned int rxrpc_resend_timeout = 4 * 1000;
+unsigned long rxrpc_resend_timeout = 4 * HZ;
 
 const s8 rxrpc_ack_priority[] = {
        [0]                             = 0,
index 7edceb8..f18c924 100644 (file)
 
 unsigned int rxrpc_net_id;
 
+static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
+{
+       struct rxrpc_net *rxnet =
+               container_of(timer, struct rxrpc_net, client_conn_reap_timer);
+
+       if (rxnet->live)
+               rxrpc_queue_work(&rxnet->client_conn_reaper);
+}
+
+static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
+{
+       struct rxrpc_net *rxnet =
+               container_of(timer, struct rxrpc_net, service_conn_reap_timer);
+
+       if (rxnet->live)
+               rxrpc_queue_work(&rxnet->service_conn_reaper);
+}
+
 /*
  * Initialise a per-network namespace record.
  */
@@ -22,6 +40,7 @@ static __net_init int rxrpc_init_net(struct net *net)
        struct rxrpc_net *rxnet = rxrpc_net(net);
        int ret;
 
+       rxnet->live = true;
        get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
        rxnet->epoch |= RXRPC_RANDOM_EPOCH;
 
@@ -31,8 +50,10 @@ static __net_init int rxrpc_init_net(struct net *net)
        INIT_LIST_HEAD(&rxnet->conn_proc_list);
        INIT_LIST_HEAD(&rxnet->service_conns);
        rwlock_init(&rxnet->conn_lock);
-       INIT_DELAYED_WORK(&rxnet->service_conn_reaper,
-                         rxrpc_service_connection_reaper);
+       INIT_WORK(&rxnet->service_conn_reaper,
+                 rxrpc_service_connection_reaper);
+       timer_setup(&rxnet->service_conn_reap_timer,
+                   rxrpc_service_conn_reap_timeout, 0);
 
        rxnet->nr_client_conns = 0;
        rxnet->nr_active_client_conns = 0;
@@ -42,8 +63,10 @@ static __net_init int rxrpc_init_net(struct net *net)
        INIT_LIST_HEAD(&rxnet->waiting_client_conns);
        INIT_LIST_HEAD(&rxnet->active_client_conns);
        INIT_LIST_HEAD(&rxnet->idle_client_conns);
-       INIT_DELAYED_WORK(&rxnet->client_conn_reaper,
-                         rxrpc_discard_expired_client_conns);
+       INIT_WORK(&rxnet->client_conn_reaper,
+                 rxrpc_discard_expired_client_conns);
+       timer_setup(&rxnet->client_conn_reap_timer,
+                   rxrpc_client_conn_reap_timeout, 0);
 
        INIT_LIST_HEAD(&rxnet->local_endpoints);
        mutex_init(&rxnet->local_mutex);
@@ -60,6 +83,7 @@ static __net_init int rxrpc_init_net(struct net *net)
        return 0;
 
 err_proc:
+       rxnet->live = false;
        return ret;
 }
 
@@ -70,6 +94,7 @@ static __net_exit void rxrpc_exit_net(struct net *net)
 {
        struct rxrpc_net *rxnet = rxrpc_net(net);
 
+       rxnet->live = false;
        rxrpc_destroy_all_calls(rxnet);
        rxrpc_destroy_all_connections(rxnet);
        rxrpc_destroy_all_locals(rxnet);
index f47659c..42410e9 100644 (file)
@@ -32,6 +32,24 @@ struct rxrpc_abort_buffer {
        __be32 abort_code;
 };
 
+/*
+ * Arrange for a keepalive ping a certain time after we last transmitted.  This
+ * lets the far side know we're still interested in this call and helps keep
+ * the route through any intervening firewall open.
+ *
+ * Receiving a response to the ping will prevent the ->expect_rx_by timer from
+ * expiring.
+ */
+static void rxrpc_set_keepalive(struct rxrpc_call *call)
+{
+       unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
+
+       keepalive_at += now;
+       WRITE_ONCE(call->keepalive_at, keepalive_at);
+       rxrpc_reduce_call_timer(call, keepalive_at, now,
+                               rxrpc_timer_set_for_keepalive);
+}
+
 /*
  * Fill out an ACK packet.
  */
@@ -95,7 +113,8 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
 /*
  * Send an ACK call packet.
  */
-int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
+int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
+                         rxrpc_serial_t *_serial)
 {
        struct rxrpc_connection *conn = NULL;
        struct rxrpc_ack_buffer *pkt;
@@ -165,6 +184,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
                           ntohl(pkt->ack.firstPacket),
                           ntohl(pkt->ack.serial),
                           pkt->ack.reason, pkt->ack.nAcks);
+       if (_serial)
+               *_serial = serial;
 
        if (ping) {
                call->ping_serial = serial;
@@ -202,6 +223,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
                                call->ackr_seen = top;
                        spin_unlock_bh(&call->lock);
                }
+
+               rxrpc_set_keepalive(call);
        }
 
 out:
@@ -323,7 +346,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
         * ACKs if a DATA packet appears to have been lost.
         */
        if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
-           (retrans ||
+           (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
+            retrans ||
             call->cong_mode == RXRPC_CALL_SLOW_START ||
             (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
             ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
@@ -370,8 +394,23 @@ done:
                if (whdr.flags & RXRPC_REQUEST_ACK) {
                        call->peer->rtt_last_req = now;
                        trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
+                       if (call->peer->rtt_usage > 1) {
+                               unsigned long nowj = jiffies, ack_lost_at;
+
+                               ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt);
+                               if (ack_lost_at < 1)
+                                       ack_lost_at = 1;
+
+                               ack_lost_at += nowj;
+                               WRITE_ONCE(call->ack_lost_at, ack_lost_at);
+                               rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
+                                                       rxrpc_timer_set_for_lost_ack);
+                       }
                }
        }
+
+       rxrpc_set_keepalive(call);
+
        _leave(" = %d [%u]", ret, call->peer->maxdata);
        return ret;
 
index 8510a98..cc21e8d 100644 (file)
@@ -144,11 +144,13 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
        trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
        ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
 
+#if 0 // TODO: May want to transmit final ACK under some circumstances anyway
        if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
                rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
                                  rxrpc_propose_ack_terminal_ack);
-               rxrpc_send_ack_packet(call, false);
+               rxrpc_send_ack_packet(call, false, NULL);
        }
+#endif
 
        write_lock_bh(&call->state_lock);
 
@@ -161,7 +163,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
        case RXRPC_CALL_SERVER_RECV_REQUEST:
                call->tx_phase = true;
                call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
-               call->ack_at = call->expire_at;
+               call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
                write_unlock_bh(&call->state_lock);
                rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
                                  rxrpc_propose_ack_processing_op);
@@ -217,10 +219,10 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
                    after_eq(top, call->ackr_seen + 2) ||
                    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
                        rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
-                                         true, false,
+                                         true, true,
                                          rxrpc_propose_ack_rotate_rx);
-               if (call->ackr_reason)
-                       rxrpc_send_ack_packet(call, false);
+               if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
+                       rxrpc_send_ack_packet(call, false, NULL);
        }
 }
 
index 7d25955..09f2a3e 100644 (file)
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-enum rxrpc_command {
-       RXRPC_CMD_SEND_DATA,            /* send data message */
-       RXRPC_CMD_SEND_ABORT,           /* request abort generation */
-       RXRPC_CMD_ACCEPT,               /* [server] accept incoming call */
-       RXRPC_CMD_REJECT_BUSY,          /* [server] reject a call as busy */
-};
-
-struct rxrpc_send_params {
-       s64                     tx_total_len;   /* Total Tx data length (if send data) */
-       unsigned long           user_call_ID;   /* User's call ID */
-       u32                     abort_code;     /* Abort code to Tx (if abort) */
-       enum rxrpc_command      command : 8;    /* The command to implement */
-       bool                    exclusive;      /* Shared or exclusive call */
-       bool                    upgrade;        /* If the connection is upgradeable */
-};
-
 /*
  * Wait for space to appear in the Tx queue or a signal to occur.
  */
@@ -174,6 +158,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                               rxrpc_notify_end_tx_t notify_end_tx)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       unsigned long now;
        rxrpc_seq_t seq = sp->hdr.seq;
        int ret, ix;
        u8 annotation = RXRPC_TX_ANNO_UNACK;
@@ -213,11 +198,11 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                        break;
                case RXRPC_CALL_SERVER_ACK_REQUEST:
                        call->state = RXRPC_CALL_SERVER_SEND_REPLY;
-                       call->ack_at = call->expire_at;
+                       now = jiffies;
+                       WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
                        if (call->ackr_reason == RXRPC_ACK_DELAY)
                                call->ackr_reason = 0;
-                       __rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
-                                         ktime_get_real());
+                       trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
                        if (!last)
                                break;
                        /* Fall through */
@@ -239,14 +224,19 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                _debug("need instant resend %d", ret);
                rxrpc_instant_resend(call, ix);
        } else {
-               ktime_t now = ktime_get_real(), resend_at;
-
-               resend_at = ktime_add_ms(now, rxrpc_resend_timeout);
-
-               if (ktime_before(resend_at, call->resend_at)) {
-                       call->resend_at = resend_at;
-                       rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
-               }
+               unsigned long now = jiffies, resend_at;
+
+               if (call->peer->rtt_usage > 1)
+                       resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2);
+               else
+                       resend_at = rxrpc_resend_timeout;
+               if (resend_at < 1)
+                       resend_at = 1;
+
+               resend_at += now;
+               WRITE_ONCE(call->resend_at, resend_at);
+               rxrpc_reduce_call_timer(call, resend_at, now,
+                                       rxrpc_timer_set_for_send);
        }
 
        rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
@@ -295,7 +285,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
        do {
                /* Check to see if there's a ping ACK to reply to. */
                if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
-                       rxrpc_send_ack_packet(call, false);
+                       rxrpc_send_ack_packet(call, false, NULL);
 
                if (!skb) {
                        size_t size, chunk, max, space;
@@ -480,11 +470,11 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
                        if (msg->msg_flags & MSG_CMSG_COMPAT) {
                                if (len != sizeof(u32))
                                        return -EINVAL;
-                               p->user_call_ID = *(u32 *)CMSG_DATA(cmsg);
+                               p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
                        } else {
                                if (len != sizeof(unsigned long))
                                        return -EINVAL;
-                               p->user_call_ID = *(unsigned long *)
+                               p->call.user_call_ID = *(unsigned long *)
                                        CMSG_DATA(cmsg);
                        }
                        got_user_ID = true;
@@ -522,11 +512,24 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
                        break;
 
                case RXRPC_TX_LENGTH:
-                       if (p->tx_total_len != -1 || len != sizeof(__s64))
+                       if (p->call.tx_total_len != -1 || len != sizeof(__s64))
+                               return -EINVAL;
+                       p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
+                       if (p->call.tx_total_len < 0)
                                return -EINVAL;
-                       p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
-                       if (p->tx_total_len < 0)
+                       break;
+
+               case RXRPC_SET_CALL_TIMEOUT:
+                       if (len & 3 || len < 4 || len > 12)
                                return -EINVAL;
+                       memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
+                       p->call.nr_timeouts = len / 4;
+                       if (p->call.timeouts.hard > INT_MAX / HZ)
+                               return -ERANGE;
+                       if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
+                               return -ERANGE;
+                       if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
+                               return -ERANGE;
                        break;
 
                default:
@@ -536,7 +539,7 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
 
        if (!got_user_ID)
                return -EINVAL;
-       if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
+       if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
                return -EINVAL;
        _leave(" = 0");
        return 0;
@@ -576,8 +579,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
        cp.exclusive            = rx->exclusive | p->exclusive;
        cp.upgrade              = p->upgrade;
        cp.service_id           = srx->srx_service;
-       call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID,
-                                    p->tx_total_len, GFP_KERNEL);
+       call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL);
        /* The socket is now unlocked */
 
        _leave(" = %p\n", call);
@@ -594,15 +596,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 {
        enum rxrpc_call_state state;
        struct rxrpc_call *call;
+       unsigned long now, j;
        int ret;
 
        struct rxrpc_send_params p = {
-               .tx_total_len   = -1,
-               .user_call_ID   = 0,
-               .abort_code     = 0,
-               .command        = RXRPC_CMD_SEND_DATA,
-               .exclusive      = false,
-               .upgrade        = true,
+               .call.tx_total_len      = -1,
+               .call.user_call_ID      = 0,
+               .call.nr_timeouts       = 0,
+               .abort_code             = 0,
+               .command                = RXRPC_CMD_SEND_DATA,
+               .exclusive              = false,
+               .upgrade                = false,
        };
 
        _enter("");
@@ -615,15 +619,15 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                ret = -EINVAL;
                if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
                        goto error_release_sock;
-               call = rxrpc_accept_call(rx, p.user_call_ID, NULL);
+               call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
                /* The socket is now unlocked. */
                if (IS_ERR(call))
                        return PTR_ERR(call);
-               rxrpc_put_call(call, rxrpc_call_put);
-               return 0;
+               ret = 0;
+               goto out_put_unlock;
        }
 
-       call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
+       call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
        if (!call) {
                ret = -EBADSLT;
                if (p.command != RXRPC_CMD_SEND_DATA)
@@ -653,14 +657,39 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                        goto error_put;
                }
 
-               if (p.tx_total_len != -1) {
+               if (p.call.tx_total_len != -1) {
                        ret = -EINVAL;
                        if (call->tx_total_len != -1 ||
                            call->tx_pending ||
                            call->tx_top != 0)
                                goto error_put;
-                       call->tx_total_len = p.tx_total_len;
+                       call->tx_total_len = p.call.tx_total_len;
+               }
+       }
+
+       switch (p.call.nr_timeouts) {
+       case 3:
+               j = msecs_to_jiffies(p.call.timeouts.normal);
+               if (p.call.timeouts.normal > 0 && j == 0)
+                       j = 1;
+               WRITE_ONCE(call->next_rx_timo, j);
+               /* Fall through */
+       case 2:
+               j = msecs_to_jiffies(p.call.timeouts.idle);
+               if (p.call.timeouts.idle > 0 && j == 0)
+                       j = 1;
+               WRITE_ONCE(call->next_req_timo, j);
+               /* Fall through */
+       case 1:
+               if (p.call.timeouts.hard > 0) {
+                       j = msecs_to_jiffies(p.call.timeouts.hard);
+                       now = jiffies;
+                       j += now;
+                       WRITE_ONCE(call->expect_term_by, j);
+                       rxrpc_reduce_call_timer(call, j, now,
+                                               rxrpc_timer_set_for_hard);
                }
+               break;
        }
 
        state = READ_ONCE(call->state);
@@ -689,6 +718,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                ret = rxrpc_send_data(rx, call, msg, len, NULL);
        }
 
+out_put_unlock:
        mutex_unlock(&call->user_mutex);
 error_put:
        rxrpc_put_call(call, rxrpc_call_put);
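
The RXRPC_SET_CALL_TIMEOUT control message parsed above carries one to three u32 values (hard, idle, normal) alongside the usual RXRPC_USER_CALL_ID on a sendmsg(). Below is a hypothetical userspace sketch, assuming an AF_RXRPC client socket that has already been set up and connect()ed to its target; RXRPC_USER_CALL_ID and RXRPC_SET_CALL_TIMEOUT are expected to come from the updated uapi <linux/rxrpc.h>, SOL_RXRPC (272) matches the kernel's socket.h, and the idle/normal values are range-checked in milliseconds by the code above. Error handling is omitted.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/rxrpc.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC 272			/* value used by the kernel */
#endif

static ssize_t send_request_with_timeouts(int fd, unsigned long call_id,
					  const void *req, size_t req_len)
{
	/* hard, idle, normal - only as many elements as supplied are applied */
	uint32_t timeouts[3] = { 10000, 2000, 5000 };
	char control[CMSG_SPACE(sizeof(call_id)) + CMSG_SPACE(sizeof(timeouts))];
	struct iovec iov = { .iov_base = (void *)req, .iov_len = req_len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cmsg;

	memset(control, 0, sizeof(control));

	/* Identify (or start) the call this message belongs to. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(call_id));
	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

	/* Attach the per-call timeouts. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type  = RXRPC_SET_CALL_TIMEOUT;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(timeouts));
	memcpy(CMSG_DATA(cmsg), timeouts, sizeof(timeouts));

	return sendmsg(fd, &msg, 0);
}

Passing only the first element sets just the hard timeout, two elements add the idle (next-request) timeout, and three also set the normal (next-packet) timeout, matching the switch on p.call.nr_timeouts in rxrpc_do_sendmsg() above.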
index 34c706d..4a7af7a 100644 (file)
@@ -21,6 +21,8 @@ static const unsigned int four = 4;
 static const unsigned int thirtytwo = 32;
 static const unsigned int n_65535 = 65535;
 static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
+static const unsigned long one_jiffy = 1;
+static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
 
 /*
  * RxRPC operating parameters.
@@ -29,64 +31,60 @@ static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
  * information on the individual parameters.
  */
 static struct ctl_table rxrpc_sysctl_table[] = {
-       /* Values measured in milliseconds */
+       /* Values measured in milliseconds but used in jiffies */
        {
                .procname       = "req_ack_delay",
                .data           = &rxrpc_requested_ack_delay,
-               .maxlen         = sizeof(unsigned int),
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-               .extra1         = (void *)&zero,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
        {
                .procname       = "soft_ack_delay",
                .data           = &rxrpc_soft_ack_delay,
-               .maxlen         = sizeof(unsigned int),
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-               .extra1         = (void *)&one,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
        {
                .procname       = "idle_ack_delay",
                .data           = &rxrpc_idle_ack_delay,
-               .maxlen         = sizeof(unsigned int),
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-               .extra1         = (void *)&one,
-       },
-       {
-               .procname       = "resend_timeout",
-               .data           = &rxrpc_resend_timeout,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-               .extra1         = (void *)&one,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
        {
                .procname       = "idle_conn_expiry",
                .data           = &rxrpc_conn_idle_client_expiry,
-               .maxlen         = sizeof(unsigned int),
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_ms_jiffies,
-               .extra1         = (void *)&one,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
        {
                .procname       = "idle_conn_fast_expiry",
                .data           = &rxrpc_conn_idle_client_fast_expiry,
-               .maxlen         = sizeof(unsigned int),
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_ms_jiffies,
-               .extra1         = (void *)&one,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
-
-       /* Values measured in seconds but used in jiffies */
        {
-               .procname       = "max_call_lifetime",
-               .data           = &rxrpc_max_call_lifetime,
-               .maxlen         = sizeof(unsigned int),
+               .procname       = "resend_timeout",
+               .data           = &rxrpc_resend_timeout,
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-               .extra1         = (void *)&one,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
 
        /* Non-time values */
index e29a48e..a0ac42b 100644 (file)
@@ -159,7 +159,7 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
        if (action == TC_ACT_SHOT)
                this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
 
-       tm->lastuse = lastuse;
+       tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
index 1e3f10e..6445184 100644 (file)
@@ -22,7 +22,6 @@
 #include <net/pkt_sched.h>
 #include <uapi/linux/tc_act/tc_ife.h>
 #include <net/tc_act/tc_ife.h>
-#include <linux/rtnetlink.h>
 
 static int skbmark_encode(struct sk_buff *skb, void *skbdata,
                          struct tcf_meta_info *e)
index 2ea1f26..7221437 100644 (file)
@@ -22,7 +22,6 @@
 #include <net/pkt_sched.h>
 #include <uapi/linux/tc_act/tc_ife.h>
 #include <net/tc_act/tc_ife.h>
-#include <linux/rtnetlink.h>
 
 static int skbtcindex_encode(struct sk_buff *skb, void *skbdata,
                             struct tcf_meta_info *e)
index 8b3e593..08b6184 100644 (file)
@@ -239,7 +239,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
        struct tcf_t *tm = &m->tcf_tm;
 
        _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
-       tm->lastuse = lastuse;
+       tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
 static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
index 8b5abcd..9438969 100644 (file)
@@ -96,23 +96,16 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        return ret;
 }
 
-static void tcf_sample_cleanup_rcu(struct rcu_head *rcu)
+static void tcf_sample_cleanup(struct tc_action *a, int bind)
 {
-       struct tcf_sample *s = container_of(rcu, struct tcf_sample, rcu);
+       struct tcf_sample *s = to_sample(a);
        struct psample_group *psample_group;
 
-       psample_group = rcu_dereference_protected(s->psample_group, 1);
+       psample_group = rtnl_dereference(s->psample_group);
        RCU_INIT_POINTER(s->psample_group, NULL);
        psample_group_put(psample_group);
 }
 
-static void tcf_sample_cleanup(struct tc_action *a, int bind)
-{
-       struct tcf_sample *s = to_sample(a);
-
-       call_rcu(&s->rcu, tcf_sample_cleanup_rcu);
-}
-
 static bool tcf_sample_dev_ok_push(struct net_device *dev)
 {
        switch (dev->type) {
@@ -264,7 +257,6 @@ static int __init sample_init_module(void)
 
 static void __exit sample_cleanup_module(void)
 {
-       rcu_barrier();
        tcf_unregister_action(&act_sample_ops, &sample_net_ops);
 }
 
index 7d97f61..b9d63d2 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/skbuff.h>
 #include <linux/init.h>
 #include <linux/kmod.h>
-#include <linux/err.h>
 #include <linux/slab.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -336,7 +335,8 @@ static void tcf_block_put_final(struct work_struct *work)
        struct tcf_chain *chain, *tmp;
 
        rtnl_lock();
-       /* Only chain 0 should be still here. */
+
+       /* At this point, all the chains should have refcnt == 1. */
        list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
                tcf_chain_put(chain);
        rtnl_unlock();
@@ -344,15 +344,23 @@ static void tcf_block_put_final(struct work_struct *work)
 }
 
 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
- * actions should be all removed after flushing. However, filters are now
- * destroyed in tc filter workqueue with RTNL lock, they can not race here.
+ * actions should be all removed after flushing.
  */
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei)
 {
-       struct tcf_chain *chain, *tmp;
+       struct tcf_chain *chain;
 
-       list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+       if (!block)
+               return;
+       /* Hold a refcnt for all chains, except 0, so that they don't disappear
+        * while we are iterating.
+        */
+       list_for_each_entry(chain, &block->chain_list, list)
+               if (chain->index)
+                       tcf_chain_hold(chain);
+
+       list_for_each_entry(chain, &block->chain_list, list)
                tcf_chain_flush(chain);
 
        tcf_block_offload_unbind(block, q, ei);
index a9f3e31..a62586e 100644 (file)
@@ -42,7 +42,6 @@ struct cls_bpf_prog {
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
-       bool offloaded;
        u32 gen_flags;
        struct tcf_exts exts;
        u32 handle;
@@ -148,73 +147,63 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
 }
 
 static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
-                              enum tc_clsbpf_command cmd)
+                              struct cls_bpf_prog *oldprog)
 {
-       bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE;
        struct tcf_block *block = tp->chain->block;
-       bool skip_sw = tc_skip_sw(prog->gen_flags);
        struct tc_cls_bpf_offload cls_bpf = {};
+       struct cls_bpf_prog *obj;
+       bool skip_sw;
        int err;
 
+       skip_sw = prog && tc_skip_sw(prog->gen_flags);
+       obj = prog ?: oldprog;
+
        tc_cls_common_offload_init(&cls_bpf.common, tp);
-       cls_bpf.command = cmd;
-       cls_bpf.exts = &prog->exts;
-       cls_bpf.prog = prog->filter;
-       cls_bpf.name = prog->bpf_name;
-       cls_bpf.exts_integrated = prog->exts_integrated;
-       cls_bpf.gen_flags = prog->gen_flags;
+       cls_bpf.command = TC_CLSBPF_OFFLOAD;
+       cls_bpf.exts = &obj->exts;
+       cls_bpf.prog = prog ? prog->filter : NULL;
+       cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
+       cls_bpf.name = obj->bpf_name;
+       cls_bpf.exts_integrated = obj->exts_integrated;
+       cls_bpf.gen_flags = obj->gen_flags;
 
        err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
-       if (addorrep) {
+       if (prog) {
                if (err < 0) {
-                       cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
+                       cls_bpf_offload_cmd(tp, oldprog, prog);
                        return err;
                } else if (err > 0) {
                        prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
                }
        }
 
-       if (addorrep && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
+       if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;
 
        return 0;
 }
 
+static u32 cls_bpf_flags(u32 flags)
+{
+       return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
+}
+
 static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                           struct cls_bpf_prog *oldprog)
 {
-       struct cls_bpf_prog *obj = prog;
-       enum tc_clsbpf_command cmd;
-       bool skip_sw;
-       int ret;
-
-       skip_sw = tc_skip_sw(prog->gen_flags) ||
-               (oldprog && tc_skip_sw(oldprog->gen_flags));
-
-       if (oldprog && oldprog->offloaded) {
-               if (!tc_skip_hw(prog->gen_flags)) {
-                       cmd = TC_CLSBPF_REPLACE;
-               } else if (!tc_skip_sw(prog->gen_flags)) {
-                       obj = oldprog;
-                       cmd = TC_CLSBPF_DESTROY;
-               } else {
-                       return -EINVAL;
-               }
-       } else {
-               if (tc_skip_hw(prog->gen_flags))
-                       return skip_sw ? -EINVAL : 0;
-               cmd = TC_CLSBPF_ADD;
-       }
-
-       ret = cls_bpf_offload_cmd(tp, obj, cmd);
-       if (ret)
-               return ret;
+       if (prog && oldprog &&
+           cls_bpf_flags(prog->gen_flags) !=
+           cls_bpf_flags(oldprog->gen_flags))
+               return -EINVAL;
 
-       obj->offloaded = true;
-       if (oldprog)
-               oldprog->offloaded = false;
+       if (prog && tc_skip_hw(prog->gen_flags))
+               prog = NULL;
+       if (oldprog && tc_skip_hw(oldprog->gen_flags))
+               oldprog = NULL;
+       if (!prog && !oldprog)
+               return 0;
 
-       return 0;
+       return cls_bpf_offload_cmd(tp, prog, oldprog);
 }
 
 static void cls_bpf_stop_offload(struct tcf_proto *tp,
@@ -222,25 +211,26 @@ static void cls_bpf_stop_offload(struct tcf_proto *tp,
 {
        int err;
 
-       if (!prog->offloaded)
-               return;
-
-       err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
-       if (err) {
+       err = cls_bpf_offload_cmd(tp, NULL, prog);
+       if (err)
                pr_err("Stopping hardware offload failed: %d\n", err);
-               return;
-       }
-
-       prog->offloaded = false;
 }
 
 static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
                                         struct cls_bpf_prog *prog)
 {
-       if (!prog->offloaded)
-               return;
+       struct tcf_block *block = tp->chain->block;
+       struct tc_cls_bpf_offload cls_bpf = {};
 
-       cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
+       tc_cls_common_offload_init(&cls_bpf.common, tp);
+       cls_bpf.command = TC_CLSBPF_STATS;
+       cls_bpf.exts = &prog->exts;
+       cls_bpf.prog = prog->filter;
+       cls_bpf.name = prog->bpf_name;
+       cls_bpf.exts_integrated = prog->exts_integrated;
+       cls_bpf.gen_flags = prog->gen_flags;
+
+       tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
 }
 
 static int cls_bpf_init(struct tcf_proto *tp)
@@ -258,11 +248,8 @@ static int cls_bpf_init(struct tcf_proto *tp)
        return 0;
 }
 
-static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
 {
-       tcf_exts_destroy(&prog->exts);
-       tcf_exts_put_net(&prog->exts);
-
        if (cls_bpf_is_ebpf(prog))
                bpf_prog_put(prog->filter);
        else
@@ -270,6 +257,14 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
 
        kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
+}
+
+static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+{
+       tcf_exts_destroy(&prog->exts);
+       tcf_exts_put_net(&prog->exts);
+
+       cls_bpf_free_parms(prog);
        kfree(prog);
 }
 
@@ -514,12 +509,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                goto errout_idr;
 
        ret = cls_bpf_offload(tp, prog, oldprog);
-       if (ret) {
-               if (!oldprog)
-                       idr_remove_ext(&head->handle_idr, prog->handle);
-               __cls_bpf_delete_prog(prog);
-               return ret;
-       }
+       if (ret)
+               goto errout_parms;
 
        if (!tc_in_hw(prog->gen_flags))
                prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
@@ -537,6 +528,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
        *arg = prog;
        return 0;
 
+errout_parms:
+       cls_bpf_free_parms(prog);
 errout_idr:
        if (!oldprog)
                idr_remove_ext(&head->handle_idr, prog->handle);
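
The cls_bpf changes above collapse the old per-command offload calls into a single TC_CLSBPF_OFFLOAD command that carries both the new and the old program. A hedged, driver-side sketch of how a TC_SETUP_CLSBPF callback might decode that contract (struct my_dev and the my_*() helpers are hypothetical, not any real driver's API):

#include <linux/errno.h>
#include <net/pkt_cls.h>

struct bpf_prog;
struct my_dev {
	unsigned int	n_offloaded;	/* hypothetical per-device state */
};

static int my_add_prog(struct my_dev *dev, struct bpf_prog *prog) { return 0; }
static int my_replace_prog(struct my_dev *dev, struct bpf_prog *prog) { return 0; }
static int my_destroy_prog(struct my_dev *dev) { return 0; }

static int my_setup_tc_clsbpf(struct my_dev *dev,
			      struct tc_cls_bpf_offload *cls_bpf)
{
	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	/* Which of prog/oldprog is non-NULL says what is being asked for. */
	if (cls_bpf->prog && cls_bpf->oldprog)
		return my_replace_prog(dev, cls_bpf->prog);	/* swap in place */
	if (cls_bpf->prog)
		return my_add_prog(dev, cls_bpf->prog);		/* first install */
	if (cls_bpf->oldprog)
		return my_destroy_prog(dev);			/* remove offload */
	return 0;
}

This mirrors what cls_bpf_offload() now guarantees before calling tc_setup_cb_call(): programs marked skip_hw are passed as NULL, and a replace between programs with incompatible gen_flags is rejected up front.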
index ac152b4..507859c 100644 (file)
@@ -45,7 +45,6 @@
 #include <net/netlink.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
-#include <linux/netdevice.h>
 #include <linux/idr.h>
 
 struct tc_u_knode {
index df3110d..07c10ba 100644 (file)
@@ -51,7 +51,7 @@ static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
        if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))
                return 0;
 
-       return !memcmp(ptr + nbyte->hdr.off, nbyte->pattern, nbyte->hdr.len);
+       return !memcmp(ptr, nbyte->pattern, nbyte->hdr.len);
 }
 
 static struct tcf_ematch_ops em_nbyte_ops = {
index b6c4f53..52529b7 100644 (file)
@@ -795,6 +795,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        tcm->tcm_info = refcount_read(&q->refcnt);
        if (nla_put_string(skb, TCA_KIND, q->ops->id))
                goto nla_put_failure;
+       if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
+               goto nla_put_failure;
        if (q->ops->dump && q->ops->dump(q, skb) < 0)
                goto nla_put_failure;
        qlen = q->q.qlen;
@@ -1061,17 +1063,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
        }
 
        if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
-               if (qdisc_is_percpu_stats(sch)) {
-                       sch->cpu_bstats =
-                               netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
-                       if (!sch->cpu_bstats)
-                               goto err_out4;
-
-                       sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
-                       if (!sch->cpu_qstats)
-                               goto err_out4;
-               }
-
                if (tca[TCA_STAB]) {
                        stab = qdisc_get_stab(tca[TCA_STAB]);
                        if (IS_ERR(stab)) {
@@ -1113,7 +1104,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
                ops->destroy(sch);
 err_out3:
        dev_put(dev);
-       kfree((char *) sch - sch->padded);
+       qdisc_free(sch);
 err_out2:
        module_put(ops->owner);
 err_out:
@@ -1121,8 +1112,6 @@ err_out:
        return NULL;
 
 err_out4:
-       free_percpu(sch->cpu_bstats);
-       free_percpu(sch->cpu_qstats);
        /*
         * Any broken qdiscs that would require a ops->reset() here?
         * The qdisc was never in action so it shouldn't be necessary.
index 6361be7..525eb3a 100644 (file)
@@ -1158,9 +1158,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
                return -EINVAL;
 
+       err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
+       if (err)
+               goto put_rtab;
+
        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
-               goto put_rtab;
+               goto put_block;
 
        q->link.sibling = &q->link;
        q->link.common.classid = sch->handle;
@@ -1194,6 +1198,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        cbq_addprio(q, &q->link);
        return 0;
 
+put_block:
+       tcf_block_put(q->link.block);
+
 put_rtab:
        qdisc_put_rtab(q->link.R_tab);
        return err;
index b30a2c7..531250f 100644 (file)
@@ -369,6 +369,9 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 
        ctl = nla_data(tb[TCA_CHOKE_PARMS]);
 
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+               return -EINVAL;
+
        if (ctl->limit > CHOKE_MAX_QUEUE)
                return -EINVAL;
 
index 3839cbb..cac003f 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
@@ -277,6 +278,8 @@ unsigned long dev_trans_start(struct net_device *dev)
 
        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
+       else if (netif_is_macvlan(dev))
+               dev = macvlan_dev_real_dev(dev);
        res = netdev_get_tx_queue(dev, 0)->trans_start;
        for (i = 1; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
@@ -630,6 +633,19 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        qdisc_skb_head_init(&sch->q);
        spin_lock_init(&sch->q.lock);
 
+       if (ops->static_flags & TCQ_F_CPUSTATS) {
+               sch->cpu_bstats =
+                       netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+               if (!sch->cpu_bstats)
+                       goto errout1;
+
+               sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+               if (!sch->cpu_qstats) {
+                       free_percpu(sch->cpu_bstats);
+                       goto errout1;
+               }
+       }
+
        spin_lock_init(&sch->busylock);
        lockdep_set_class(&sch->busylock,
                          dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
@@ -639,6 +655,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          dev->qdisc_running_key ?: &qdisc_running_key);
 
        sch->ops = ops;
+       sch->flags = ops->static_flags;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
@@ -646,6 +663,8 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        refcount_set(&sch->refcnt, 1);
 
        return sch;
+errout1:
+       kfree(p);
 errout:
        return ERR_PTR(err);
 }
@@ -695,7 +714,7 @@ void qdisc_reset(struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_reset);
 
-static void qdisc_free(struct Qdisc *qdisc)
+void qdisc_free(struct Qdisc *qdisc)
 {
        if (qdisc_is_percpu_stats(qdisc)) {
                free_percpu(qdisc->cpu_bstats);
@@ -1037,6 +1056,8 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
 
        if (!tp_head) {
                RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
+               /* Wait for any in-flight RCU callback before it is freed. */
+               rcu_barrier_bh();
                return;
        }
 
@@ -1052,7 +1073,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
        rcu_assign_pointer(*miniqp->p_miniq, miniq);
 
        if (miniq_old)
-               /* This is counterpart of the rcu barrier above. We need to
+               /* This is the counterpart of the rcu barriers above. We need to
                 * block potential new user of miniq_old until all readers
                 * are not seeing it.
                 */
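
The qdisc_alloc() hunk above moves the optional per-CPU stats allocation out of qdisc_create(), keys it off ops->static_flags, unwinds the first allocation if the second one fails, and then seeds sch->flags from the same static flags. A stripped-down userspace model of that pattern follows; the structure, flag and NCPU constant are made up for the sketch, with plain calloc() standing in for the kernel's per-CPU allocators.

#include <stdlib.h>

#define F_CPUSTATS 0x1
#define NCPU 4

struct fake_sched {
	unsigned int flags;
	long *cpu_bstats;	/* stands in for the per-CPU byte/packet stats */
	long *cpu_qstats;	/* stands in for the per-CPU queue stats */
};

static struct fake_sched *fake_alloc(unsigned int static_flags)
{
	struct fake_sched *sch = calloc(1, sizeof(*sch));

	if (!sch)
		return NULL;

	if (static_flags & F_CPUSTATS) {
		sch->cpu_bstats = calloc(NCPU, sizeof(long));
		if (!sch->cpu_bstats)
			goto err;
		sch->cpu_qstats = calloc(NCPU, sizeof(long));
		if (!sch->cpu_qstats) {
			free(sch->cpu_bstats);
			goto err;
		}
	}
	sch->flags = static_flags;	/* mirrors sch->flags = ops->static_flags */
	return sch;
err:
	free(sch);
	return NULL;
}

int main(void)
{
	struct fake_sched *sch = fake_alloc(F_CPUSTATS);

	if (sch) {
		free(sch->cpu_bstats);
		free(sch->cpu_qstats);
		free(sch);
	}
	return 0;
}
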
index 17c7130..bc30f91 100644 (file)
@@ -356,6 +356,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q = table->tab[dp];
 
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+               return -EINVAL;
+
        if (!q) {
                table->tab[dp] = q = *prealloc;
                *prealloc = NULL;
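
The choke and gred hunks above (and the matching red and sfq ones further down) all reject RED parameters up front with red_check_params() instead of trusting user-supplied thresholds. The sketch below shows a plausible shape for such a check, assuming the usual constraints that the thresholds are ordered and still fit in 32 bits once scaled by Wlog; it is illustrative only, not the kernel helper.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Portable fls() stand-in: position of the highest set bit, 0 for 0. */
static int fls32(uint32_t x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static bool red_params_look_sane(uint32_t qth_min, uint32_t qth_max, uint8_t Wlog)
{
	if (fls32(qth_min) + Wlog > 32)
		return false;		/* scaled minimum would overflow */
	if (fls32(qth_max) + Wlog > 32)
		return false;		/* scaled maximum would overflow */
	return qth_max >= qth_min;	/* thresholds must be ordered */
}

int main(void)
{
	printf("%d\n", red_params_look_sane(5, 15, 9));	/* 1: plausible */
	printf("%d\n", red_params_look_sane(15, 5, 9));	/* 0: reversed  */
	return 0;
}
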
index 5ecc38f..003e1b0 100644 (file)
@@ -66,7 +66,8 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct ingress_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
-       int err;
+
+       net_inc_ingress_queue();
 
        mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
 
@@ -74,14 +75,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
        q->block_info.chain_head_change = clsact_chain_head_change;
        q->block_info.chain_head_change_priv = &q->miniqp;
 
-       err = tcf_block_get_ext(&q->block, sch, &q->block_info);
-       if (err)
-               return err;
-
-       net_inc_ingress_queue();
-       sch->flags |= TCQ_F_CPUSTATS;
-
-       return 0;
+       return tcf_block_get_ext(&q->block, sch, &q->block_info);
 }
 
 static void ingress_destroy(struct Qdisc *sch)
@@ -120,6 +114,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
        .cl_ops         =       &ingress_class_ops,
        .id             =       "ingress",
        .priv_size      =       sizeof(struct ingress_sched_data),
+       .static_flags   =       TCQ_F_CPUSTATS,
        .init           =       ingress_init,
        .destroy        =       ingress_destroy,
        .dump           =       ingress_dump,
@@ -172,6 +167,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
        struct net_device *dev = qdisc_dev(sch);
        int err;
 
+       net_inc_ingress_queue();
+       net_inc_egress_queue();
+
        mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);
 
        q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
@@ -188,20 +186,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
        q->egress_block_info.chain_head_change = clsact_chain_head_change;
        q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
 
-       err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
-       if (err)
-               goto err_egress_block_get;
-
-       net_inc_ingress_queue();
-       net_inc_egress_queue();
-
-       sch->flags |= TCQ_F_CPUSTATS;
-
-       return 0;
-
-err_egress_block_get:
-       tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
-       return err;
+       return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
 }
 
 static void clsact_destroy(struct Qdisc *sch)
@@ -228,6 +213,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
        .cl_ops         =       &clsact_class_ops,
        .id             =       "clsact",
        .priv_size      =       sizeof(struct clsact_sched_data),
+       .static_flags   =       TCQ_F_CPUSTATS,
        .init           =       clsact_init,
        .destroy        =       clsact_destroy,
        .dump           =       ingress_dump,
index 7f8ea9e..f0747eb 100644 (file)
@@ -157,6 +157,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
                .handle = sch->handle,
                .parent = sch->parent,
        };
+       int err;
 
        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return -EOPNOTSUPP;
@@ -171,7 +172,14 @@ static int red_offload(struct Qdisc *sch, bool enable)
                opt.command = TC_RED_DESTROY;
        }
 
-       return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
+       err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
+
+       if (!err && enable)
+               sch->flags |= TCQ_F_OFFLOADED;
+       else
+               sch->flags &= ~TCQ_F_OFFLOADED;
+
+       return err;
 }
 
 static void red_destroy(struct Qdisc *sch)
@@ -212,6 +220,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
 
        ctl = nla_data(tb[TCA_RED_PARMS]);
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+               return -EINVAL;
 
        if (ctl->limit > 0) {
                child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
@@ -272,7 +282,7 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt)
        return red_change(sch, opt);
 }
 
-static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt)
+static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct tc_red_qopt_offload hw_stats = {
@@ -284,21 +294,12 @@ static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt)
                        .stats.qstats = &sch->qstats,
                },
        };
-       int err;
 
-       opt->flags &= ~TC_RED_OFFLOADED;
-       if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
-               return 0;
-
-       err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
-                                           &hw_stats);
-       if (err == -EOPNOTSUPP)
+       if (!(sch->flags & TCQ_F_OFFLOADED))
                return 0;
 
-       if (!err)
-               opt->flags |= TC_RED_OFFLOADED;
-
-       return err;
+       return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
+                                            &hw_stats);
 }
 
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -317,7 +318,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
        int err;
 
        sch->qstats.backlog = q->qdisc->qstats.backlog;
-       err = red_dump_offload(sch, &opt);
+       err = red_dump_offload_stats(sch, &opt);
        if (err)
                goto nla_put_failure;
 
@@ -345,7 +346,7 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
                .marked = q->stats.prob_mark + q->stats.forced_mark,
        };
 
-       if (tc_can_offload(dev) &&  dev->netdev_ops->ndo_setup_tc) {
+       if (sch->flags & TCQ_F_OFFLOADED) {
                struct red_stats hw_stats = {0};
                struct tc_red_qopt_offload hw_stats_request = {
                        .command = TC_RED_XSTATS,
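
The sch_red hunks replace the "ask the driver again at dump time" logic with a TCQ_F_OFFLOADED-style flag that is set or cleared at the moment the offload command succeeds or fails, so the stats paths only consult that flag. A minimal userspace model of that bookkeeping is below; the callback, flag name and return handling are stand-ins, not driver API.

#include <stdbool.h>
#include <stdio.h>

#define F_OFFLOADED 0x2

struct fake_qdisc { unsigned int flags; };

/* Pretend driver hook; a real one could also fail, e.g. with -EOPNOTSUPP. */
static int fake_setup_offload(bool enable)
{
	(void)enable;
	return 0;
}

static int fake_offload(struct fake_qdisc *sch, bool enable)
{
	int err = fake_setup_offload(enable);

	if (!err && enable)
		sch->flags |= F_OFFLOADED;
	else
		sch->flags &= ~F_OFFLOADED;
	return err;
}

static void fake_dump_stats(const struct fake_qdisc *sch)
{
	/* Only bother the driver for stats if the offload really took hold. */
	if (!(sch->flags & F_OFFLOADED)) {
		puts("software stats only");
		return;
	}
	puts("query hardware stats");
}

int main(void)
{
	struct fake_qdisc sch = { 0 };

	fake_offload(&sch, true);
	fake_dump_stats(&sch);	/* prints "query hardware stats" */
	fake_offload(&sch, false);
	fake_dump_stats(&sch);	/* prints "software stats only" */
	return 0;
}
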
index 890f4a4..930e5bd 100644 (file)
@@ -639,6 +639,9 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        if (ctl->divisor &&
            (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
                return -EINVAL;
+       if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
+                                       ctl_v1->Wlog))
+               return -EINVAL;
        if (ctl_v1 && ctl_v1->qth_min) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (!p)
@@ -724,6 +727,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        int i;
        int err;
 
+       q->sch = sch;
        timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
 
        err = tcf_block_get(&q->block, &q->filter_list, sch);
index 7b261af..7f8baa4 100644 (file)
@@ -53,6 +53,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
        msg->send_failed = 0;
        msg->send_error = 0;
        msg->can_delay = 1;
+       msg->abandoned = 0;
        msg->expires_at = 0;
        INIT_LIST_HEAD(&msg->chunks);
 }
@@ -304,6 +305,13 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
        if (!chunk->asoc->peer.prsctp_capable)
                return 0;
 
+       if (chunk->msg->abandoned)
+               return 1;
+
+       if (!chunk->has_tsn &&
+           !(chunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG))
+               return 0;
+
        if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
            time_after(jiffies, chunk->msg->expires_at)) {
                struct sctp_stream_out *streamout =
@@ -316,6 +324,7 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
                        chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
                        streamout->ext->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
                }
+               chunk->msg->abandoned = 1;
                return 1;
        } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
                   chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
@@ -324,10 +333,12 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
 
                chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
                streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
+               chunk->msg->abandoned = 1;
                return 1;
        } else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) &&
                   chunk->msg->expires_at &&
                   time_after(jiffies, chunk->msg->expires_at)) {
+               chunk->msg->abandoned = 1;
                return 1;
        }
        /* PRIO policy is processed by sendmsg, not here */
index 3f619fd..291c97b 100644 (file)
@@ -78,6 +78,9 @@ const char *sctp_cname(const union sctp_subtype cid)
        case SCTP_CID_AUTH:
                return "AUTH";
 
+       case SCTP_CID_RECONF:
+               return "RECONF";
+
        default:
                break;
        }
index 621b5ca..141c9c4 100644 (file)
@@ -399,20 +399,24 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
                return;
        }
 
-       if (t->param_flags & SPP_PMTUD_ENABLE) {
-               /* Update transports view of the MTU */
-               sctp_transport_update_pmtu(t, pmtu);
-
-               /* Update association pmtu. */
-               sctp_assoc_sync_pmtu(asoc);
-       }
+       if (!(t->param_flags & SPP_PMTUD_ENABLE))
+               /* We can't allow retransmitting in such a case, as the
+                * retransmission would be sized just as before, so we would
+                * get another ICMP and retransmit again.
+                */
+               return;
 
-       /* Retransmit with the new pmtu setting.
-        * Normally, if PMTU discovery is disabled, an ICMP Fragmentation
-        * Needed will never be sent, but if a message was sent before
-        * PMTU discovery was disabled that was larger than the PMTU, it
-        * would not be fragmented, so it must be re-transmitted fragmented.
+       /* Update the transport's view of the MTU. Return if no update was needed.
+        * If an update wasn't needed/possible, it also doesn't make sense to
+        * try to retransmit now.
         */
+       if (!sctp_transport_update_pmtu(t, pmtu))
+               return;
+
+       /* Update association pmtu. */
+       sctp_assoc_sync_pmtu(asoc);
+
+       /* Retransmit with the new pmtu setting. */
        sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
 }
 
index 3b18085..5d4c15b 100644 (file)
@@ -826,6 +826,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp)
        case AF_INET:
                if (!__ipv6_only_sock(sctp_opt2sk(sp)))
                        return 1;
+               /* fallthru */
        default:
                return 0;
        }
index 275925b..35bc710 100644 (file)
@@ -45,6 +45,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct sctphdr *sh;
 
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
+               goto out;
+
        sh = sctp_hdr(skb);
        if (!pskb_may_pull(skb, sizeof(*sh)))
                goto out;
index 4db012a..c4ec99b 100644 (file)
@@ -364,10 +364,12 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
        list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
                struct sctp_stream_out *streamout;
 
-               if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-                   chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
+               if (!chk->msg->abandoned &&
+                   (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
+                    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
                        continue;
 
+               chk->msg->abandoned = 1;
                list_del_init(&chk->transmitted_list);
                sctp_insert_list(&asoc->outqueue.abandoned,
                                 &chk->transmitted_list);
@@ -377,7 +379,8 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
                asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
                streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
 
-               if (!chk->tsn_gap_acked) {
+               if (queue != &asoc->outqueue.retransmit &&
+                   !chk->tsn_gap_acked) {
                        if (chk->transport)
                                chk->transport->flight_size -=
                                                sctp_data_size(chk);
@@ -403,10 +406,13 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
        q->sched->unsched_all(&asoc->stream);
 
        list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
-               if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-                   chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
+               if (!chk->msg->abandoned &&
+                   (!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) ||
+                    !SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
+                    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
                        continue;
 
+               chk->msg->abandoned = 1;
                sctp_sched_dequeue_common(q, chk);
                asoc->sent_cnt_removable--;
                asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
@@ -912,9 +918,9 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                        break;
 
                case SCTP_CID_ABORT:
-                       if (sctp_test_T_bit(chunk)) {
+                       if (sctp_test_T_bit(chunk))
                                packet->vtag = asoc->c.my_vtag;
-                       }
+                       /* fallthru */
                /* The following chunks are "response" chunks, i.e.
                 * they are generated in response to something we
                 * received.  If we are sending these, then we can
@@ -1434,7 +1440,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                        /* If this chunk has not been acked, stop
                         * considering it as 'outstanding'.
                         */
-                       if (!tchunk->tsn_gap_acked) {
+                       if (transmitted_queue != &q->retransmit &&
+                           !tchunk->tsn_gap_acked) {
                                if (tchunk->transport)
                                        tchunk->transport->flight_size -=
                                                        sctp_data_size(tchunk);
index f5172c2..6a38c25 100644 (file)
@@ -1499,6 +1499,7 @@ static __init int sctp_init(void)
        INIT_LIST_HEAD(&sctp_address_families);
        sctp_v4_pf_init();
        sctp_v6_pf_init();
+       sctp_sched_ops_init();
 
        status = register_pernet_subsys(&sctp_defaults_ops);
        if (status)
index 3204a9b..039fcb6 100644 (file)
@@ -85,7 +85,7 @@
 static int sctp_writeable(struct sock *sk);
 static void sctp_wfree(struct sk_buff *skb);
 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
-                               size_t msg_len, struct sock **orig_sk);
+                               size_t msg_len);
 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
 static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -188,13 +188,13 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
                list_for_each_entry(chunk, &t->transmitted, transmitted_list)
                        cb(chunk);
 
-       list_for_each_entry(chunk, &q->retransmit, list)
+       list_for_each_entry(chunk, &q->retransmit, transmitted_list)
                cb(chunk);
 
-       list_for_each_entry(chunk, &q->sacked, list)
+       list_for_each_entry(chunk, &q->sacked, transmitted_list)
                cb(chunk);
 
-       list_for_each_entry(chunk, &q->abandoned, list)
+       list_for_each_entry(chunk, &q->abandoned, transmitted_list)
                cb(chunk);
 
        list_for_each_entry(chunk, &q->out_chunk_list, list)
@@ -335,16 +335,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
        if (len < sizeof (struct sockaddr))
                return NULL;
 
+       if (!opt->pf->af_supported(addr->sa.sa_family, opt))
+               return NULL;
+
        /* V4 mapped address are really of AF_INET family */
        if (addr->sa.sa_family == AF_INET6 &&
-           ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
-               if (!opt->pf->af_supported(AF_INET, opt))
-                       return NULL;
-       } else {
-               /* Does this PF support this AF? */
-               if (!opt->pf->af_supported(addr->sa.sa_family, opt))
-                       return NULL;
-       }
+           ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+           !opt->pf->af_supported(AF_INET, opt))
+               return NULL;
 
        /* If we get this far, af is valid. */
        af = sctp_get_af_specific(addr->sa.sa_family);
@@ -1883,8 +1881,14 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
                 */
                if (sinit) {
                        if (sinit->sinit_num_ostreams) {
-                               asoc->c.sinit_num_ostreams =
-                                       sinit->sinit_num_ostreams;
+                               __u16 outcnt = sinit->sinit_num_ostreams;
+
+                               asoc->c.sinit_num_ostreams = outcnt;
+                               /* outcnt has been changed, so re-init stream */
+                               err = sctp_stream_init(&asoc->stream, outcnt, 0,
+                                                      GFP_KERNEL);
+                               if (err)
+                                       goto out_free;
                        }
                        if (sinit->sinit_max_instreams) {
                                asoc->c.sinit_max_instreams =
@@ -1971,7 +1975,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        if (!sctp_wspace(asoc)) {
                /* sk can be changed by peel off when waiting for buf. */
-               err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
+               err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
                if (err) {
                        if (err == -ESRCH) {
                                /* asoc is already dead. */
@@ -2277,7 +2281,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 
                if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
                        event = sctp_ulpevent_make_sender_dry_event(asoc,
-                                       GFP_ATOMIC);
+                                       GFP_USER | __GFP_NOWARN);
                        if (!event)
                                return -ENOMEM;
 
@@ -3498,6 +3502,8 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
 
        if (optlen < sizeof(struct sctp_hmacalgo))
                return -EINVAL;
+       optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
+                                            SCTP_AUTH_NUM_HMACS * sizeof(u16));
 
        hmacs = memdup_user(optval, optlen);
        if (IS_ERR(hmacs))
@@ -3536,6 +3542,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 
        if (optlen <= sizeof(struct sctp_authkey))
                return -EINVAL;
+       /* authkey->sca_keylength is u16, so optlen can't be bigger than
+        * this.
+        */
+       optlen = min_t(unsigned int, optlen, USHRT_MAX +
+                                            sizeof(struct sctp_authkey));
 
        authkey = memdup_user(optval, optlen);
        if (IS_ERR(authkey))
@@ -3891,13 +3902,20 @@ static int sctp_setsockopt_reset_streams(struct sock *sk,
        struct sctp_association *asoc;
        int retval = -EINVAL;
 
-       if (optlen < sizeof(struct sctp_reset_streams))
+       if (optlen < sizeof(*params))
                return -EINVAL;
+       /* srs_number_streams is u16, so optlen can't be bigger than this. */
+       optlen = min_t(unsigned int, optlen, USHRT_MAX +
+                                            sizeof(__u16) * sizeof(*params));
 
        params = memdup_user(optval, optlen);
        if (IS_ERR(params))
                return PTR_ERR(params);
 
+       if (params->srs_number_streams * sizeof(__u16) >
+           optlen - sizeof(*params))
+               goto out;
+
        asoc = sctp_id2assoc(sk, params->srs_assoc_id);
        if (!asoc)
                goto out;
@@ -4494,7 +4512,7 @@ static int sctp_init_sock(struct sock *sk)
        SCTP_DBG_OBJCNT_INC(sock);
 
        local_bh_disable();
-       percpu_counter_inc(&sctp_sockets_allocated);
+       sk_sockets_allocated_inc(sk);
        sock_prot_inuse_add(net, sk->sk_prot, 1);
 
        /* Nothing can fail after this block, otherwise
@@ -4538,7 +4556,7 @@ static void sctp_destroy_sock(struct sock *sk)
        }
        sctp_endpoint_free(sp->ep);
        local_bh_disable();
-       percpu_counter_dec(&sctp_sockets_allocated);
+       sk_sockets_allocated_dec(sk);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        local_bh_enable();
 }
@@ -5011,7 +5029,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
        len = sizeof(int);
        if (put_user(len, optlen))
                return -EFAULT;
-       if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
+       if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
                return -EFAULT;
        return 0;
 }
@@ -5080,7 +5098,6 @@ static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *p
        *newfile = sock_alloc_file(newsock, 0, NULL);
        if (IS_ERR(*newfile)) {
                put_unused_fd(retval);
-               sock_release(newsock);
                retval = PTR_ERR(*newfile);
                *newfile = NULL;
                return retval;
@@ -5642,6 +5659,9 @@ copy_getaddrs:
                err = -EFAULT;
                goto out;
        }
+       /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
+        * but we can't change it anymore.
+        */
        if (put_user(bytes_copied, optlen))
                err = -EFAULT;
 out:
@@ -6078,7 +6098,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
                params.assoc_id = 0;
        } else if (len >= sizeof(struct sctp_assoc_value)) {
                len = sizeof(struct sctp_assoc_value);
-               if (copy_from_user(&params, optval, sizeof(params)))
+               if (copy_from_user(&params, optval, len))
                        return -EFAULT;
        } else
                return -EINVAL;
@@ -6248,7 +6268,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 
        if (len < sizeof(struct sctp_authkeyid))
                return -EINVAL;
-       if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
+
+       len = sizeof(struct sctp_authkeyid);
+       if (copy_from_user(&val, optval, len))
                return -EFAULT;
 
        asoc = sctp_id2assoc(sk, val.scact_assoc_id);
@@ -6260,7 +6282,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
        else
                val.scact_keynumber = ep->active_key_id;
 
-       len = sizeof(struct sctp_authkeyid);
        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
@@ -6286,7 +6307,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
        if (len < sizeof(struct sctp_authchunks))
                return -EINVAL;
 
-       if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+       if (copy_from_user(&val, optval, sizeof(val)))
                return -EFAULT;
 
        to = p->gauth_chunks;
@@ -6331,7 +6352,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
        if (len < sizeof(struct sctp_authchunks))
                return -EINVAL;
 
-       if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+       if (copy_from_user(&val, optval, sizeof(val)))
                return -EFAULT;
 
        to = p->gauth_chunks;
@@ -7999,12 +8020,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
 
 /* Helper function to wait for space in the sndbuf.  */
 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
-                               size_t msg_len, struct sock **orig_sk)
+                               size_t msg_len)
 {
        struct sock *sk = asoc->base.sk;
-       int err = 0;
        long current_timeo = *timeo_p;
        DEFINE_WAIT(wait);
+       int err = 0;
 
        pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
                 *timeo_p, msg_len);
@@ -8033,17 +8054,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
                release_sock(sk);
                current_timeo = schedule_timeout(current_timeo);
                lock_sock(sk);
-               if (sk != asoc->base.sk) {
-                       release_sock(sk);
-                       sk = asoc->base.sk;
-                       lock_sock(sk);
-               }
+               if (sk != asoc->base.sk)
+                       goto do_error;
 
                *timeo_p = current_timeo;
        }
 
 out:
-       *orig_sk = sk;
        finish_wait(&asoc->wait, &wait);
 
        /* Release the association's refcnt.  */
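
Several of the sctp_setsockopt_*() hunks above clamp the user-supplied optlen with min_t() before memdup_user(), because a 16-bit count or key-length field inside the option can never legitimately describe more data than that bound, and then re-check the declared count against what was actually copied. The fragment below models the same two-step validation with invented names and limits.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* optlen is what user space claims it passed; nr_entries is the count field
 * found inside the option itself. */
static int fake_validate(size_t optlen, uint16_t nr_entries)
{
	const size_t hdr = sizeof(uint16_t);		/* the count field */
	const size_t max = hdr + (size_t)UINT16_MAX * sizeof(uint16_t);

	if (optlen < hdr)
		return -1;
	if (optlen > max)
		optlen = max;	/* a u16 count can never need more than this */
	if ((size_t)nr_entries * sizeof(uint16_t) > optlen - hdr)
		return -1;	/* count claims more data than was supplied */
	return 0;
}

int main(void)
{
	printf("%d\n", fake_validate(sizeof(uint16_t) * 3, 2));	/* 0: fits  */
	printf("%d\n", fake_validate(sizeof(uint16_t) * 2, 5));	/* -1: lies */
	return 0;
}
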
index a11db21..524dfeb 100644 (file)
@@ -64,7 +64,7 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
                 */
 
                /* Mark as failed send. */
-               sctp_chunk_fail(ch, SCTP_ERROR_INV_STRM);
+               sctp_chunk_fail(ch, (__force __u32)SCTP_ERROR_INV_STRM);
                if (asoc->peer.prsctp_capable &&
                    SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags))
                        asoc->sent_cnt_removable--;
@@ -156,9 +156,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
        sctp_stream_outq_migrate(stream, NULL, outcnt);
        sched->sched_all(stream);
 
-       i = sctp_stream_alloc_out(stream, outcnt, gfp);
-       if (i)
-               return i;
+       ret = sctp_stream_alloc_out(stream, outcnt, gfp);
+       if (ret)
+               goto out;
 
        stream->outcnt = outcnt;
        for (i = 0; i < stream->outcnt; i++)
@@ -170,19 +170,17 @@ in:
        if (!incnt)
                goto out;
 
-       i = sctp_stream_alloc_in(stream, incnt, gfp);
-       if (i) {
-               ret = -ENOMEM;
-               goto free;
+       ret = sctp_stream_alloc_in(stream, incnt, gfp);
+       if (ret) {
+               sched->free(stream);
+               kfree(stream->out);
+               stream->out = NULL;
+               stream->outcnt = 0;
+               goto out;
        }
 
        stream->incnt = incnt;
-       goto out;
 
-free:
-       sched->free(stream);
-       kfree(stream->out);
-       stream->out = NULL;
 out:
        return ret;
 }
@@ -254,6 +252,30 @@ static int sctp_send_reconf(struct sctp_association *asoc,
        return retval;
 }
 
+static bool sctp_stream_outq_is_empty(struct sctp_stream *stream,
+                                     __u16 str_nums, __be16 *str_list)
+{
+       struct sctp_association *asoc;
+       __u16 i;
+
+       asoc = container_of(stream, struct sctp_association, stream);
+       if (!asoc->outqueue.out_qlen)
+               return true;
+
+       if (!str_nums)
+               return false;
+
+       for (i = 0; i < str_nums; i++) {
+               __u16 sid = ntohs(str_list[i]);
+
+               if (stream->out[sid].ext &&
+                   !list_empty(&stream->out[sid].ext->outq))
+                       return false;
+       }
+
+       return true;
+}
+
 int sctp_send_reset_streams(struct sctp_association *asoc,
                            struct sctp_reset_streams *params)
 {
@@ -317,6 +339,11 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
        for (i = 0; i < str_nums; i++)
                nstr_list[i] = htons(str_list[i]);
 
+       if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+               retval = -EAGAIN;
+               goto out;
+       }
+
        chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
 
        kfree(nstr_list);
@@ -377,6 +404,9 @@ int sctp_send_reset_assoc(struct sctp_association *asoc)
        if (asoc->strreset_outstanding)
                return -EINPROGRESS;
 
+       if (!sctp_outq_is_empty(&asoc->outqueue))
+               return -EAGAIN;
+
        chunk = sctp_make_strreset_tsnreq(asoc);
        if (!chunk)
                return -ENOMEM;
@@ -563,7 +593,7 @@ struct sctp_chunk *sctp_process_strreset_outreq(
                flags = SCTP_STREAM_RESET_INCOMING_SSN;
        }
 
-       nums = (ntohs(param.p->length) - sizeof(*outreq)) / 2;
+       nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
        if (nums) {
                str_p = outreq->list_of_streams;
                for (i = 0; i < nums; i++) {
@@ -627,7 +657,7 @@ struct sctp_chunk *sctp_process_strreset_inreq(
                goto out;
        }
 
-       nums = (ntohs(param.p->length) - sizeof(*inreq)) / 2;
+       nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16);
        str_p = inreq->list_of_streams;
        for (i = 0; i < nums; i++) {
                if (ntohs(str_p[i]) >= stream->outcnt) {
@@ -636,6 +666,12 @@ struct sctp_chunk *sctp_process_strreset_inreq(
                }
        }
 
+       if (!sctp_stream_outq_is_empty(stream, nums, str_p)) {
+               result = SCTP_STRRESET_IN_PROGRESS;
+               asoc->strreset_inseq--;
+               goto err;
+       }
+
        chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0);
        if (!chunk)
                goto out;
@@ -687,12 +723,18 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
                i = asoc->strreset_inseq - request_seq - 1;
                result = asoc->strreset_result[i];
                if (result == SCTP_STRRESET_PERFORMED) {
-                       next_tsn = asoc->next_tsn;
+                       next_tsn = asoc->ctsn_ack_point + 1;
                        init_tsn =
                                sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
                }
                goto err;
        }
+
+       if (!sctp_outq_is_empty(&asoc->outqueue)) {
+               result = SCTP_STRRESET_IN_PROGRESS;
+               goto err;
+       }
+
        asoc->strreset_inseq++;
 
        if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
@@ -703,9 +745,10 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
                goto out;
        }
 
-       /* G3: The same processing as though a SACK chunk with no gap report
-        *     and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
-        *     received MUST be performed.
+       /* G4: The same processing as though a FWD-TSN chunk (as defined in
+        *     [RFC3758]) with all streams affected and a new cumulative TSN
+        *     ACK of the Receiver's Next TSN minus 1 were received MUST be
+        *     performed.
         */
        max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
        sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
@@ -720,10 +763,9 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
        sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
                         init_tsn, GFP_ATOMIC);
 
-       /* G4: The same processing as though a FWD-TSN chunk (as defined in
-        *     [RFC3758]) with all streams affected and a new cumulative TSN
-        *     ACK of the Receiver's Next TSN minus 1 were received MUST be
-        *     performed.
+       /* G3: The same processing as though a SACK chunk with no gap report
+        *     and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
+        *     received MUST be performed.
         */
        sctp_outq_free(&asoc->outqueue);
 
@@ -927,7 +969,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
                outreq = (struct sctp_strreset_outreq *)req;
                str_p = outreq->list_of_streams;
-               nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2;
+               nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) /
+                      sizeof(__u16);
 
                if (result == SCTP_STRRESET_PERFORMED) {
                        if (nums) {
@@ -956,7 +999,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
                inreq = (struct sctp_strreset_inreq *)req;
                str_p = inreq->list_of_streams;
-               nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2;
+               nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
+                      sizeof(__u16);
 
                *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
                        nums, str_p, GFP_ATOMIC);
@@ -975,6 +1019,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
                if (result == SCTP_STRRESET_PERFORMED) {
                        __u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
                                                &asoc->peer.tsn_map);
+                       LIST_HEAD(temp);
 
                        sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
                        sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
@@ -983,7 +1028,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
                                         SCTP_TSN_MAP_INITIAL,
                                         stsn, GFP_ATOMIC);
 
+                       /* Clean up sacked and abandoned queues only. As the
+                        * out_chunk_list may not be empty, splice it to temp,
+                        * then get it back after sctp_outq_free is done.
+                        */
+                       list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
                        sctp_outq_free(&asoc->outqueue);
+                       list_splice_init(&temp, &asoc->outqueue.out_chunk_list);
 
                        asoc->next_tsn = rtsn;
                        asoc->ctsn_ack_point = asoc->next_tsn - 1;
index 0b83ec5..d8c162a 100644 (file)
@@ -119,16 +119,27 @@ static struct sctp_sched_ops sctp_sched_fcfs = {
        .unsched_all = sctp_sched_fcfs_unsched_all,
 };
 
+static void sctp_sched_ops_fcfs_init(void)
+{
+       sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
+}
+
 /* API to other parts of the stack */
 
-extern struct sctp_sched_ops sctp_sched_prio;
-extern struct sctp_sched_ops sctp_sched_rr;
+static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];
 
-static struct sctp_sched_ops *sctp_sched_ops[] = {
-       &sctp_sched_fcfs,
-       &sctp_sched_prio,
-       &sctp_sched_rr,
-};
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+                            struct sctp_sched_ops *sched_ops)
+{
+       sctp_sched_ops[sched] = sched_ops;
+}
+
+void sctp_sched_ops_init(void)
+{
+       sctp_sched_ops_fcfs_init();
+       sctp_sched_ops_prio_init();
+       sctp_sched_ops_rr_init();
+}
 
 int sctp_sched_set_sched(struct sctp_association *asoc,
                         enum sctp_sched_type sched)
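
The stream_sched hunks convert a build-time array of extern scheduler ops into a small registry that each scheduler fills in through sctp_sched_ops_register(), which in turn lets the prio and rr ops structures become static, as the later hunks show. A compact userspace model of that registry pattern is below, with invented enum values and ops fields.

#include <stdio.h>

enum fake_sched_type {
	FAKE_SS_FCFS,
	FAKE_SS_PRIO,
	FAKE_SS_RR,
	FAKE_SS_MAX = FAKE_SS_RR
};

struct fake_sched_ops { const char *name; };

static struct fake_sched_ops *fake_sched_ops[FAKE_SS_MAX + 1];

static void fake_sched_register(enum fake_sched_type t, struct fake_sched_ops *ops)
{
	fake_sched_ops[t] = ops;
}

/* Each scheduler keeps its ops static and exposes only an init hook. */
static struct fake_sched_ops fcfs_ops = { "fcfs" };
static void fcfs_init(void) { fake_sched_register(FAKE_SS_FCFS, &fcfs_ops); }

static struct fake_sched_ops prio_ops = { "prio" };
static void prio_init(void) { fake_sched_register(FAKE_SS_PRIO, &prio_ops); }

static struct fake_sched_ops rr_ops = { "rr" };
static void rr_init(void) { fake_sched_register(FAKE_SS_RR, &rr_ops); }

static void fake_sched_ops_init(void)
{
	fcfs_init();
	prio_init();
	rr_init();
}

int main(void)
{
	fake_sched_ops_init();
	printf("prio -> %s\n", fake_sched_ops[FAKE_SS_PRIO]->name);
	return 0;
}
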
index 384dbf3..7997d35 100644 (file)
@@ -333,7 +333,7 @@ static void sctp_sched_prio_unsched_all(struct sctp_stream *stream)
                        sctp_sched_prio_unsched(soute);
 }
 
-struct sctp_sched_ops sctp_sched_prio = {
+static struct sctp_sched_ops sctp_sched_prio = {
        .set = sctp_sched_prio_set,
        .get = sctp_sched_prio_get,
        .init = sctp_sched_prio_init,
@@ -345,3 +345,8 @@ struct sctp_sched_ops sctp_sched_prio = {
        .sched_all = sctp_sched_prio_sched_all,
        .unsched_all = sctp_sched_prio_unsched_all,
 };
+
+void sctp_sched_ops_prio_init(void)
+{
+       sctp_sched_ops_register(SCTP_SS_PRIO, &sctp_sched_prio);
+}
index 7612a43..1155692 100644 (file)
@@ -187,7 +187,7 @@ static void sctp_sched_rr_unsched_all(struct sctp_stream *stream)
                sctp_sched_rr_unsched(stream, soute);
 }
 
-struct sctp_sched_ops sctp_sched_rr = {
+static struct sctp_sched_ops sctp_sched_rr = {
        .set = sctp_sched_rr_set,
        .get = sctp_sched_rr_get,
        .init = sctp_sched_rr_init,
@@ -199,3 +199,8 @@ struct sctp_sched_ops sctp_sched_rr = {
        .sched_all = sctp_sched_rr_sched_all,
        .unsched_all = sctp_sched_rr_unsched_all,
 };
+
+void sctp_sched_ops_rr_init(void)
+{
+       sctp_sched_ops_register(SCTP_SS_RR, &sctp_sched_rr);
+}
index 1e5a224..47f82bd 100644 (file)
@@ -248,28 +248,37 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
                transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }
 
-void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
+bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
        struct dst_entry *dst = sctp_transport_dst_check(t);
+       bool change = true;
 
        if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
-               pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
-                       __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
-               /* Use default minimum segment size and disable
-                * pmtu discovery on this transport.
-                */
-               t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
-       } else {
-               t->pathmtu = pmtu;
+               pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
+                                   __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
+               /* Use the default minimum segment size instead */
+               pmtu = SCTP_DEFAULT_MINSEGMENT;
        }
+       pmtu = SCTP_TRUNC4(pmtu);
 
        if (dst) {
                dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
                dst = sctp_transport_dst_check(t);
        }
 
-       if (!dst)
+       if (!dst) {
                t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
+               dst = t->dst;
+       }
+
+       if (dst) {
+               /* Re-fetch, as lower layers may enforce a higher minimum size */
+               pmtu = SCTP_TRUNC4(dst_mtu(dst));
+               change = t->pathmtu != pmtu;
+       }
+       t->pathmtu = pmtu;
+
+       return change;
 }
 
 /* Caches the dst entry and source address for a transport's destination
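
With the hunk above, sctp_transport_update_pmtu() reports whether the path MTU actually changed, so the ICMP handler earlier in this section can bail out instead of re-syncing the association and retransmitting for a no-op update. A tiny model of that "update returns whether anything changed" contract, with made-up names and a made-up floor value:

#include <stdbool.h>
#include <stdio.h>

#define FAKE_MIN_PMTU 512u

struct fake_transport { unsigned int pathmtu; };

static bool fake_update_pmtu(struct fake_transport *t, unsigned int pmtu)
{
	bool changed;

	if (pmtu < FAKE_MIN_PMTU)
		pmtu = FAKE_MIN_PMTU;	/* never go below the floor */
	changed = t->pathmtu != pmtu;
	t->pathmtu = pmtu;
	return changed;
}

static void fake_icmp_frag_needed(struct fake_transport *t, unsigned int pmtu)
{
	if (!fake_update_pmtu(t, pmtu))
		return;		/* nothing changed: retransmitting is pointless */
	printf("resync association and retransmit at pmtu %u\n", t->pathmtu);
}

int main(void)
{
	struct fake_transport t = { .pathmtu = 1500 };

	fake_icmp_frag_needed(&t, 1400);	/* triggers a retransmit */
	fake_icmp_frag_needed(&t, 1400);	/* no-op the second time */
	return 0;
}
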
index a71be33..e36ec5d 100644 (file)
@@ -1084,29 +1084,21 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
 {
-       struct sctp_association *asoc;
-       __u16 needed, freed;
-
-       asoc = ulpq->asoc;
+       struct sctp_association *asoc = ulpq->asoc;
+       __u32 freed = 0;
+       __u16 needed;
 
-       if (chunk) {
-               needed = ntohs(chunk->chunk_hdr->length);
-               needed -= sizeof(struct sctp_data_chunk);
-       } else
-               needed = SCTP_DEFAULT_MAXWINDOW;
-
-       freed = 0;
+       needed = ntohs(chunk->chunk_hdr->length) -
+                sizeof(struct sctp_data_chunk);
 
        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
-               if (freed < needed) {
+               if (freed < needed)
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
-               }
        }
        /* If able to free enough room, accept this chunk. */
-       if (chunk && (freed >= needed)) {
-               int retval;
-               retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+       if (freed >= needed) {
+               int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                /*
                 * Enter partial delivery if chunk has not been
                 * delivered; otherwise, drain the reassembly queue.
index 42d8e9c..6f05d5c 100644 (file)
@@ -406,8 +406,10 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
                name.len = strlen(name.name);
        }
        path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
-       if (unlikely(!path.dentry))
+       if (unlikely(!path.dentry)) {
+               sock_release(sock);
                return ERR_PTR(-ENOMEM);
+       }
        path.mnt = mntget(sock_mnt);
 
        d_instantiate(path.dentry, SOCK_INODE(sock));
@@ -415,9 +417,11 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
        file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
                  &socket_file_ops);
        if (IS_ERR(file)) {
-               /* drop dentry, keep inode */
+               /* drop dentry, keep inode for a bit */
                ihold(d_inode(path.dentry));
                path_put(&path);
+               /* ... and now kill it properly */
+               sock_release(sock);
                return file;
        }
 
@@ -432,8 +436,10 @@ static int sock_map_fd(struct socket *sock, int flags)
 {
        struct file *newfile;
        int fd = get_unused_fd_flags(flags);
-       if (unlikely(fd < 0))
+       if (unlikely(fd < 0)) {
+               sock_release(sock);
                return fd;
+       }
 
        newfile = sock_alloc_file(sock, flags, NULL);
        if (likely(!IS_ERR(newfile))) {
@@ -1330,19 +1336,9 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
 
        retval = sock_create(family, type, protocol, &sock);
        if (retval < 0)
-               goto out;
-
-       retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
-       if (retval < 0)
-               goto out_release;
-
-out:
-       /* It may be already another descriptor 8) Not kernel problem. */
-       return retval;
+               return retval;
 
-out_release:
-       sock_release(sock);
-       return retval;
+       return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
 }
 
 /*
@@ -1365,88 +1361,73 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
        if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
                flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
 
+       /*
+        * reserve descriptors and make sure we won't fail
+        * to return them to userland.
+        */
+       fd1 = get_unused_fd_flags(flags);
+       if (unlikely(fd1 < 0))
+               return fd1;
+
+       fd2 = get_unused_fd_flags(flags);
+       if (unlikely(fd2 < 0)) {
+               put_unused_fd(fd1);
+               return fd2;
+       }
+
+       err = put_user(fd1, &usockvec[0]);
+       if (err)
+               goto out;
+
+       err = put_user(fd2, &usockvec[1]);
+       if (err)
+               goto out;
+
        /*
         * Obtain the first socket and check if the underlying protocol
         * supports the socketpair call.
         */
 
        err = sock_create(family, type, protocol, &sock1);
-       if (err < 0)
+       if (unlikely(err < 0))
                goto out;
 
        err = sock_create(family, type, protocol, &sock2);
-       if (err < 0)
-               goto out_release_1;
-
-       err = sock1->ops->socketpair(sock1, sock2);
-       if (err < 0)
-               goto out_release_both;
-
-       fd1 = get_unused_fd_flags(flags);
-       if (unlikely(fd1 < 0)) {
-               err = fd1;
-               goto out_release_both;
+       if (unlikely(err < 0)) {
+               sock_release(sock1);
+               goto out;
        }
 
-       fd2 = get_unused_fd_flags(flags);
-       if (unlikely(fd2 < 0)) {
-               err = fd2;
-               goto out_put_unused_1;
+       err = sock1->ops->socketpair(sock1, sock2);
+       if (unlikely(err < 0)) {
+               sock_release(sock2);
+               sock_release(sock1);
+               goto out;
        }
 
        newfile1 = sock_alloc_file(sock1, flags, NULL);
        if (IS_ERR(newfile1)) {
                err = PTR_ERR(newfile1);
-               goto out_put_unused_both;
+               sock_release(sock2);
+               goto out;
        }
 
        newfile2 = sock_alloc_file(sock2, flags, NULL);
        if (IS_ERR(newfile2)) {
                err = PTR_ERR(newfile2);
-               goto out_fput_1;
+               fput(newfile1);
+               goto out;
        }
 
-       err = put_user(fd1, &usockvec[0]);
-       if (err)
-               goto out_fput_both;
-
-       err = put_user(fd2, &usockvec[1]);
-       if (err)
-               goto out_fput_both;
-
        audit_fd_pair(fd1, fd2);
 
        fd_install(fd1, newfile1);
        fd_install(fd2, newfile2);
-       /* fd1 and fd2 may be already another descriptors.
-        * Not kernel problem.
-        */
-
        return 0;
 
-out_fput_both:
-       fput(newfile2);
-       fput(newfile1);
-       put_unused_fd(fd2);
-       put_unused_fd(fd1);
-       goto out;
-
-out_fput_1:
-       fput(newfile1);
-       put_unused_fd(fd2);
-       put_unused_fd(fd1);
-       sock_release(sock2);
-       goto out;
-
-out_put_unused_both:
+out:
        put_unused_fd(fd2);
-out_put_unused_1:
        put_unused_fd(fd1);
-out_release_both:
-       sock_release(sock2);
-out_release_1:
-       sock_release(sock1);
-out:
        return err;
 }
 
@@ -1562,7 +1543,6 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
        if (IS_ERR(newfile)) {
                err = PTR_ERR(newfile);
                put_unused_fd(newfd);
-               sock_release(newsock);
                goto out_put;
        }
 
@@ -2641,6 +2621,15 @@ out_fs:
 
 core_initcall(sock_init);      /* early initcall */
 
+static int __init jit_init(void)
+{
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+       bpf_jit_enable = 1;
+#endif
+       return 0;
+}
+pure_initcall(jit_init);
+
 #ifdef CONFIG_PROC_FS
 void socket_seq_show(struct seq_file *seq)
 {
index c5fda15..1fdab5c 100644 (file)
@@ -401,7 +401,7 @@ void strp_data_ready(struct strparser *strp)
         * allows a thread in BH context to safely check if the process
         * lock is held. In this case, if the lock is held, queue work.
         */
-       if (sock_owned_by_user(strp->sk)) {
+       if (sock_owned_by_user_nocheck(strp->sk)) {
                queue_work(strp_wq, &strp->work);
                return;
        }
index c4778ca..444380f 100644 (file)
@@ -231,6 +231,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
                        goto out_free_groups;
                creds->cr_group_info->gid[i] = kgid;
        }
+       groups_sort(creds->cr_group_info);
 
        return 0;
 out_free_groups:
index 73165e9..2653119 100644 (file)
@@ -264,7 +264,7 @@ out:
        return status;
 }
 
-static struct cache_detail rsi_cache_template = {
+static const struct cache_detail rsi_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = RSI_HASHMAX,
        .name           = "auth.rpcsec.init",
@@ -481,6 +481,7 @@ static int rsc_parse(struct cache_detail *cd,
                                goto out;
                        rsci.cred.cr_group_info->gid[i] = kgid;
                }
+               groups_sort(rsci.cred.cr_group_info);
 
                /* mech name */
                len = qword_get(&mesg, buf, mlen);
@@ -524,7 +525,7 @@ out:
        return status;
 }
 
-static struct cache_detail rsc_cache_template = {
+static const struct cache_detail rsc_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = RSC_HASHMAX,
        .name           = "auth.rpcsec.context",
index 79d55d9..e689438 100644 (file)
@@ -1674,7 +1674,7 @@ void cache_unregister_net(struct cache_detail *cd, struct net *net)
 }
 EXPORT_SYMBOL_GPL(cache_unregister_net);
 
-struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
+struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
 {
        struct cache_detail *cd;
        int i;
index a801da8..e2a4184 100644 (file)
@@ -1841,6 +1841,7 @@ call_bind_status(struct rpc_task *task)
        case -ECONNABORTED:
        case -ENOTCONN:
        case -EHOSTDOWN:
+       case -ENETDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
        case -ENOBUFS:
@@ -1917,6 +1918,7 @@ call_connect_status(struct rpc_task *task)
                /* fall through */
        case -ECONNRESET:
        case -ECONNABORTED:
+       case -ENETDOWN:
        case -ENETUNREACH:
        case -EHOSTUNREACH:
        case -EADDRINUSE:
@@ -2022,6 +2024,7 @@ call_transmit_status(struct rpc_task *task)
                 */
        case -ECONNREFUSED:
        case -EHOSTDOWN:
+       case -ENETDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
        case -EPERM:
@@ -2071,6 +2074,7 @@ call_bc_transmit(struct rpc_task *task)
        switch (task->tk_status) {
        case 0:
                /* Success */
+       case -ENETDOWN:
        case -EHOSTDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
@@ -2139,6 +2143,7 @@ call_status(struct rpc_task *task)
        task->tk_status = 0;
        switch(status) {
        case -EHOSTDOWN:
+       case -ENETDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
        case -EPERM:
index f81eaa8..af7f28f 100644 (file)
@@ -520,6 +520,7 @@ static int unix_gid_parse(struct cache_detail *cd,
                ug.gi->gid[i] = kgid;
        }
 
+       groups_sort(ug.gi);
        ugp = unix_gid_lookup(cd, uid);
        if (ugp) {
                struct cache_head *ch;
@@ -569,7 +570,7 @@ static int unix_gid_show(struct seq_file *m,
        return 0;
 }
 
-static struct cache_detail unix_gid_cache_template = {
+static const struct cache_detail unix_gid_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = GID_HASHMAX,
        .name           = "auth.unix.gid",
@@ -819,6 +820,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
                kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
                cred->cr_group_info->gid[i] = kgid;
        }
+       groups_sort(cred->cr_group_info);
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
@@ -862,7 +864,7 @@ struct auth_ops svcauth_unix = {
        .set_client     = svcauth_unix_set_client,
 };
 
-static struct cache_detail ip_map_cache_template = {
+static const struct cache_detail ip_map_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = IP_HASHMAX,
        .name           = "auth.unix.ip",
index 333b9d6..33b74fd 100644 (file)
@@ -1001,6 +1001,7 @@ void xprt_transmit(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
+       unsigned int connect_cookie;
        int status, numreqs;
 
        dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
@@ -1024,6 +1025,7 @@ void xprt_transmit(struct rpc_task *task)
        } else if (!req->rq_bytes_sent)
                return;
 
+       connect_cookie = xprt->connect_cookie;
        req->rq_xtime = ktime_get();
        status = xprt->ops->send_request(task);
        trace_xprt_transmit(xprt, req->rq_xid, status);
@@ -1047,20 +1049,28 @@ void xprt_transmit(struct rpc_task *task)
        xprt->stat.bklog_u += xprt->backlog.qlen;
        xprt->stat.sending_u += xprt->sending.qlen;
        xprt->stat.pending_u += xprt->pending.qlen;
+       spin_unlock_bh(&xprt->transport_lock);
 
-       /* Don't race with disconnect */
-       if (!xprt_connected(xprt))
-               task->tk_status = -ENOTCONN;
-       else {
+       req->rq_connect_cookie = connect_cookie;
+       if (rpc_reply_expected(task) && !READ_ONCE(req->rq_reply_bytes_recvd)) {
                /*
-                * Sleep on the pending queue since
-                * we're expecting a reply.
+                * Sleep on the pending queue if we're expecting a reply.
+                * The spinlock ensures atomicity between the test of
+                * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
                 */
-               if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
+               spin_lock(&xprt->recv_lock);
+               if (!req->rq_reply_bytes_recvd) {
                        rpc_sleep_on(&xprt->pending, task, xprt_timer);
-               req->rq_connect_cookie = xprt->connect_cookie;
+                       /*
+                        * Send an extra queue wakeup call if the
+                        * connection was dropped in case the call to
+                        * rpc_sleep_on() raced.
+                        */
+                       if (!xprt_connected(xprt))
+                               xprt_wake_pending_tasks(xprt, -ENOTCONN);
+               }
+               spin_unlock(&xprt->recv_lock);
        }
-       spin_unlock_bh(&xprt->transport_lock);
 }
 
 static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
index ed34dc0..a3f2ab2 100644 (file)
@@ -1408,11 +1408,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
        dprintk("RPC:       %s: reply %p completes request %p (xid 0x%08x)\n",
                __func__, rep, req, be32_to_cpu(rep->rr_xid));
 
-       if (list_empty(&req->rl_registered) &&
-           !test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags))
-               rpcrdma_complete_rqst(rep);
-       else
-               queue_work(rpcrdma_receive_wq, &rep->rr_work);
+       queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work);
        return;
 
 out_badstatus:
index 646c244..6ee1ad8 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/sunrpc/addr.h>
+#include <linux/smp.h>
 
 #include "xprt_rdma.h"
 
@@ -656,6 +657,7 @@ xprt_rdma_allocate(struct rpc_task *task)
                task->tk_pid, __func__, rqst->rq_callsize,
                rqst->rq_rcvsize, req);
 
+       req->rl_cpu = smp_processor_id();
        req->rl_connect_cookie = 0;     /* our reserved value */
        rpcrdma_set_xprtdata(rqst, req);
        rqst->rq_buffer = req->rl_sendbuf->rg_base;
index 710b3f7..8607c02 100644 (file)
@@ -83,7 +83,7 @@ rpcrdma_alloc_wq(void)
        struct workqueue_struct *recv_wq;
 
        recv_wq = alloc_workqueue("xprtrdma_receive",
-                                 WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI,
+                                 WQ_MEM_RECLAIM | WQ_HIGHPRI,
                                  0);
        if (!recv_wq)
                return -ENOMEM;
index 51686d9..1342f74 100644 (file)
@@ -342,6 +342,7 @@ enum {
 struct rpcrdma_buffer;
 struct rpcrdma_req {
        struct list_head        rl_list;
+       int                     rl_cpu;
        unsigned int            rl_connect_cookie;
        struct rpcrdma_buffer   *rl_buffer;
        struct rpcrdma_rep      *rl_reply;
index 9cc850c..6d0cc3b 100644 (file)
@@ -2440,7 +2440,9 @@ static void xs_tcp_setup_socket(struct work_struct *work)
                 */
        case -ECONNREFUSED:
        case -ECONNRESET:
+       case -ENETDOWN:
        case -ENETUNREACH:
+       case -EHOSTUNREACH:
        case -EADDRINUSE:
        case -ENOBUFS:
                /*
index 47ec121..c800147 100644 (file)
@@ -324,6 +324,7 @@ restart:
        if (res) {
                pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
                        name, -res);
+               kfree(b);
                return -EINVAL;
        }
 
@@ -347,8 +348,10 @@ restart:
        if (skb)
                tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
 
-       if (tipc_mon_create(net, bearer_id))
+       if (tipc_mon_create(net, bearer_id)) {
+               bearer_disable(net, b);
                return -ENOMEM;
+       }
 
        pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
                name,
index 12777ca..5f4ffae 100644 (file)
@@ -109,7 +109,8 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
 static void tipc_group_decr_active(struct tipc_group *grp,
                                   struct tipc_member *m)
 {
-       if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING)
+       if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
+           m->state == MBR_REMITTED)
                grp->active_cnt--;
 }
 
@@ -351,8 +352,7 @@ void tipc_group_update_member(struct tipc_member *m, int len)
        if (m->window >= ADV_IDLE)
                return;
 
-       if (!list_empty(&m->congested))
-               return;
+       list_del_init(&m->congested);
 
        /* Sort member into congested members' list */
        list_for_each_entry_safe(_m, tmp, &grp->congested, congested) {
@@ -369,18 +369,20 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
        u16 prev = grp->bc_snd_nxt - 1;
        struct tipc_member *m;
        struct rb_node *n;
+       u16 ackers = 0;
 
        for (n = rb_first(&grp->members); n; n = rb_next(n)) {
                m = container_of(n, struct tipc_member, tree_node);
                if (tipc_group_is_enabled(m)) {
                        tipc_group_update_member(m, len);
                        m->bc_acked = prev;
+                       ackers++;
                }
        }
 
        /* Mark number of acknowledges to expect, if any */
        if (ack)
-               grp->bc_ackers = grp->member_cnt;
+               grp->bc_ackers = ackers;
        grp->bc_snd_nxt++;
 }
 
@@ -497,6 +499,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
        while ((skb = skb_peek(defq))) {
                hdr = buf_msg(skb);
                mtyp = msg_type(hdr);
+               blks = msg_blocks(hdr);
                deliver = true;
                ack = false;
                update = false;
@@ -546,7 +549,6 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
                if (!update)
                        continue;
 
-               blks = msg_blocks(hdr);
                tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
        }
        return;
@@ -561,7 +563,7 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
        int max_active = grp->max_active;
        int reclaim_limit = max_active * 3 / 4;
        int active_cnt = grp->active_cnt;
-       struct tipc_member *m, *rm;
+       struct tipc_member *m, *rm, *pm;
 
        m = tipc_group_find_member(grp, node, port);
        if (!m)
@@ -604,6 +606,17 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
                        pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
                        tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
                }
+               grp->active_cnt--;
+               list_del_init(&m->list);
+               if (list_empty(&grp->pending))
+                       return;
+
+               /* Set oldest pending member to active and advertise */
+               pm = list_first_entry(&grp->pending, struct tipc_member, list);
+               pm->state = MBR_ACTIVE;
+               list_move_tail(&pm->list, &grp->active);
+               grp->active_cnt++;
+               tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
                break;
        case MBR_RECLAIMING:
        case MBR_DISCOVERED:
@@ -648,6 +661,7 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
        } else if (mtyp == GRP_REMIT_MSG) {
                msg_set_grp_remitted(hdr, m->window);
        }
+       msg_set_dest_droppable(hdr, true);
        __skb_queue_tail(xmitq, skb);
 }
 
@@ -689,15 +703,16 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                        msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
                        __skb_queue_tail(inputq, m->event_msg);
                }
-               if (m->window < ADV_IDLE)
-                       tipc_group_update_member(m, 0);
-               else
-                       list_del_init(&m->congested);
+               list_del_init(&m->congested);
+               tipc_group_update_member(m, 0);
                return;
        case GRP_LEAVE_MSG:
                if (!m)
                        return;
                m->bc_syncpt = msg_grp_bc_syncpt(hdr);
+               list_del_init(&m->list);
+               list_del_init(&m->congested);
+               *usr_wakeup = true;
 
                /* Wait until WITHDRAW event is received */
                if (m->state != MBR_LEAVING) {
@@ -709,8 +724,6 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                ehdr = buf_msg(m->event_msg);
                msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
                __skb_queue_tail(inputq, m->event_msg);
-               *usr_wakeup = true;
-               list_del_init(&m->congested);
                return;
        case GRP_ADV_MSG:
                if (!m)
@@ -741,14 +754,14 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                if (!m || m->state != MBR_RECLAIMING)
                        return;
 
-               list_del_init(&m->list);
-               grp->active_cnt--;
                remitted = msg_grp_remitted(hdr);
 
                /* Messages preceding the REMIT still in receive queue */
                if (m->advertised > remitted) {
                        m->state = MBR_REMITTED;
                        in_flight = m->advertised - remitted;
+                       m->advertised = ADV_IDLE + in_flight;
+                       return;
                }
                /* All messages preceding the REMIT have been read */
                if (m->advertised <= remitted) {
@@ -760,6 +773,8 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                        tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
 
                m->advertised = ADV_IDLE + in_flight;
+               grp->active_cnt--;
+               list_del_init(&m->list);
 
                /* Set oldest pending member to active and advertise */
                if (list_empty(&grp->pending))
@@ -849,19 +864,29 @@ void tipc_group_member_evt(struct tipc_group *grp,
                *usr_wakeup = true;
                m->usr_pending = false;
                node_up = tipc_node_is_up(net, node);
-
-               /* Hold back event if more messages might be expected */
-               if (m->state != MBR_LEAVING && node_up) {
-                       m->event_msg = skb;
-                       tipc_group_decr_active(grp, m);
-                       m->state = MBR_LEAVING;
-               } else {
-                       if (node_up)
+               m->event_msg = NULL;
+
+               if (node_up) {
+                       /* Hold back event if a LEAVE msg should be expected */
+                       if (m->state != MBR_LEAVING) {
+                               m->event_msg = skb;
+                               tipc_group_decr_active(grp, m);
+                               m->state = MBR_LEAVING;
+                       } else {
                                msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-                       else
+                               __skb_queue_tail(inputq, skb);
+                       }
+               } else {
+                       if (m->state != MBR_LEAVING) {
+                               tipc_group_decr_active(grp, m);
+                               m->state = MBR_LEAVING;
                                msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
+                       } else {
+                               msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
+                       }
                        __skb_queue_tail(inputq, skb);
                }
+               list_del_init(&m->list);
                list_del_init(&m->congested);
        }
        *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
index 8e884ed..32dc33a 100644 (file)
@@ -642,9 +642,13 @@ void tipc_mon_delete(struct net *net, int bearer_id)
 {
        struct tipc_net *tn = tipc_net(net);
        struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
-       struct tipc_peer *self = get_self(net, bearer_id);
+       struct tipc_peer *self;
        struct tipc_peer *peer, *tmp;
 
+       if (!mon)
+               return;
+
+       self = get_self(net, bearer_id);
        write_lock_bh(&mon->lock);
        tn->monitors[bearer_id] = NULL;
        list_for_each_entry_safe(peer, tmp, &self->list, list) {
index 507017f..9036d87 100644 (file)
@@ -1880,36 +1880,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
 
        if (strcmp(name, tipc_bclink_name) == 0) {
                err = tipc_nl_add_bc_link(net, &msg);
-               if (err) {
-                       nlmsg_free(msg.skb);
-                       return err;
-               }
+               if (err)
+                       goto err_free;
        } else {
                int bearer_id;
                struct tipc_node *node;
                struct tipc_link *link;
 
                node = tipc_node_find_by_name(net, name, &bearer_id);
-               if (!node)
-                       return -EINVAL;
+               if (!node) {
+                       err = -EINVAL;
+                       goto err_free;
+               }
 
                tipc_node_read_lock(node);
                link = node->links[bearer_id].link;
                if (!link) {
                        tipc_node_read_unlock(node);
-                       nlmsg_free(msg.skb);
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto err_free;
                }
 
                err = __tipc_nl_add_link(net, &msg, link, 0);
                tipc_node_read_unlock(node);
-               if (err) {
-                       nlmsg_free(msg.skb);
-                       return err;
-               }
+               if (err)
+                       goto err_free;
        }
 
        return genlmsg_reply(msg.skb, info);
+
+err_free:
+       nlmsg_free(msg.skb);
+       return err;
 }
 
 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
index acaef80..d60c303 100644 (file)
@@ -314,6 +314,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
        newcon->usr_data = s->tipc_conn_new(newcon->conid);
        if (!newcon->usr_data) {
                sock_release(newsock);
+               conn_put(newcon);
                return -ENOMEM;
        }
 
@@ -511,7 +512,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
        s = con->server;
        scbr = s->tipc_conn_new(*conid);
        if (!scbr) {
-               tipc_close_conn(con);
+               conn_put(con);
                return false;
        }
 
index 5d18c0c..3b40844 100644 (file)
@@ -727,11 +727,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 
        switch (sk->sk_state) {
        case TIPC_ESTABLISHED:
+       case TIPC_CONNECTING:
                if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
                        revents |= POLLOUT;
                /* fall thru' */
        case TIPC_LISTEN:
-       case TIPC_CONNECTING:
                if (!skb_queue_empty(&sk->sk_receive_queue))
                        revents |= POLLIN | POLLRDNORM;
                break;
@@ -1140,7 +1140,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                                __skb_dequeue(arrvq);
                                __skb_queue_tail(inputq, skb);
                        }
-                       refcount_dec(&skb->users);
+                       kfree_skb(skb);
                        spin_unlock_bh(&inputq->lock);
                        continue;
                }
index ecca64f..3deabca 100644 (file)
@@ -371,10 +371,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
                        goto rcu_out;
        }
 
-       tipc_rcv(sock_net(sk), skb, b);
-       rcu_read_unlock();
-       return 0;
-
 rcu_out:
        rcu_read_unlock();
 out:
index e07ee3a..736719c 100644 (file)
@@ -367,8 +367,10 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
 
        crypto_info = &ctx->crypto_send;
        /* Currently we don't support set crypto info more than one time */
-       if (TLS_CRYPTO_INFO_READY(crypto_info))
+       if (TLS_CRYPTO_INFO_READY(crypto_info)) {
+               rc = -EBUSY;
                goto out;
+       }
 
        rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
@@ -386,7 +388,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
        case TLS_CIPHER_AES_GCM_128: {
                if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
                        rc = -EINVAL;
-                       goto out;
+                       goto err_crypto_info;
                }
                rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
                                    optlen - sizeof(*crypto_info));
@@ -398,7 +400,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
        }
        default:
                rc = -EINVAL;
-               goto out;
+               goto err_crypto_info;
        }
 
        /* currently SW is default, we will have ethtool in future */
@@ -454,6 +456,15 @@ static int tls_init(struct sock *sk)
        struct tls_context *ctx;
        int rc = 0;
 
+       /* The TLS ulp is currently supported only for TCP sockets
+        * in ESTABLISHED state.
+        * Supporting sockets in LISTEN state will require us
+        * to modify the accept implementation to clone rather than
+        * share the ulp context.
+        */
+       if (sk->sk_state != TCP_ESTABLISHED)
+               return -ENOTSUPP;
+
        /* allocate tls context */
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
index 73d1921..0a9b72f 100644 (file)
@@ -391,7 +391,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
        while (msg_data_left(msg)) {
                if (sk->sk_err) {
-                       ret = sk->sk_err;
+                       ret = -sk->sk_err;
                        goto send_end;
                }
 
@@ -544,7 +544,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
                size_t copy, required_size;
 
                if (sk->sk_err) {
-                       ret = sk->sk_err;
+                       ret = -sk->sk_err;
                        goto sendpage_end;
                }
 
@@ -577,6 +577,8 @@ alloc_payload:
                get_page(page);
                sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
                sg_set_page(sg, page, copy, offset);
+               sg_unmark_end(sg);
+
                ctx->sg_plaintext_num_elem++;
 
                sk_mem_charge(sk, copy);
@@ -681,18 +683,17 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
        }
        default:
                rc = -EINVAL;
-               goto out;
+               goto free_priv;
        }
 
        ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
        ctx->tag_size = tag_size;
        ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
        ctx->iv_size = iv_size;
-       ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
-                         GFP_KERNEL);
+       ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL);
        if (!ctx->iv) {
                rc = -ENOMEM;
-               goto out;
+               goto free_priv;
        }
        memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
        memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
@@ -740,7 +741,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
 
        rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
        if (!rc)
-               goto out;
+               return 0;
 
 free_aead:
        crypto_free_aead(sw_ctx->aead_send);
@@ -751,6 +752,9 @@ free_rec_seq:
 free_iv:
        kfree(ctx->iv);
        ctx->iv = NULL;
+free_priv:
+       kfree(ctx->priv_ctx);
+       ctx->priv_ctx = NULL;
 out:
        return rc;
 }
index 5d28abf..c9473d6 100644 (file)
@@ -951,7 +951,7 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock,
                 * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
                 * but local send is not shutdown.
                 */
-               if (sk->sk_state == TCP_CLOSE) {
+               if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
                        if (!(sk->sk_shutdown & SEND_SHUTDOWN))
                                mask |= POLLOUT | POLLWRNORM;
 
index 5583df7..a827547 100644 (file)
@@ -487,7 +487,7 @@ static void hvs_release(struct vsock_sock *vsk)
 
        lock_sock(sk);
 
-       sk->sk_state = SS_DISCONNECTING;
+       sk->sk_state = TCP_CLOSING;
        vsock_remove_sock(vsk);
 
        release_sock(sk);
index 391775e..a7a73ff 100644 (file)
@@ -797,11 +797,13 @@ static void vmci_transport_handle_detach(struct sock *sk)
 
                /* We should not be sending anymore since the peer won't be
                 * there to receive, but we can still receive if there is data
-                * left in our consume queue.
+                * left in our consume queue. If the local endpoint is a host,
+                * we can't call vsock_stream_has_data, since that may block,
+                * but a host endpoint can't read data once the VM has
+                * detached, so there is no available data in that case.
                 */
-               if (vsock_stream_has_data(vsk) <= 0) {
-                       sk->sk_state = TCP_CLOSE;
-
+               if (vsk->local_addr.svm_cid == VMADDR_CID_HOST ||
+                   vsock_stream_has_data(vsk) <= 0) {
                        if (sk->sk_state == TCP_SYN_SENT) {
                                /* The peer may detach from a queue pair while
                                 * we are still in the connecting state, i.e.,
@@ -811,10 +813,12 @@ static void vmci_transport_handle_detach(struct sock *sk)
                                 * event like a reset.
                                 */
 
+                               sk->sk_state = TCP_CLOSE;
                                sk->sk_err = ECONNRESET;
                                sk->sk_error_report(sk);
                                return;
                        }
+                       sk->sk_state = TCP_CLOSE;
                }
                sk->sk_state_change(sk);
        }
@@ -2144,7 +2148,7 @@ module_exit(vmci_transport_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.4.0-k");
+MODULE_VERSION("1.0.5.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
index da91bb5..1abcc4f 100644 (file)
@@ -20,6 +20,10 @@ config CFG80211
        tristate "cfg80211 - wireless configuration API"
        depends on RFKILL || !RFKILL
        select FW_LOADER
+       # may need to update this when certificates are changed and are
+       # using a different algorithm, though right now they shouldn't
+       # (this is here rather than below to allow it to be a module)
+       select CRYPTO_SHA256 if CFG80211_USE_KERNEL_REGDB_KEYS
        ---help---
          cfg80211 is the Linux wireless LAN (802.11) configuration API.
          Enable this if you have a wireless device.
@@ -113,6 +117,9 @@ config CFG80211_EXTRA_REGDB_KEYDIR
          certificates like in the kernel sources (net/wireless/certs/)
          that shall be accepted for a signed regulatory database.
 
+         Note that you need to also select the correct CRYPTO_<hash> modules
+         for your certificates, and if cfg80211 is built-in they also must be.
+
 config CFG80211_REG_CELLULAR_HINTS
        bool "cfg80211 regulatory support for cellular base station hints"
        depends on CFG80211_CERTIFICATION_ONUS
index 278d979..1d84f91 100644 (file)
@@ -23,19 +23,36 @@ ifneq ($(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR),)
 cfg80211-y += extra-certs.o
 endif
 
-$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509)
+$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
        @$(kecho) "  GEN     $@"
-       @echo '#include "reg.h"' > $@
-       @echo 'const u8 shipped_regdb_certs[] = {' >> $@
-       @for f in $^ ; do hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ ; done
-       @echo '};' >> $@
-       @echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);' >> $@
+       @(echo '#include "reg.h"'; \
+         echo 'const u8 shipped_regdb_certs[] = {'; \
+         cat $^ ; \
+         echo '};'; \
+         echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
+        ) > $@
 
 $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
                      $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509)
        @$(kecho) "  GEN     $@"
-       @echo '#include "reg.h"' > $@
-       @echo 'const u8 extra_regdb_certs[] = {' >> $@
-       @for f in $^ ; do test -f $$f && hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ || true ; done
-       @echo '};' >> $@
-       @echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);' >> $@
+       @(set -e; \
+         allf=""; \
+         for f in $^ ; do \
+             # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \
+             thisf=$$(od -An -v -tx1 < $$f | \
+                          sed -e 's/ /\n/g' | \
+                          sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \
+                          sed -e 's/^/0x/;s/$$/,/'); \
+             # file should not be empty - maybe command substitution failed? \
+             test ! -z "$$thisf";\
+             allf=$$allf$$thisf;\
+         done; \
+         ( \
+             echo '#include "reg.h"'; \
+             echo 'const u8 extra_regdb_certs[] = {'; \
+             echo "$$allf"; \
+             echo '};'; \
+             echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \
+         ) > $@)
+
+clean-files += shipped-certs.c extra-certs.c
diff --git a/net/wireless/certs/sforshee.hex b/net/wireless/certs/sforshee.hex
new file mode 100644 (file)
index 0000000..14ea666
--- /dev/null
@@ -0,0 +1,86 @@
+/* Seth Forshee's regdb certificate */
+0x30, 0x82, 0x02, 0xa4, 0x30, 0x82, 0x01, 0x8c,
+0x02, 0x09, 0x00, 0xb2, 0x8d, 0xdf, 0x47, 0xae,
+0xf9, 0xce, 0xa7, 0x30, 0x0d, 0x06, 0x09, 0x2a,
+0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b,
+0x05, 0x00, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f,
+0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73,
+0x66, 0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30,
+0x20, 0x17, 0x0d, 0x31, 0x37, 0x31, 0x30, 0x30,
+0x36, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35, 0x5a,
+0x18, 0x0f, 0x32, 0x31, 0x31, 0x37, 0x30, 0x39,
+0x31, 0x32, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35,
+0x5a, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f, 0x06,
+0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73, 0x66,
+0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30, 0x82,
+0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86,
+0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05,
+0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82,
+0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xb5,
+0x40, 0xe3, 0x9c, 0x28, 0x84, 0x39, 0x03, 0xf2,
+0x39, 0xd7, 0x66, 0x2c, 0x41, 0x38, 0x15, 0xac,
+0x7e, 0xa5, 0x83, 0x71, 0x25, 0x7e, 0x90, 0x7c,
+0x68, 0xdd, 0x6f, 0x3f, 0xd9, 0xd7, 0x59, 0x38,
+0x9f, 0x7c, 0x6a, 0x52, 0xc2, 0x03, 0x2a, 0x2d,
+0x7e, 0x66, 0xf4, 0x1e, 0xb3, 0x12, 0x70, 0x20,
+0x5b, 0xd4, 0x97, 0x32, 0x3d, 0x71, 0x8b, 0x3b,
+0x1b, 0x08, 0x17, 0x14, 0x6b, 0x61, 0xc4, 0x57,
+0x8b, 0x96, 0x16, 0x1c, 0xfd, 0x24, 0xd5, 0x0b,
+0x09, 0xf9, 0x68, 0x11, 0x84, 0xfb, 0xca, 0x51,
+0x0c, 0xd1, 0x45, 0x19, 0xda, 0x10, 0x44, 0x8a,
+0xd9, 0xfe, 0x76, 0xa9, 0xfd, 0x60, 0x2d, 0x18,
+0x0b, 0x28, 0x95, 0xb2, 0x2d, 0xea, 0x88, 0x98,
+0xb8, 0xd1, 0x56, 0x21, 0xf0, 0x53, 0x1f, 0xf1,
+0x02, 0x6f, 0xe9, 0x46, 0x9b, 0x93, 0x5f, 0x28,
+0x90, 0x0f, 0xac, 0x36, 0xfa, 0x68, 0x23, 0x71,
+0x57, 0x56, 0xf6, 0xcc, 0xd3, 0xdf, 0x7d, 0x2a,
+0xd9, 0x1b, 0x73, 0x45, 0xeb, 0xba, 0x27, 0x85,
+0xef, 0x7a, 0x7f, 0xa5, 0xcb, 0x80, 0xc7, 0x30,
+0x36, 0xd2, 0x53, 0xee, 0xec, 0xac, 0x1e, 0xe7,
+0x31, 0xf1, 0x36, 0xa2, 0x9c, 0x63, 0xc6, 0x65,
+0x5b, 0x7f, 0x25, 0x75, 0x68, 0xa1, 0xea, 0xd3,
+0x7e, 0x00, 0x5c, 0x9a, 0x5e, 0xd8, 0x20, 0x18,
+0x32, 0x77, 0x07, 0x29, 0x12, 0x66, 0x1e, 0x36,
+0x73, 0xe7, 0x97, 0x04, 0x41, 0x37, 0xb1, 0xb1,
+0x72, 0x2b, 0xf4, 0xa1, 0x29, 0x20, 0x7c, 0x96,
+0x79, 0x0b, 0x2b, 0xd0, 0xd8, 0xde, 0xc8, 0x6c,
+0x3f, 0x93, 0xfb, 0xc5, 0xee, 0x78, 0x52, 0x11,
+0x15, 0x1b, 0x7a, 0xf6, 0xe2, 0x68, 0x99, 0xe7,
+0xfb, 0x46, 0x16, 0x84, 0xe3, 0xc7, 0xa1, 0xe6,
+0xe0, 0xd2, 0x46, 0xd5, 0xe1, 0xc4, 0x5f, 0xa0,
+0x66, 0xf4, 0xda, 0xc4, 0xff, 0x95, 0x1d, 0x02,
+0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09,
+0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
+0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00,
+0x87, 0x03, 0xda, 0xf2, 0x82, 0xc2, 0xdd, 0xaf,
+0x7c, 0x44, 0x2f, 0x86, 0xd3, 0x5f, 0x4c, 0x93,
+0x48, 0xb9, 0xfe, 0x07, 0x17, 0xbb, 0x21, 0xf7,
+0x25, 0x23, 0x4e, 0xaa, 0x22, 0x0c, 0x16, 0xb9,
+0x73, 0xae, 0x9d, 0x46, 0x7c, 0x75, 0xd9, 0xc3,
+0x49, 0x57, 0x47, 0xbf, 0x33, 0xb7, 0x97, 0xec,
+0xf5, 0x40, 0x75, 0xc0, 0x46, 0x22, 0xf0, 0xa0,
+0x5d, 0x9c, 0x79, 0x13, 0xa1, 0xff, 0xb8, 0xa3,
+0x2f, 0x7b, 0x8e, 0x06, 0x3f, 0xc8, 0xb6, 0xe4,
+0x6a, 0x28, 0xf2, 0x34, 0x5c, 0x23, 0x3f, 0x32,
+0xc0, 0xe6, 0xad, 0x0f, 0xac, 0xcf, 0x55, 0x74,
+0x47, 0x73, 0xd3, 0x01, 0x85, 0xb7, 0x0b, 0x22,
+0x56, 0x24, 0x7d, 0x9f, 0x09, 0xa9, 0x0e, 0x86,
+0x9e, 0x37, 0x5b, 0x9c, 0x6d, 0x02, 0xd9, 0x8c,
+0xc8, 0x50, 0x6a, 0xe2, 0x59, 0xf3, 0x16, 0x06,
+0xea, 0xb2, 0x42, 0xb5, 0x58, 0xfe, 0xba, 0xd1,
+0x81, 0x57, 0x1a, 0xef, 0xb2, 0x38, 0x88, 0x58,
+0xf6, 0xaa, 0xc4, 0x2e, 0x8b, 0x5a, 0x27, 0xe4,
+0xa5, 0xe8, 0xa4, 0xca, 0x67, 0x5c, 0xac, 0x72,
+0x67, 0xc3, 0x6f, 0x13, 0xc3, 0x2d, 0x35, 0x79,
+0xd7, 0x8a, 0xe7, 0xf5, 0xd4, 0x21, 0x30, 0x4a,
+0xd5, 0xf6, 0xa3, 0xd9, 0x79, 0x56, 0xf2, 0x0f,
+0x10, 0xf7, 0x7d, 0xd0, 0x51, 0x93, 0x2f, 0x47,
+0xf8, 0x7d, 0x4b, 0x0a, 0x84, 0x55, 0x12, 0x0a,
+0x7d, 0x4e, 0x3b, 0x1f, 0x2b, 0x2f, 0xfc, 0x28,
+0xb3, 0x69, 0x34, 0xe1, 0x80, 0x80, 0xbb, 0xe2,
+0xaf, 0xb9, 0xd6, 0x30, 0xf1, 0x1d, 0x54, 0x87,
+0x23, 0x99, 0x9f, 0x51, 0x03, 0x4c, 0x45, 0x7d,
+0x02, 0x65, 0x73, 0xab, 0xfd, 0xcf, 0x94, 0xcc,
+0x0d, 0x3a, 0x60, 0xfd, 0x3c, 0x14, 0x2f, 0x16,
+0x33, 0xa9, 0x21, 0x1f, 0xcb, 0x50, 0xb1, 0x8f,
+0x03, 0xee, 0xa0, 0x66, 0xa9, 0x16, 0x79, 0x14,
diff --git a/net/wireless/certs/sforshee.x509 b/net/wireless/certs/sforshee.x509
deleted file mode 100644 (file)
index c6f8f9d..0000000
Binary files a/net/wireless/certs/sforshee.x509 and /dev/null differ
index fdde0d9..a6f3cac 100644 (file)
@@ -439,6 +439,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
                if (rv)
                        goto use_default_name;
        } else {
+               int rv;
+
 use_default_name:
                /* NOTE:  This is *probably* safe w/out holding rtnl because of
                 * the restrictions on phy names.  Probably this call could
@@ -446,7 +448,11 @@ use_default_name:
                 * phyX.  But, might should add some locking and check return
                 * value, and use a different name if this one exists?
                 */
-               dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
+               rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
+               if (rv < 0) {
+                       kfree(rdev);
+                       return NULL;
+               }
        }
 
        INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
index d2f7e8b..eaff636 100644 (file)
@@ -507,8 +507,6 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
 void cfg80211_stop_nan(struct cfg80211_registered_device *rdev,
                       struct wireless_dev *wdev);
 
-#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
-
 #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
 #define CFG80211_DEV_WARN_ON(cond)     WARN_ON(cond)
 #else
index b1ac23c..542a4fc 100644 (file)
@@ -2610,7 +2610,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
        case NL80211_IFTYPE_AP:
                if (wdev->ssid_len &&
                    nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
-                       goto nla_put_failure;
+                       goto nla_put_failure_locked;
                break;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
@@ -2618,12 +2618,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
                const u8 *ssid_ie;
                if (!wdev->current_bss)
                        break;
+               rcu_read_lock();
                ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
                                               WLAN_EID_SSID);
-               if (!ssid_ie)
-                       break;
-               if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
-                       goto nla_put_failure;
+               if (ssid_ie &&
+                   nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
+                       goto nla_put_failure_rcu_locked;
+               rcu_read_unlock();
                break;
                }
        default:
@@ -2635,6 +2636,10 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
        genlmsg_end(msg, hdr);
        return 0;
 
+ nla_put_failure_rcu_locked:
+       rcu_read_unlock();
+ nla_put_failure_locked:
+       wdev_unlock(wdev);
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
@@ -9804,7 +9809,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
         */
        if (!wdev->cqm_config->last_rssi_event_value && wdev->current_bss &&
            rdev->ops->get_station) {
-               struct station_info sinfo;
+               struct station_info sinfo = {};
                u8 *mac_addr;
 
                mac_addr = wdev->current_bss->pub.bssid;
@@ -11359,7 +11364,8 @@ static int nl80211_nan_add_func(struct sk_buff *skb,
                break;
        case NL80211_NAN_FUNC_FOLLOW_UP:
                if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
-                   !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) {
+                   !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] ||
+                   !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) {
                        err = -EINVAL;
                        goto out;
                }
index 78e71b0..7b42f0b 100644 (file)
@@ -1769,8 +1769,7 @@ static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx,
        if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS)
                return;
 
-       chan_before.center_freq = chan->center_freq;
-       chan_before.flags = chan->flags;
+       chan_before = *chan;
 
        if (chan->flags & IEEE80211_CHAN_NO_IR) {
                chan->flags &= ~IEEE80211_CHAN_NO_IR;
index 7ca04a7..05186a4 100644 (file)
@@ -1254,8 +1254,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-       /* we are under RTNL - globally locked - so can use a static struct */
-       static struct station_info sinfo;
+       struct station_info sinfo = {};
        u8 addr[ETH_ALEN];
        int err;
 
index 30e5746..ac94771 100644 (file)
@@ -102,6 +102,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 
        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
+               xso->dev = NULL;
                dev_put(dev);
                return err;
        }
index 347ab31..5b24097 100644 (file)
@@ -8,15 +8,29 @@
  *
  */
 
+#include <linux/bottom_half.h>
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <linux/percpu.h>
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/ip_tunnels.h>
 #include <net/ip6_tunnel.h>
 
+struct xfrm_trans_tasklet {
+       struct tasklet_struct tasklet;
+       struct sk_buff_head queue;
+};
+
+struct xfrm_trans_cb {
+       int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
+};
+
+#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
+
 static struct kmem_cache *secpath_cachep __read_mostly;
 
 static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
@@ -25,6 +39,8 @@ static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];
 static struct gro_cells gro_cells;
 static struct net_device xfrm_napi_dev;
 
+static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);
+
 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
 {
        int err = 0;
@@ -207,7 +223,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        xfrm_address_t *daddr;
        struct xfrm_mode *inner_mode;
        u32 mark = skb->mark;
-       unsigned int family;
+       unsigned int family = AF_UNSPEC;
        int decaps = 0;
        int async = 0;
        bool xfrm_gro = false;
@@ -216,6 +232,16 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
        if (encap_type < 0) {
                x = xfrm_input_state(skb);
+
+               if (unlikely(x->km.state != XFRM_STATE_VALID)) {
+                       if (x->km.state == XFRM_STATE_ACQ)
+                               XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
+                       else
+                               XFRM_INC_STATS(net,
+                                              LINUX_MIB_XFRMINSTATEINVALID);
+                       goto drop;
+               }
+
                family = x->outer_mode->afinfo->family;
 
                /* An encap_type of -1 indicates async resumption. */
@@ -467,9 +493,41 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
 }
 EXPORT_SYMBOL(xfrm_input_resume);
 
+static void xfrm_trans_reinject(unsigned long data)
+{
+       struct xfrm_trans_tasklet *trans = (void *)data;
+       struct sk_buff_head queue;
+       struct sk_buff *skb;
+
+       __skb_queue_head_init(&queue);
+       skb_queue_splice_init(&trans->queue, &queue);
+
+       while ((skb = __skb_dequeue(&queue)))
+               XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
+}
+
+int xfrm_trans_queue(struct sk_buff *skb,
+                    int (*finish)(struct net *, struct sock *,
+                                  struct sk_buff *))
+{
+       struct xfrm_trans_tasklet *trans;
+
+       trans = this_cpu_ptr(&xfrm_trans_tasklet);
+
+       if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
+               return -ENOBUFS;
+
+       XFRM_TRANS_SKB_CB(skb)->finish = finish;
+       __skb_queue_tail(&trans->queue, skb);
+       tasklet_schedule(&trans->tasklet);
+       return 0;
+}
+EXPORT_SYMBOL(xfrm_trans_queue);
+
 void __init xfrm_input_init(void)
 {
        int err;
+       int i;
 
        init_dummy_netdev(&xfrm_napi_dev);
        err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
@@ -480,4 +538,13 @@ void __init xfrm_input_init(void)
                                           sizeof(struct sec_path),
                                           0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                           NULL);
+
+       for_each_possible_cpu(i) {
+               struct xfrm_trans_tasklet *trans;
+
+               trans = &per_cpu(xfrm_trans_tasklet, i);
+               __skb_queue_head_init(&trans->queue);
+               tasklet_init(&trans->tasklet, xfrm_trans_reinject,
+                            (unsigned long)trans);
+       }
 }
index 9542975..bd6b0e7 100644 (file)
@@ -609,7 +609,8 @@ static void xfrm_hash_rebuild(struct work_struct *work)
 
        /* re-insert all policies by order of creation */
        list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
-               if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
+               if (policy->walk.dead ||
+                   xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
                        /* skip socket policies */
                        continue;
                }
@@ -974,8 +975,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
        }
        if (!cnt)
                err = -ESRCH;
-       else
-               xfrm_policy_cache_flush();
 out:
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
        return err;
@@ -1168,9 +1167,15 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
  again:
        pol = rcu_dereference(sk->sk_policy[dir]);
        if (pol != NULL) {
-               bool match = xfrm_selector_match(&pol->selector, fl, family);
+               bool match;
                int err = 0;
 
+               if (pol->family != family) {
+                       pol = NULL;
+                       goto out;
+               }
+
+               match = xfrm_selector_match(&pol->selector, fl, family);
                if (match) {
                        if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
                                pol = NULL;
@@ -1737,6 +1742,8 @@ void xfrm_policy_cache_flush(void)
        bool found = 0;
        int cpu;
 
+       might_sleep();
+
        local_bh_disable();
        rcu_read_lock();
        for_each_possible_cpu(cpu) {
@@ -1833,6 +1840,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
                   sizeof(struct xfrm_policy *) * num_pols) == 0 &&
            xfrm_xdst_can_reuse(xdst, xfrm, err)) {
                dst_hold(&xdst->u.dst);
+               xfrm_pols_put(pols, num_pols);
                while (err > 0)
                        xfrm_state_put(xfrm[--err]);
                return xdst;
@@ -2055,8 +2063,11 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
        if (num_xfrms <= 0)
                goto make_dummy_bundle;
 
+       local_bh_disable();
        xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
-                                                 xflo->dst_orig);
+                                             xflo->dst_orig);
+       local_bh_enable();
+
        if (IS_ERR(xdst)) {
                err = PTR_ERR(xdst);
                if (err != -EAGAIN)
@@ -2143,9 +2154,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                                goto no_transform;
                        }
 
+                       local_bh_disable();
                        xdst = xfrm_resolve_and_create_bundle(
                                        pols, num_pols, fl,
                                        family, dst_orig);
+                       local_bh_enable();
+
                        if (IS_ERR(xdst)) {
                                xfrm_pols_put(pols, num_pols);
                                err = PTR_ERR(xdst);
index 065d896..a3785f5 100644 (file)
@@ -313,13 +313,14 @@ retry:
        if ((type && !try_module_get(type->owner)))
                type = NULL;
 
+       rcu_read_unlock();
+
        if (!type && try_load) {
                request_module("xfrm-offload-%d-%d", family, proto);
-               try_load = 0;
+               try_load = false;
                goto retry;
        }
 
-       rcu_read_unlock();
        return type;
 }
 
@@ -1343,6 +1344,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
 
        if (orig->aead) {
                x->aead = xfrm_algo_aead_clone(orig->aead);
+               x->geniv = orig->geniv;
                if (!x->aead)
                        goto error;
        }
@@ -1533,8 +1535,12 @@ out:
        err = -EINVAL;
        spin_lock_bh(&x1->lock);
        if (likely(x1->km.state == XFRM_STATE_VALID)) {
-               if (x->encap && x1->encap)
+               if (x->encap && x1->encap &&
+                   x->encap->encap_type == x1->encap->encap_type)
                        memcpy(x1->encap, x->encap, sizeof(*x1->encap));
+               else if (x->encap || x1->encap)
+                       goto fail;
+
                if (x->coaddr && x1->coaddr) {
                        memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
                }
@@ -1551,6 +1557,8 @@ out:
                x->km.state = XFRM_STATE_DEAD;
                __xfrm_state_put(x);
        }
+
+fail:
        spin_unlock_bh(&x1->lock);
 
        xfrm_state_put(x1);
@@ -2264,8 +2272,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
                        goto error;
        }
 
-       x->km.state = XFRM_STATE_VALID;
-
 error:
        return err;
 }
@@ -2274,7 +2280,13 @@ EXPORT_SYMBOL(__xfrm_init_state);
 
 int xfrm_init_state(struct xfrm_state *x)
 {
-       return __xfrm_init_state(x, true, false);
+       int err;
+
+       err = __xfrm_init_state(x, true, false);
+       if (!err)
+               x->km.state = XFRM_STATE_VALID;
+
+       return err;
 }
 
 EXPORT_SYMBOL(xfrm_init_state);
index 983b023..7f52b8e 100644 (file)
@@ -598,13 +598,6 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
                        goto error;
        }
 
-       if (attrs[XFRMA_OFFLOAD_DEV]) {
-               err = xfrm_dev_state_add(net, x,
-                                        nla_data(attrs[XFRMA_OFFLOAD_DEV]));
-               if (err)
-                       goto error;
-       }
-
        if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
                                               attrs[XFRMA_REPLAY_ESN_VAL])))
                goto error;
@@ -620,6 +613,14 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
        /* override default values from above */
        xfrm_update_ae_params(x, attrs, 0);
 
+       /* configure the hardware if offload is requested */
+       if (attrs[XFRMA_OFFLOAD_DEV]) {
+               err = xfrm_dev_state_add(net, x,
+                                        nla_data(attrs[XFRMA_OFFLOAD_DEV]));
+               if (err)
+                       goto error;
+       }
+
        return x;
 
 error:
@@ -662,6 +663,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto out;
        }
 
+       if (x->km.state == XFRM_STATE_VOID)
+               x->km.state = XFRM_STATE_VALID;
+
        c.seq = nlh->nlmsg_seq;
        c.portid = nlh->nlmsg_pid;
        c.event = nlh->nlmsg_type;
@@ -1419,11 +1423,14 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
 
 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 {
+       u16 prev_family;
        int i;
 
        if (nr > XFRM_MAX_DEPTH)
                return -EINVAL;
 
+       prev_family = family;
+
        for (i = 0; i < nr; i++) {
                /* We never validated the ut->family value, so many
                 * applications simply leave it at zero.  The check was
@@ -1435,6 +1442,12 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                if (!ut[i].family)
                        ut[i].family = family;
 
+               if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
+                   (ut[i].family != prev_family))
+                       return -EINVAL;
+
+               prev_family = ut[i].family;
+
                switch (ut[i].family) {
                case AF_INET:
                        break;
@@ -1445,6 +1458,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                default:
                        return -EINVAL;
                }
+
+               switch (ut[i].id.proto) {
+               case IPPROTO_AH:
+               case IPPROTO_ESP:
+               case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+               case IPPROTO_ROUTING:
+               case IPPROTO_DSTOPTS:
+#endif
+               case IPSEC_PROTO_ANY:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
        }
 
        return 0;
@@ -2470,7 +2498,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
        [XFRMA_PROTO]           = { .type = NLA_U8 },
        [XFRMA_ADDRESS_FILTER]  = { .len = sizeof(struct xfrm_address_filter) },
        [XFRMA_OFFLOAD_DEV]     = { .len = sizeof(struct xfrm_user_offload) },
-       [XFRMA_OUTPUT_MARK]     = { .len = NLA_U32 },
+       [XFRMA_OUTPUT_MARK]     = { .type = NLA_U32 },
 };
 
 static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
index 522ca92..242631a 100644 (file)
@@ -193,8 +193,18 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
                return -1;
        }
        event_fd[prog_cnt - 1] = efd;
-       ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
-       ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
+       err = ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
+       if (err < 0) {
+               printf("ioctl PERF_EVENT_IOC_ENABLE failed err %s\n",
+                      strerror(errno));
+               return -1;
+       }
+       err = ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
+       if (err < 0) {
+               printf("ioctl PERF_EVENT_IOC_SET_BPF failed err %s\n",
+                      strerror(errno));
+               return -1;
+       }
 
        return 0;
 }
index cb8997e..47cddf3 100644 (file)
@@ -265,12 +265,18 @@ else
 objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
 endif
 
+ifdef CONFIG_MODVERSIONS
+objtool_o = $(@D)/.tmp_$(@F)
+else
+objtool_o = $(@)
+endif
+
 # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
 # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
 # 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file
 cmd_objtool = $(if $(patsubst y%,, \
        $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
-       $(__objtool_obj) $(objtool_args) "$(@)";)
+       $(__objtool_obj) $(objtool_args) "$(objtool_o)";)
 objtool_obj = $(if $(patsubst y%,, \
        $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
        $(__objtool_obj))
@@ -286,16 +292,16 @@ objtool_dep = $(objtool_obj)                                      \
 define rule_cc_o_c
        $(call echo-cmd,checksrc) $(cmd_checksrc)                         \
        $(call cmd_and_fixdep,cc_o_c)                                     \
-       $(cmd_modversions_c)                                              \
        $(cmd_checkdoc)                                                   \
        $(call echo-cmd,objtool) $(cmd_objtool)                           \
+       $(cmd_modversions_c)                                              \
        $(call echo-cmd,record_mcount) $(cmd_record_mcount)
 endef
 
 define rule_as_o_S
        $(call cmd_and_fixdep,as_o_S)                                     \
-       $(cmd_modversions_S)                                              \
-       $(call echo-cmd,objtool) $(cmd_objtool)
+       $(call echo-cmd,objtool) $(cmd_objtool)                           \
+       $(cmd_modversions_S)
 endef
 
 # List module undefined symbols (or empty line if not enabled)
index 6f099f9..94b6648 100755 (executable)
@@ -83,8 +83,11 @@ def print_result(symboltype, symbolformat, argc):
     for d, n in delta:
         if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
 
-    print("Total: Before=%d, After=%d, chg %+.2f%%" % \
-        (otot, ntot, (ntot - otot)*100.0/otot))
+    if otot:
+        percent = (ntot - otot) * 100.0 / otot
+    else:
+        percent = 0
+    print("Total: Before=%d, After=%d, chg %+.2f%%" % (otot, ntot, percent))
 
 if sys.argv[1] == "-c":
     print_result("Function", "tT", 3)
index 95cda3e..31031f1 100755 (executable)
@@ -5753,7 +5753,7 @@ sub process {
                        for (my $count = $linenr; $count <= $lc; $count++) {
                                my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
                                $fmt =~ s/%%//g;
-                               if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNO]).)/) {
+                               if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNOx]).)/) {
                                        $bad_extension = $1;
                                        last;
                                }
@@ -6233,28 +6233,6 @@ sub process {
                        }
                }
 
-# whine about ACCESS_ONCE
-               if ($^V && $^V ge 5.10.0 &&
-                   $line =~ /\bACCESS_ONCE\s*$balanced_parens\s*(=(?!=))?\s*($FuncArg)?/) {
-                       my $par = $1;
-                       my $eq = $2;
-                       my $fun = $3;
-                       $par =~ s/^\(\s*(.*)\s*\)$/$1/;
-                       if (defined($eq)) {
-                               if (WARN("PREFER_WRITE_ONCE",
-                                        "Prefer WRITE_ONCE(<FOO>, <BAR>) over ACCESS_ONCE(<FOO>) = <BAR>\n" . $herecurr) &&
-                                   $fix) {
-                                       $fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)\s*$eq\s*\Q$fun\E/WRITE_ONCE($par, $fun)/;
-                               }
-                       } else {
-                               if (WARN("PREFER_READ_ONCE",
-                                        "Prefer READ_ONCE(<FOO>) over ACCESS_ONCE(<FOO>)\n" . $herecurr) &&
-                                   $fix) {
-                                       $fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)/READ_ONCE($par)/;
-                               }
-                       }
-               }
-
 # check for mutex_trylock_recursive usage
                if ($line =~ /mutex_trylock_recursive/) {
                        ERROR("LOCKING",
index 438120d..5ea0710 100755 (executable)
@@ -59,6 +59,14 @@ disas() {
                ${CROSS_COMPILE}strip $1.o
        fi
 
+       if [ "$ARCH" = "arm64" ]; then
+               if [ $width -eq 4 ]; then
+                       type=inst
+               fi
+
+               ${CROSS_COMPILE}strip $1.o
+       fi
+
        ${CROSS_COMPILE}objdump $OBJDUMPFLAGS -S $1.o | \
                grep -v "/tmp\|Disassembly\|\.text\|^$" > $1.dis 2>&1
 }
index 1f5ce95..7721d5b 100755 (executable)
 set -o errexit
 set -o nounset
 
+READELF="${CROSS_COMPILE:-}readelf"
+ADDR2LINE="${CROSS_COMPILE:-}addr2line"
+SIZE="${CROSS_COMPILE:-}size"
+NM="${CROSS_COMPILE:-}nm"
+
 command -v awk >/dev/null 2>&1 || die "awk isn't installed"
-command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
-command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
+command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed"
+command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed"
 
 usage() {
        echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
@@ -69,10 +76,10 @@ die() {
 find_dir_prefix() {
        local objfile=$1
 
-       local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
+       local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
        [[ -z $start_kernel_addr ]] && return
 
-       local file_line=$(addr2line -e $objfile $start_kernel_addr)
+       local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
        [[ -z $file_line ]] && return
 
        local prefix=${file_line%init/main.c:*}
@@ -104,7 +111,7 @@ __faddr2line() {
 
        # Go through each of the object's symbols which match the func name.
        # In rare cases there might be duplicates.
-       file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
+       file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}')
        while read symbol; do
                local fields=($symbol)
                local sym_base=0x${fields[0]}
@@ -156,10 +163,10 @@ __faddr2line() {
 
                # pass real address to addr2line
                echo "$func+$offset/$sym_size:"
-               addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
+               ${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
                DONE=1
 
-       done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
+       done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
 }
 
 [[ $# -lt 2 ]] && usage
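
The faddr2line changes above route every binutils invocation through a ${CROSS_COMPILE}-prefixed name so the script also works on cross-built kernels. A rough Python sketch of the same prefixing idea (the CROSS_COMPILE environment variable and tool list mirror the script; none of this is faddr2line itself):

import os
import shutil

# Build cross-tool names the same way the script does: "${CROSS_COMPILE}readelf" etc.
cross = os.environ.get("CROSS_COMPILE", "")
tools = {name: cross + name for name in ("readelf", "addr2line", "size", "nm")}

for name, cmd in sorted(tools.items()):
    found = shutil.which(cmd)
    print("%-9s -> %s%s" % (name, cmd, "" if found else "  (not found)"))
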
index 1bf949c..f6ab3cc 100644 (file)
@@ -96,6 +96,8 @@ def get_thread_info(task):
         thread_info_addr = task.address + ia64_task_size
         thread_info = thread_info_addr.cast(thread_info_ptr_type)
     else:
+        if task.type.fields()[0].type == thread_info_type.get_type():
+            return task['thread_info']
         thread_info = task['stack'].cast(thread_info_ptr_type)
     return thread_info.dereference()
 
index 86dc07a..e7836b4 100644 (file)
@@ -1,4 +1,3 @@
-*.hash.c
 *.lex.c
 *.tab.c
 *.tab.h
index cbf4996..8cee597 100644 (file)
@@ -893,7 +893,10 @@ static enum string_value_kind expr_parse_string(const char *str,
        switch (type) {
        case S_BOOLEAN:
        case S_TRISTATE:
-               return k_string;
+               val->s = !strcmp(str, "n") ? 0 :
+                        !strcmp(str, "m") ? 1 :
+                        !strcmp(str, "y") ? 2 : -1;
+               return k_signed;
        case S_INT:
                val->s = strtoll(str, &tail, 10);
                kind = k_signed;
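
The symbol.c hunk above makes boolean/tristate comparisons numeric by mapping n, m and y onto 0, 1 and 2 (and anything else onto -1) instead of comparing them as strings. A small Python sketch of that ordering:

# Map tristate strings onto the same ordered values the C code now uses.
TRISTATE = {"n": 0, "m": 1, "y": 2}

def tristate_value(s):
    return TRISTATE.get(s, -1)      # unknown strings compare below "n"

assert tristate_value("n") < tristate_value("m") < tristate_value("y")
print(sorted(["y", "n", "m", "?"], key=tristate_value))
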
index bd29a92..df0f045 100755 (executable)
@@ -3248,4 +3248,4 @@ if ($verbose && $warnings) {
   print STDERR "$warnings warnings\n";
 }
 
-exit($errors);
+exit($output_mode eq "none" ? 0 : $errors);
index e8e4494..b0cb9a5 100644 (file)
@@ -54,6 +54,17 @@ config SECURITY_NETWORK
          implement socket and networking access controls.
          If you are unsure how to answer this question, answer N.
 
+config PAGE_TABLE_ISOLATION
+       bool "Remove the kernel mapping in user mode"
+       default y
+       depends on X86_64 && !UML
+       help
+         This feature reduces the number of hardware side channels by
+         ensuring that the majority of kernel addresses are not mapped
+         into userspace.
+
+         See Documentation/x86/pti.txt for more details.
+
 config SECURITY_INFINIBAND
        bool "Infiniband Security Hooks"
        depends on SECURITY && INFINIBAND
index 8542e9a..d4fa04d 100644 (file)
@@ -2451,7 +2451,7 @@ static int __init aa_create_aafs(void)
        aafs_mnt = kern_mount(&aafs_ops);
        if (IS_ERR(aafs_mnt))
                panic("can't set apparmorfs up\n");
-       aafs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+       aafs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
 
        /* Populate fs tree. */
        error = entry_create_dir(&aa_sfs_entry, NULL);
index 04ba9d0..6a54d2f 100644 (file)
@@ -330,10 +330,7 @@ static struct aa_profile *__attach_match(const char *name,
                        continue;
 
                if (profile->xmatch) {
-                       if (profile->xmatch_len == len) {
-                               conflict = true;
-                               continue;
-                       } else if (profile->xmatch_len > len) {
+                       if (profile->xmatch_len >= len) {
                                unsigned int state;
                                u32 perm;
 
@@ -342,6 +339,10 @@ static struct aa_profile *__attach_match(const char *name,
                                perm = dfa_user_allow(profile->xmatch, state);
                                /* any accepting state means a valid match. */
                                if (perm & MAY_EXEC) {
+                                       if (profile->xmatch_len == len) {
+                                               conflict = true;
+                                               continue;
+                                       }
                                        candidate = profile;
                                        len = profile->xmatch_len;
                                        conflict = false;
index 620e811..4ac0951 100644 (file)
@@ -121,17 +121,19 @@ struct apparmor_audit_data {
                /* these entries require a custom callback fn */
                struct {
                        struct aa_label *peer;
-                       struct {
-                               const char *target;
-                               kuid_t ouid;
-                       } fs;
+                       union {
+                               struct {
+                                       const char *target;
+                                       kuid_t ouid;
+                               } fs;
+                               int signal;
+                       };
                };
                struct {
                        struct aa_profile *profile;
                        const char *ns;
                        long pos;
                } iface;
-               int signal;
                struct {
                        int rlim;
                        unsigned long max;
index f546707..6505e1a 100644 (file)
@@ -86,7 +86,7 @@ static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
 
 static inline bool path_mediated_fs(struct dentry *dentry)
 {
-       return !(dentry->d_sb->s_flags & MS_NOUSER);
+       return !(dentry->d_sb->s_flags & SB_NOUSER);
 }
 
 
index 2b27bb7..d7b7e71 100644 (file)
@@ -133,6 +133,9 @@ extern struct aa_perms allperms;
 #define xcheck_labels_profiles(L1, L2, FN, args...)            \
        xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args)
 
+#define xcheck_labels(L1, L2, P, FN1, FN2)                     \
+       xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2)))
+
 
 void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
 void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
index 7ca0032..b40678f 100644 (file)
@@ -64,40 +64,48 @@ static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
                        FLAGS_NONE, GFP_ATOMIC);
 }
 
+/* assumes check for PROFILE_MEDIATES is already done */
 /* TODO: conditionals */
 static int profile_ptrace_perm(struct aa_profile *profile,
-                              struct aa_profile *peer, u32 request,
-                              struct common_audit_data *sa)
+                            struct aa_label *peer, u32 request,
+                            struct common_audit_data *sa)
 {
        struct aa_perms perms = { };
 
-       /* need because of peer in cross check */
-       if (profile_unconfined(profile) ||
-           !PROFILE_MEDIATES(profile, AA_CLASS_PTRACE))
-               return 0;
-
-       aad(sa)->peer = &peer->label;
-       aa_profile_match_label(profile, &peer->label, AA_CLASS_PTRACE, request,
+       aad(sa)->peer = peer;
+       aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request,
                               &perms);
        aa_apply_modes_to_perms(profile, &perms);
        return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
 }
 
-static int cross_ptrace_perm(struct aa_profile *tracer,
-                            struct aa_profile *tracee, u32 request,
-                            struct common_audit_data *sa)
+static int profile_tracee_perm(struct aa_profile *tracee,
+                              struct aa_label *tracer, u32 request,
+                              struct common_audit_data *sa)
 {
+       if (profile_unconfined(tracee) || unconfined(tracer) ||
+           !PROFILE_MEDIATES(tracee, AA_CLASS_PTRACE))
+               return 0;
+
+       return profile_ptrace_perm(tracee, tracer, request, sa);
+}
+
+static int profile_tracer_perm(struct aa_profile *tracer,
+                              struct aa_label *tracee, u32 request,
+                              struct common_audit_data *sa)
+{
+       if (profile_unconfined(tracer))
+               return 0;
+
        if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE))
-               return xcheck(profile_ptrace_perm(tracer, tracee, request, sa),
-                             profile_ptrace_perm(tracee, tracer,
-                                                 request << PTRACE_PERM_SHIFT,
-                                                 sa));
-       /* policy uses the old style capability check for ptrace */
-       if (profile_unconfined(tracer) || tracer == tracee)
+               return profile_ptrace_perm(tracer, tracee, request, sa);
+
+       /* profile uses the old style capability check for ptrace */
+       if (&tracer->label == tracee)
                return 0;
 
        aad(sa)->label = &tracer->label;
-       aad(sa)->peer = &tracee->label;
+       aad(sa)->peer = tracee;
        aad(sa)->request = 0;
        aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1);
 
@@ -115,10 +123,13 @@ static int cross_ptrace_perm(struct aa_profile *tracer,
 int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
                  u32 request)
 {
+       struct aa_profile *profile;
+       u32 xrequest = request << PTRACE_PERM_SHIFT;
        DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE);
 
-       return xcheck_labels_profiles(tracer, tracee, cross_ptrace_perm,
-                                     request, &sa);
+       return xcheck_labels(tracer, tracee, profile,
+                       profile_tracer_perm(profile, tracee, request, &sa),
+                       profile_tracee_perm(profile, tracer, xrequest, &sa));
 }
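
aa_may_ptrace() now runs one callback per tracer profile and another per tracee profile instead of a single combined cross-check. A rough Python sketch of the cross-check shape only (labels as plain lists of profile names; none of this is the AppArmor API):

def xcheck(first_err, second_err):
    # Mirror the kernel helper: report the second error if set, else the first.
    return second_err if second_err else first_err

def for_each_profile(profiles, check):
    # Run a per-profile check over every profile in a label; any error wins.
    err = 0
    for p in profiles:
        e = check(p)
        if e:
            err = e
    return err

tracer = ["firefox", "firefox//&plugin"]   # hypothetical labels
tracee = ["unconfined"]
err = xcheck(for_each_profile(tracer, lambda p: 0),   # tracer-side checks pass
             for_each_profile(tracee, lambda p: 0))   # tracee-side checks pass
print("ptrace allowed" if err == 0 else "ptrace denied: %d" % err)
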
 
 
index ed9b4d0..8c558cb 100644 (file)
@@ -329,6 +329,9 @@ static int match_mnt_path_str(struct aa_profile *profile,
        AA_BUG(!mntpath);
        AA_BUG(!buffer);
 
+       if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+               return 0;
+
        error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer,
                             &mntpnt, &info, profile->disconnected);
        if (error)
@@ -380,6 +383,9 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
        AA_BUG(!profile);
        AA_BUG(devpath && !devbuffer);
 
+       if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+               return 0;
+
        if (devpath) {
                error = aa_path_name(devpath, path_flags(profile, devpath),
                                     devbuffer, &devname, &info,
@@ -558,6 +564,9 @@ static int profile_umount(struct aa_profile *profile, struct path *path,
        AA_BUG(!profile);
        AA_BUG(!path);
 
+       if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+               return 0;
+
        error = aa_path_name(path, path_flags(profile, path), buffer, &name,
                             &info, profile->disconnected);
        if (error)
@@ -613,7 +622,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
        AA_BUG(!new_path);
        AA_BUG(!old_path);
 
-       if (profile_unconfined(profile))
+       if (profile_unconfined(profile) ||
+           !PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
                return aa_get_newest_label(&profile->label);
 
        error = aa_path_name(old_path, path_flags(profile, old_path),
index 4f8e093..48620c9 100644 (file)
@@ -348,21 +348,18 @@ static __u32 sansflags(__u32 m)
        return m & ~VFS_CAP_FLAGS_EFFECTIVE;
 }
 
-static bool is_v2header(size_t size, __le32 magic)
+static bool is_v2header(size_t size, const struct vfs_cap_data *cap)
 {
-       __u32 m = le32_to_cpu(magic);
        if (size != XATTR_CAPS_SZ_2)
                return false;
-       return sansflags(m) == VFS_CAP_REVISION_2;
+       return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_2;
 }
 
-static bool is_v3header(size_t size, __le32 magic)
+static bool is_v3header(size_t size, const struct vfs_cap_data *cap)
 {
-       __u32 m = le32_to_cpu(magic);
-
        if (size != XATTR_CAPS_SZ_3)
                return false;
-       return sansflags(m) == VFS_CAP_REVISION_3;
+       return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_3;
 }
 
 /*
@@ -405,7 +402,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
 
        fs_ns = inode->i_sb->s_user_ns;
        cap = (struct vfs_cap_data *) tmpbuf;
-       if (is_v2header((size_t) ret, cap->magic_etc)) {
+       if (is_v2header((size_t) ret, cap)) {
                /* If this is sizeof(vfs_cap_data) then we're ok with the
                 * on-disk value, so return that.  */
                if (alloc)
@@ -413,7 +410,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
                else
                        kfree(tmpbuf);
                return ret;
-       } else if (!is_v3header((size_t) ret, cap->magic_etc)) {
+       } else if (!is_v3header((size_t) ret, cap)) {
                kfree(tmpbuf);
                return -EINVAL;
        }
@@ -470,9 +467,9 @@ static kuid_t rootid_from_xattr(const void *value, size_t size,
        return make_kuid(task_ns, rootid);
 }
 
-static bool validheader(size_t size, __le32 magic)
+static bool validheader(size_t size, const struct vfs_cap_data *cap)
 {
-       return is_v2header(size, magic) || is_v3header(size, magic);
+       return is_v2header(size, cap) || is_v3header(size, cap);
 }
 
 /*
@@ -495,7 +492,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
 
        if (!*ivalue)
                return -EINVAL;
-       if (!validheader(size, cap->magic_etc))
+       if (!validheader(size, cap))
                return -EINVAL;
        if (!capable_wrt_inode_uidgid(inode, CAP_SETFCAP))
                return -EPERM;
index 6604918..d97c939 100644 (file)
@@ -833,7 +833,6 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
 
        key_check(keyring);
 
-       key_ref = ERR_PTR(-EPERM);
        if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
                restrict_link = keyring->restrict_link;
 
index 76d22f7..1ffe60b 100644 (file)
@@ -1588,9 +1588,8 @@ error_keyring:
  * The caller must have Setattr permission to change keyring restrictions.
  *
  * The requested type name may be a NULL pointer to reject all attempts
- * to link to the keyring. If _type is non-NULL, _restriction can be
- * NULL or a pointer to a string describing the restriction. If _type is
- * NULL, _restriction must also be NULL.
+ * to link to the keyring.  In this case, _restriction must also be NULL.
+ * Otherwise, both _type and _restriction must be non-NULL.
  *
  * Returns 0 if successful.
  */
@@ -1598,7 +1597,6 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
                             const char __user *_restriction)
 {
        key_ref_t key_ref;
-       bool link_reject = !_type;
        char type[32];
        char *restriction = NULL;
        long ret;
@@ -1607,31 +1605,29 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
        if (IS_ERR(key_ref))
                return PTR_ERR(key_ref);
 
+       ret = -EINVAL;
        if (_type) {
-               ret = key_get_type_from_user(type, _type, sizeof(type));
-               if (ret < 0)
+               if (!_restriction)
                        goto error;
-       }
 
-       if (_restriction) {
-               if (!_type) {
-                       ret = -EINVAL;
+               ret = key_get_type_from_user(type, _type, sizeof(type));
+               if (ret < 0)
                        goto error;
-               }
 
                restriction = strndup_user(_restriction, PAGE_SIZE);
                if (IS_ERR(restriction)) {
                        ret = PTR_ERR(restriction);
                        goto error;
                }
+       } else {
+               if (_restriction)
+                       goto error;
        }
 
-       ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction);
+       ret = keyring_restrict(key_ref, _type ? type : NULL, restriction);
        kfree(restriction);
-
 error:
        key_ref_put(key_ref);
-
        return ret;
 }
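
The restructured keyctl_restrict_keyring() accepts either both _type and _restriction or neither (the reject-all case); a mixed pair is now rejected up front with -EINVAL. A tiny Python sketch of that argument pairing, with hypothetical example values:

def validate_restrict_args(type_name, restriction):
    # Both present, or both absent -- anything else is the EINVAL case.
    if (type_name is None) != (restriction is None):
        raise ValueError("EINVAL: _type and _restriction must be paired")
    if type_name is None:
        return "reject all links"
    return "restrict by %s: %s" % (type_name, restriction)

print(validate_restrict_args(None, None))
print(validate_restrict_args("asymmetric", "key_or_keyring:0"))   # example strings only
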
 
index e8036cd..114f740 100644 (file)
@@ -251,11 +251,12 @@ static int construct_key(struct key *key, const void *callout_info,
  * The keyring selected is returned with an extra reference upon it which the
  * caller must release.
  */
-static void construct_get_dest_keyring(struct key **_dest_keyring)
+static int construct_get_dest_keyring(struct key **_dest_keyring)
 {
        struct request_key_auth *rka;
        const struct cred *cred = current_cred();
        struct key *dest_keyring = *_dest_keyring, *authkey;
+       int ret;
 
        kenter("%p", dest_keyring);
 
@@ -264,6 +265,8 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
                /* the caller supplied one */
                key_get(dest_keyring);
        } else {
+               bool do_perm_check = true;
+
                /* use a default keyring; falling through the cases until we
                 * find one that we actually have */
                switch (cred->jit_keyring) {
@@ -278,8 +281,10 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
                                        dest_keyring =
                                                key_get(rka->dest_keyring);
                                up_read(&authkey->sem);
-                               if (dest_keyring)
+                               if (dest_keyring) {
+                                       do_perm_check = false;
                                        break;
+                               }
                        }
 
                case KEY_REQKEY_DEFL_THREAD_KEYRING:
@@ -314,11 +319,29 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
                default:
                        BUG();
                }
+
+               /*
+                * Require Write permission on the keyring.  This is essential
+                * because the default keyring may be the session keyring, and
+                * joining a keyring only requires Search permission.
+                *
+                * However, this check is skipped for the "requestor keyring" so
+                * that /sbin/request-key can itself use request_key() to add
+                * keys to the original requestor's destination keyring.
+                */
+               if (dest_keyring && do_perm_check) {
+                       ret = key_permission(make_key_ref(dest_keyring, 1),
+                                            KEY_NEED_WRITE);
+                       if (ret) {
+                               key_put(dest_keyring);
+                               return ret;
+                       }
+               }
        }
 
        *_dest_keyring = dest_keyring;
        kleave(" [dk %d]", key_serial(dest_keyring));
-       return;
+       return 0;
 }
 
 /*
@@ -444,11 +467,15 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
        if (ctx->index_key.type == &key_type_keyring)
                return ERR_PTR(-EPERM);
 
-       user = key_user_lookup(current_fsuid());
-       if (!user)
-               return ERR_PTR(-ENOMEM);
+       ret = construct_get_dest_keyring(&dest_keyring);
+       if (ret)
+               goto error;
 
-       construct_get_dest_keyring(&dest_keyring);
+       user = key_user_lookup(current_fsuid());
+       if (!user) {
+               ret = -ENOMEM;
+               goto error_put_dest_keyring;
+       }
 
        ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
        key_user_put(user);
@@ -463,7 +490,7 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
        } else if (ret == -EINPROGRESS) {
                ret = 0;
        } else {
-               goto couldnt_alloc_key;
+               goto error_put_dest_keyring;
        }
 
        key_put(dest_keyring);
@@ -473,8 +500,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
 construction_failed:
        key_negate_and_link(key, key_negative_timeout, NULL, NULL);
        key_put(key);
-couldnt_alloc_key:
+error_put_dest_keyring:
        key_put(dest_keyring);
+error:
        kleave(" = %d", ret);
        return ERR_PTR(ret);
 }
@@ -546,9 +574,7 @@ struct key *request_key_and_link(struct key_type *type,
        if (!IS_ERR(key_ref)) {
                key = key_ref_to_ptr(key_ref);
                if (dest_keyring) {
-                       construct_get_dest_keyring(&dest_keyring);
                        ret = key_link(dest_keyring, key);
-                       key_put(dest_keyring);
                        if (ret < 0) {
                                key_put(key);
                                key = ERR_PTR(ret);
index e49f448..e8b1987 100644 (file)
@@ -186,7 +186,7 @@ static int _snd_pcm_hw_param_mask(struct snd_pcm_hw_params *params,
 {
        int changed;
        changed = snd_mask_refine(hw_param_mask(params, var), val);
-       if (changed) {
+       if (changed > 0) {
                params->cmask |= 1 << var;
                params->rmask |= 1 << var;
        }
@@ -233,7 +233,7 @@ static int _snd_pcm_hw_param_min(struct snd_pcm_hw_params *params,
                                                  val, open);
        else
                return -EINVAL;
-       if (changed) {
+       if (changed > 0) {
                params->cmask |= 1 << var;
                params->rmask |= 1 << var;
        }
@@ -294,7 +294,7 @@ static int _snd_pcm_hw_param_max(struct snd_pcm_hw_params *params,
                                                  val, open);
        else
                return -EINVAL;
-       if (changed) {
+       if (changed > 0) {
                params->cmask |= 1 << var;
                params->rmask |= 1 << var;
        }
@@ -455,7 +455,6 @@ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm,
                v = snd_pcm_hw_param_last(pcm, params, var, dir);
        else
                v = snd_pcm_hw_param_first(pcm, params, var, dir);
-       snd_BUG_ON(v < 0);
        return v;
 }
 
@@ -500,7 +499,7 @@ static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params,
                }
        } else
                return -EINVAL;
-       if (changed) {
+       if (changed > 0) {
                params->cmask |= 1 << var;
                params->rmask |= 1 << var;
        }
@@ -540,7 +539,7 @@ static int _snd_pcm_hw_param_setinteger(struct snd_pcm_hw_params *params,
 {
        int changed;
        changed = snd_interval_setinteger(hw_param_interval(params, var));
-       if (changed) {
+       if (changed > 0) {
                params->cmask |= 1 << var;
                params->rmask |= 1 << var;
        }
@@ -843,7 +842,7 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
                if (!(mutex_trylock(&runtime->oss.params_lock)))
                        return -EAGAIN;
        } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
-               return -EINTR;
+               return -ERESTARTSYS;
        sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
        params = kmalloc(sizeof(*params), GFP_KERNEL);
        sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
@@ -1335,8 +1334,11 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
 
        if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
                return tmp;
-       mutex_lock(&runtime->oss.params_lock);
        while (bytes > 0) {
+               if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+                       tmp = -ERESTARTSYS;
+                       break;
+               }
                if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
                        tmp = bytes;
                        if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1380,14 +1382,18 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
                        xfer += tmp;
                        if ((substream->f_flags & O_NONBLOCK) != 0 &&
                            tmp != runtime->oss.period_bytes)
-                               break;
+                               tmp = -EAGAIN;
                }
-       }
-       mutex_unlock(&runtime->oss.params_lock);
-       return xfer;
-
  err:
-       mutex_unlock(&runtime->oss.params_lock);
+               mutex_unlock(&runtime->oss.params_lock);
+               if (tmp < 0)
+                       break;
+               if (signal_pending(current)) {
+                       tmp = -ERESTARTSYS;
+                       break;
+               }
+               tmp = 0;
+       }
        return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
 }
 
@@ -1435,8 +1441,11 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
 
        if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
                return tmp;
-       mutex_lock(&runtime->oss.params_lock);
        while (bytes > 0) {
+               if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+                       tmp = -ERESTARTSYS;
+                       break;
+               }
                if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
                        if (runtime->oss.buffer_used == 0) {
                                tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1467,12 +1476,16 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
                        bytes -= tmp;
                        xfer += tmp;
                }
-       }
-       mutex_unlock(&runtime->oss.params_lock);
-       return xfer;
-
  err:
-       mutex_unlock(&runtime->oss.params_lock);
+               mutex_unlock(&runtime->oss.params_lock);
+               if (tmp < 0)
+                       break;
+               if (signal_pending(current)) {
+                       tmp = -ERESTARTSYS;
+                       break;
+               }
+               tmp = 0;
+       }
        return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
 }
 
index cadc937..85a56af 100644 (file)
@@ -592,18 +592,26 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
        snd_pcm_sframes_t frames = size;
 
        plugin = snd_pcm_plug_first(plug);
-       while (plugin && frames > 0) {
+       while (plugin) {
+               if (frames <= 0)
+                       return frames;
                if ((next = plugin->next) != NULL) {
                        snd_pcm_sframes_t frames1 = frames;
-                       if (plugin->dst_frames)
+                       if (plugin->dst_frames) {
                                frames1 = plugin->dst_frames(plugin, frames);
+                               if (frames1 <= 0)
+                                       return frames1;
+                       }
                        if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) {
                                return err;
                        }
                        if (err != frames1) {
                                frames = err;
-                               if (plugin->src_frames)
+                               if (plugin->src_frames) {
                                        frames = plugin->src_frames(plugin, frames1);
+                                       if (frames <= 0)
+                                               return frames;
+                               }
                        }
                } else
                        dst_channels = NULL;
index 9070f27..09ee8c6 100644 (file)
@@ -153,7 +153,9 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
                                err = -ENXIO;
                                goto _error;
                        }
+                       mutex_lock(&pcm->open_mutex);
                        err = snd_pcm_info_user(substream, info);
+                       mutex_unlock(&pcm->open_mutex);
                _error:
                        mutex_unlock(&register_mutex);
                        return err;
index 10e7ef7..a83152e 100644 (file)
@@ -560,7 +560,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
 {
        u_int64_t n = (u_int64_t) a * b;
        if (c == 0) {
-               snd_BUG_ON(!n);
                *r = 0;
                return UINT_MAX;
        }
@@ -1603,7 +1602,7 @@ static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
                changed = snd_interval_refine_first(hw_param_interval(params, var));
        else
                return -EINVAL;
-       if (changed) {
+       if (changed > 0) {
                params->cmask |= 1 << var;
                params->rmask |= 1 << var;
        }
@@ -1632,7 +1631,7 @@ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
                return changed;
        if (params->rmask) {
                int err = snd_pcm_hw_refine(pcm, params);
-               if (snd_BUG_ON(err < 0))
+               if (err < 0)
                        return err;
        }
        return snd_pcm_hw_param_value(params, var, dir);
@@ -1649,7 +1648,7 @@ static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
                changed = snd_interval_refine_last(hw_param_interval(params, var));
        else
                return -EINVAL;
-       if (changed) {
+       if (changed > 0) {
                params->cmask |= 1 << var;
                params->rmask |= 1 << var;
        }
@@ -1678,7 +1677,7 @@ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
                return changed;
        if (params->rmask) {
                int err = snd_pcm_hw_refine(pcm, params);
-               if (snd_BUG_ON(err < 0))
+               if (err < 0)
                        return err;
        }
        return snd_pcm_hw_param_value(params, var, dir);
index 9be8102..c4eb561 100644 (file)
@@ -163,13 +163,30 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
                .width = 32, .phys = 32, .le = 0, .signd = 0,
                .silence = { 0x69, 0x69, 0x69, 0x69 },
        },
-       /* FIXME: the following three formats are not defined properly yet */
+       /* FIXME: the following two formats are not defined properly yet */
        [SNDRV_PCM_FORMAT_MPEG] = {
                .le = -1, .signd = -1,
        },
        [SNDRV_PCM_FORMAT_GSM] = {
                .le = -1, .signd = -1,
        },
+       [SNDRV_PCM_FORMAT_S20_LE] = {
+               .width = 20, .phys = 32, .le = 1, .signd = 1,
+               .silence = {},
+       },
+       [SNDRV_PCM_FORMAT_S20_BE] = {
+               .width = 20, .phys = 32, .le = 0, .signd = 1,
+               .silence = {},
+       },
+       [SNDRV_PCM_FORMAT_U20_LE] = {
+               .width = 20, .phys = 32, .le = 1, .signd = 0,
+               .silence = { 0x00, 0x00, 0x08, 0x00 },
+       },
+       [SNDRV_PCM_FORMAT_U20_BE] = {
+               .width = 20, .phys = 32, .le = 0, .signd = 0,
+               .silence = { 0x00, 0x08, 0x00, 0x00 },
+       },
+       /* FIXME: the following format is not defined properly yet */
        [SNDRV_PCM_FORMAT_SPECIAL] = {
                .le = -1, .signd = -1,
        },
index a4d92e4..484a18d 100644 (file)
@@ -2580,7 +2580,7 @@ static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
        return ret < 0 ? ret : frames;
 }
 
-/* decrease the appl_ptr; returns the processed frames or a negative error */
+/* decrease the appl_ptr; returns the processed frames or zero for error */
 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
                                         snd_pcm_uframes_t frames,
                                         snd_pcm_sframes_t avail)
@@ -2597,7 +2597,12 @@ static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
        if (appl_ptr < 0)
                appl_ptr += runtime->boundary;
        ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
-       return ret < 0 ? ret : frames;
+       /* NOTE: we return zero for errors because PulseAudio gets depressed
+        * upon receiving an error from rewind ioctl and stops processing
+        * any longer.  Returning zero means that no rewind is done, so
+        * it's not absolutely wrong to answer like that.
+        */
+       return ret < 0 ? 0 : frames;
 }
 
 static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
@@ -3441,7 +3446,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
                           struct vm_area_struct *area)
 {
-       struct snd_pcm_runtime *runtime = substream->runtime;;
+       struct snd_pcm_runtime *runtime = substream->runtime;
 
        area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
        return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
index b3b353d..f055ca1 100644 (file)
@@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
        return 0;
 }
 
-int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+static int __snd_rawmidi_info_select(struct snd_card *card,
+                                    struct snd_rawmidi_info *info)
 {
        struct snd_rawmidi *rmidi;
        struct snd_rawmidi_str *pstr;
        struct snd_rawmidi_substream *substream;
 
-       mutex_lock(&register_mutex);
        rmidi = snd_rawmidi_search(card, info->device);
-       mutex_unlock(&register_mutex);
        if (!rmidi)
                return -ENXIO;
        if (info->stream < 0 || info->stream > 1)
@@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info
        }
        return -ENXIO;
 }
+
+int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+{
+       int ret;
+
+       mutex_lock(&register_mutex);
+       ret = __snd_rawmidi_info_select(card, info);
+       mutex_unlock(&register_mutex);
+       return ret;
+}
 EXPORT_SYMBOL(snd_rawmidi_info_select);
 
 static int snd_rawmidi_info_select_user(struct snd_card *card,
index 6e22eea..d019134 100644 (file)
@@ -221,6 +221,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
        rwlock_init(&client->ports_lock);
        mutex_init(&client->ports_mutex);
        INIT_LIST_HEAD(&client->ports_list_head);
+       mutex_init(&client->ioctl_mutex);
 
        /* find free slot in the client table */
        spin_lock_irqsave(&clients_lock, flags);
@@ -2130,7 +2131,9 @@ static long snd_seq_ioctl(struct file *file, unsigned int cmd,
                        return -EFAULT;
        }
 
+       mutex_lock(&client->ioctl_mutex);
        err = handler->func(client, &buf);
+       mutex_unlock(&client->ioctl_mutex);
        if (err >= 0) {
                /* Some commands includes a bug in 'dir' field. */
                if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
index c661425..0611e1e 100644 (file)
@@ -61,6 +61,7 @@ struct snd_seq_client {
        struct list_head ports_list_head;
        rwlock_t ports_lock;
        struct mutex ports_mutex;
+       struct mutex ioctl_mutex;
        int convert32;          /* convert 32->64bit */
 
        /* output pool */
index 79e0c56..0428e90 100644 (file)
@@ -497,9 +497,7 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
                return -EPERM;
        }
 
-       result = snd_seq_timer_set_tempo(q->timer, info->tempo);
-       if (result >= 0)
-               result = snd_seq_timer_set_ppq(q->timer, info->ppq);
+       result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq);
        if (result >= 0 && info->skew_base > 0)
                result = snd_seq_timer_set_skew(q->timer, info->skew_value,
                                                info->skew_base);
index 37d9cfb..2316757 100644 (file)
@@ -191,14 +191,15 @@ int snd_seq_timer_set_tempo(struct snd_seq_timer * tmr, int tempo)
        return 0;
 }
 
-/* set current ppq */
-int snd_seq_timer_set_ppq(struct snd_seq_timer * tmr, int ppq)
+/* set current tempo and ppq in a shot */
+int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq)
 {
+       int changed;
        unsigned long flags;
 
        if (snd_BUG_ON(!tmr))
                return -EINVAL;
-       if (ppq <= 0)
+       if (tempo <= 0 || ppq <= 0)
                return -EINVAL;
        spin_lock_irqsave(&tmr->lock, flags);
        if (tmr->running && (ppq != tmr->ppq)) {
@@ -208,9 +209,11 @@ int snd_seq_timer_set_ppq(struct snd_seq_timer * tmr, int ppq)
                pr_debug("ALSA: seq: cannot change ppq of a running timer\n");
                return -EBUSY;
        }
-
+       changed = (tempo != tmr->tempo) || (ppq != tmr->ppq);
+       tmr->tempo = tempo;
        tmr->ppq = ppq;
-       snd_seq_timer_set_tick_resolution(tmr);
+       if (changed)
+               snd_seq_timer_set_tick_resolution(tmr);
        spin_unlock_irqrestore(&tmr->lock, flags);
        return 0;
 }
@@ -355,7 +358,7 @@ static int initialize_timer(struct snd_seq_timer *tmr)
        unsigned long freq;
 
        t = tmr->timeri->timer;
-       if (snd_BUG_ON(!t))
+       if (!t)
                return -EINVAL;
 
        freq = tmr->preferred_resolution;
index 9506b66..62f3906 100644 (file)
@@ -131,7 +131,7 @@ int snd_seq_timer_stop(struct snd_seq_timer *tmr);
 int snd_seq_timer_start(struct snd_seq_timer *tmr);
 int snd_seq_timer_continue(struct snd_seq_timer *tmr);
 int snd_seq_timer_set_tempo(struct snd_seq_timer *tmr, int tempo);
-int snd_seq_timer_set_ppq(struct snd_seq_timer *tmr, int ppq);
+int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
 int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
 int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
 int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
index afac886..0333143 100644 (file)
@@ -39,6 +39,7 @@
 #include <sound/core.h>
 #include <sound/control.h>
 #include <sound/pcm.h>
+#include <sound/pcm_params.h>
 #include <sound/info.h>
 #include <sound/initval.h>
 
@@ -305,19 +306,6 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
        return 0;
 }
 
-static void params_change_substream(struct loopback_pcm *dpcm,
-                                   struct snd_pcm_runtime *runtime)
-{
-       struct snd_pcm_runtime *dst_runtime;
-
-       if (dpcm == NULL || dpcm->substream == NULL)
-               return;
-       dst_runtime = dpcm->substream->runtime;
-       if (dst_runtime == NULL)
-               return;
-       dst_runtime->hw = dpcm->cable->hw;
-}
-
 static void params_change(struct snd_pcm_substream *substream)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
@@ -329,10 +317,6 @@ static void params_change(struct snd_pcm_substream *substream)
        cable->hw.rate_max = runtime->rate;
        cable->hw.channels_min = runtime->channels;
        cable->hw.channels_max = runtime->channels;
-       params_change_substream(cable->streams[SNDRV_PCM_STREAM_PLAYBACK],
-                               runtime);
-       params_change_substream(cable->streams[SNDRV_PCM_STREAM_CAPTURE],
-                               runtime);
 }
 
 static int loopback_prepare(struct snd_pcm_substream *substream)
@@ -620,26 +604,29 @@ static unsigned int get_cable_index(struct snd_pcm_substream *substream)
 static int rule_format(struct snd_pcm_hw_params *params,
                       struct snd_pcm_hw_rule *rule)
 {
+       struct loopback_pcm *dpcm = rule->private;
+       struct loopback_cable *cable = dpcm->cable;
+       struct snd_mask m;
 
-       struct snd_pcm_hardware *hw = rule->private;
-       struct snd_mask *maskp = hw_param_mask(params, rule->var);
-
-       maskp->bits[0] &= (u_int32_t)hw->formats;
-       maskp->bits[1] &= (u_int32_t)(hw->formats >> 32);
-       memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
-       if (! maskp->bits[0] && ! maskp->bits[1])
-               return -EINVAL;
-       return 0;
+       snd_mask_none(&m);
+       mutex_lock(&dpcm->loopback->cable_lock);
+       m.bits[0] = (u_int32_t)cable->hw.formats;
+       m.bits[1] = (u_int32_t)(cable->hw.formats >> 32);
+       mutex_unlock(&dpcm->loopback->cable_lock);
+       return snd_mask_refine(hw_param_mask(params, rule->var), &m);
 }
 
 static int rule_rate(struct snd_pcm_hw_params *params,
                     struct snd_pcm_hw_rule *rule)
 {
-       struct snd_pcm_hardware *hw = rule->private;
+       struct loopback_pcm *dpcm = rule->private;
+       struct loopback_cable *cable = dpcm->cable;
        struct snd_interval t;
 
-        t.min = hw->rate_min;
-        t.max = hw->rate_max;
+       mutex_lock(&dpcm->loopback->cable_lock);
+       t.min = cable->hw.rate_min;
+       t.max = cable->hw.rate_max;
+       mutex_unlock(&dpcm->loopback->cable_lock);
         t.openmin = t.openmax = 0;
         t.integer = 0;
        return snd_interval_refine(hw_param_interval(params, rule->var), &t);
@@ -648,22 +635,44 @@ static int rule_rate(struct snd_pcm_hw_params *params,
 static int rule_channels(struct snd_pcm_hw_params *params,
                         struct snd_pcm_hw_rule *rule)
 {
-       struct snd_pcm_hardware *hw = rule->private;
+       struct loopback_pcm *dpcm = rule->private;
+       struct loopback_cable *cable = dpcm->cable;
        struct snd_interval t;
 
-        t.min = hw->channels_min;
-        t.max = hw->channels_max;
+       mutex_lock(&dpcm->loopback->cable_lock);
+       t.min = cable->hw.channels_min;
+       t.max = cable->hw.channels_max;
+       mutex_unlock(&dpcm->loopback->cable_lock);
         t.openmin = t.openmax = 0;
         t.integer = 0;
        return snd_interval_refine(hw_param_interval(params, rule->var), &t);
 }
 
+static void free_cable(struct snd_pcm_substream *substream)
+{
+       struct loopback *loopback = substream->private_data;
+       int dev = get_cable_index(substream);
+       struct loopback_cable *cable;
+
+       cable = loopback->cables[substream->number][dev];
+       if (!cable)
+               return;
+       if (cable->streams[!substream->stream]) {
+               /* other stream is still alive */
+               cable->streams[substream->stream] = NULL;
+       } else {
+               /* free the cable */
+               loopback->cables[substream->number][dev] = NULL;
+               kfree(cable);
+       }
+}
+
 static int loopback_open(struct snd_pcm_substream *substream)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct loopback *loopback = substream->private_data;
        struct loopback_pcm *dpcm;
-       struct loopback_cable *cable;
+       struct loopback_cable *cable = NULL;
        int err = 0;
        int dev = get_cable_index(substream);
 
@@ -681,7 +690,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
        if (!cable) {
                cable = kzalloc(sizeof(*cable), GFP_KERNEL);
                if (!cable) {
-                       kfree(dpcm);
                        err = -ENOMEM;
                        goto unlock;
                }
@@ -699,19 +707,19 @@ static int loopback_open(struct snd_pcm_substream *substream)
        /* are cached -> they do not reflect the actual state */
        err = snd_pcm_hw_rule_add(runtime, 0,
                                  SNDRV_PCM_HW_PARAM_FORMAT,
-                                 rule_format, &runtime->hw,
+                                 rule_format, dpcm,
                                  SNDRV_PCM_HW_PARAM_FORMAT, -1);
        if (err < 0)
                goto unlock;
        err = snd_pcm_hw_rule_add(runtime, 0,
                                  SNDRV_PCM_HW_PARAM_RATE,
-                                 rule_rate, &runtime->hw,
+                                 rule_rate, dpcm,
                                  SNDRV_PCM_HW_PARAM_RATE, -1);
        if (err < 0)
                goto unlock;
        err = snd_pcm_hw_rule_add(runtime, 0,
                                  SNDRV_PCM_HW_PARAM_CHANNELS,
-                                 rule_channels, &runtime->hw,
+                                 rule_channels, dpcm,
                                  SNDRV_PCM_HW_PARAM_CHANNELS, -1);
        if (err < 0)
                goto unlock;
@@ -723,6 +731,10 @@ static int loopback_open(struct snd_pcm_substream *substream)
        else
                runtime->hw = cable->hw;
  unlock:
+       if (err < 0) {
+               free_cable(substream);
+               kfree(dpcm);
+       }
        mutex_unlock(&loopback->cable_lock);
        return err;
 }
@@ -731,20 +743,10 @@ static int loopback_close(struct snd_pcm_substream *substream)
 {
        struct loopback *loopback = substream->private_data;
        struct loopback_pcm *dpcm = substream->runtime->private_data;
-       struct loopback_cable *cable;
-       int dev = get_cable_index(substream);
 
        loopback_timer_stop(dpcm);
        mutex_lock(&loopback->cable_lock);
-       cable = loopback->cables[substream->number][dev];
-       if (cable->streams[!substream->stream]) {
-               /* other stream is still alive */
-               cable->streams[substream->stream] = NULL;
-       } else {
-               /* free the cable */
-               loopback->cables[substream->number][dev] = NULL;
-               kfree(cable);
-       }
+       free_cable(substream);
        mutex_unlock(&loopback->cable_lock);
        return 0;
 }
index 7b2b1f7..69db45b 100644 (file)
@@ -830,7 +830,7 @@ static int snd_dummy_capsrc_put(struct snd_kcontrol *kcontrol, struct snd_ctl_el
 static int snd_dummy_iobox_info(struct snd_kcontrol *kcontrol,
                                struct snd_ctl_elem_info *info)
 {
-       const char *const names[] = { "None", "CD Player" };
+       static const char *const names[] = { "None", "CD Player" };
 
        return snd_ctl_enum_info(info, 1, 2, names);
 }
index 31b510c..0daf313 100644 (file)
@@ -146,7 +146,7 @@ int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *ebus, int addr)
        edev = kzalloc(sizeof(*edev), GFP_KERNEL);
        if (!edev)
                return -ENOMEM;
-       hdev = &edev->hdac;
+       hdev = &edev->hdev;
        edev->ebus = ebus;
 
        snprintf(name, sizeof(name), "ehdaudio%dD%d", ebus->idx, addr);
index 038a180..cbe818e 100644 (file)
@@ -325,7 +325,7 @@ static int hdac_component_master_match(struct device *dev, void *data)
  */
 int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
 {
-       if (WARN_ON(!hdac_acomp))
+       if (!hdac_acomp)
                return -ENODEV;
 
        hdac_acomp->audio_ops = aops;
index 36c27c8..7f95f45 100644 (file)
@@ -201,10 +201,9 @@ int snd_gf1_dma_transfer_block(struct snd_gus_card * gus,
        struct snd_gf1_dma_block *block;
 
        block = kmalloc(sizeof(*block), atomic ? GFP_ATOMIC : GFP_KERNEL);
-       if (block == NULL) {
-               snd_printk(KERN_ERR "gf1: DMA transfer failure; not enough memory\n");
+       if (!block)
                return -ENOMEM;
-       }
+
        *block = *__block;
        block->next = NULL;
 
index 37d378a..c8904e7 100644 (file)
@@ -814,7 +814,7 @@ static int hal2_create(struct snd_card *card, struct snd_hal2 **rchip)
        struct hpc3_regs *hpc3 = hpc3c0;
        int err;
 
-       hal2 = kzalloc(sizeof(struct snd_hal2), GFP_KERNEL);
+       hal2 = kzalloc(sizeof(*hal2), GFP_KERNEL);
        if (!hal2)
                return -ENOMEM;
 
index 71c9421..9fb68b3 100644 (file)
@@ -840,7 +840,7 @@ static int snd_sgio2audio_create(struct snd_card *card,
        if (!(readq(&mace->perif.audio.control) & AUDIO_CONTROL_CODEC_PRESENT))
                return -ENOENT;
 
-       chip = kzalloc(sizeof(struct snd_sgio2audio), GFP_KERNEL);
+       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
        if (chip == NULL)
                return -ENOMEM;
 
index 7f3b5ed..f7a492c 100644 (file)
@@ -88,7 +88,6 @@ config SND_HDA_PATCH_LOADER
 config SND_HDA_CODEC_REALTEK
        tristate "Build Realtek HD-audio codec support"
        select SND_HDA_GENERIC
-       select INPUT
        help
          Say Y or M here to include Realtek HD-audio codec support in
          snd-hda-intel driver, such as ALC880.
index 80bbadc..d6e079f 100644 (file)
@@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
        /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
 
        /* codec SSID */
+       SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
        SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
        SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
        SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
index a81aacf..37e1cf8 100644 (file)
@@ -271,6 +271,8 @@ enum {
        CXT_FIXUP_HP_SPECTRE,
        CXT_FIXUP_HP_GATE_MIC,
        CXT_FIXUP_MUTE_LED_GPIO,
+       CXT_FIXUP_HEADSET_MIC,
+       CXT_FIXUP_HP_MIC_NO_PRESENCE,
 };
 
 /* for hda_fixup_thinkpad_acpi() */
@@ -350,6 +352,18 @@ static void cxt_fixup_headphone_mic(struct hda_codec *codec,
        }
 }
 
+static void cxt_fixup_headset_mic(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct conexant_spec *spec = codec->spec;
+
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+               break;
+       }
+}
+
 /* OPLC XO 1.5 fixup */
 
 /* OLPC XO-1.5 supports DC input mode (e.g. for use with analog sensors)
@@ -880,6 +894,19 @@ static const struct hda_fixup cxt_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cxt_fixup_mute_led_gpio,
        },
+       [CXT_FIXUP_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cxt_fixup_headset_mic,
+       },
+       [CXT_FIXUP_HP_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x02a1113c },
+                       { }
+               },
+               .chained = true,
+               .chain_id = CXT_FIXUP_HEADSET_MIC,
+       },
 };
 
 static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -934,6 +961,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
        SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
        SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
index c19c81d..b4f1b6e 100644 (file)
@@ -55,10 +55,11 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
 #define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
 #define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \
                                ((codec)->core.vendor_id == 0x80862800))
+#define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c)
 #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
                                || is_skylake(codec) || is_broxton(codec) \
-                               || is_kabylake(codec)) || is_geminilake(codec)
-
+                               || is_kabylake(codec)) || is_geminilake(codec) \
+                               || is_cannonlake(codec)
 #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
 #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
 #define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
@@ -3841,6 +3842,7 @@ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI",     patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI",    patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI",    patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI",   patch_i915_hsw_hdmi),
+HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
index 921a10e..2347588 100644 (file)
@@ -324,21 +324,24 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0292:
                alc_update_coef_idx(codec, 0x4, 1<<15, 0);
                break;
-       case 0x10ec0215:
        case 0x10ec0225:
+       case 0x10ec0295:
+       case 0x10ec0299:
+               alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
+               /* fallthrough */
+       case 0x10ec0215:
        case 0x10ec0233:
        case 0x10ec0236:
        case 0x10ec0255:
        case 0x10ec0256:
+       case 0x10ec0257:
        case 0x10ec0282:
        case 0x10ec0283:
        case 0x10ec0286:
        case 0x10ec0288:
        case 0x10ec0285:
-       case 0x10ec0295:
        case 0x10ec0298:
        case 0x10ec0289:
-       case 0x10ec0299:
                alc_update_coef_idx(codec, 0x10, 1<<9, 0);
                break;
        case 0x10ec0275:
@@ -2772,6 +2775,7 @@ enum {
        ALC269_TYPE_ALC298,
        ALC269_TYPE_ALC255,
        ALC269_TYPE_ALC256,
+       ALC269_TYPE_ALC257,
        ALC269_TYPE_ALC215,
        ALC269_TYPE_ALC225,
        ALC269_TYPE_ALC294,
@@ -2805,6 +2809,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC298:
        case ALC269_TYPE_ALC255:
        case ALC269_TYPE_ALC256:
+       case ALC269_TYPE_ALC257:
        case ALC269_TYPE_ALC215:
        case ALC269_TYPE_ALC225:
        case ALC269_TYPE_ALC294:
@@ -3149,11 +3154,13 @@ static void alc256_shutup(struct hda_codec *codec)
        if (hp_pin_sense)
                msleep(85);
 
+       /* 3k pull low control for Headset jack. */
+       /* NOTE: call this before clearing the pin, otherwise codec stalls */
+       alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
+
        snd_hda_codec_write(codec, hp_pin, 0,
                            AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
 
-       alc_update_coef_idx(codec, 0x46, 0, 3 << 12); /* 3k pull low control for Headset jack. */
-
        if (hp_pin_sense)
                msleep(100);
 
@@ -3161,6 +3168,93 @@ static void alc256_shutup(struct hda_codec *codec)
        snd_hda_shutup_pins(codec);
 }
 
+static void alc225_init(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       bool hp1_pin_sense, hp2_pin_sense;
+
+       if (!hp_pin)
+               return;
+
+       msleep(30);
+
+       hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+       hp2_pin_sense = snd_hda_jack_detect(codec, 0x16);
+
+       if (hp1_pin_sense || hp2_pin_sense)
+               msleep(2);
+
+       alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+
+       if (hp1_pin_sense)
+               snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+       if (hp2_pin_sense)
+               snd_hda_codec_write(codec, 0x16, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+       if (hp1_pin_sense || hp2_pin_sense)
+               msleep(85);
+
+       if (hp1_pin_sense)
+               snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+       if (hp2_pin_sense)
+               snd_hda_codec_write(codec, 0x16, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+
+       if (hp1_pin_sense || hp2_pin_sense)
+               msleep(100);
+
+       alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
+       alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* High power */
+}
+
+static void alc225_shutup(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       bool hp1_pin_sense, hp2_pin_sense;
+
+       if (!hp_pin) {
+               alc269_shutup(codec);
+               return;
+       }
+
+       /* 3k pull low control for Headset jack. */
+       alc_update_coef_idx(codec, 0x4a, 0, 3 << 10);
+
+       hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+       hp2_pin_sense = snd_hda_jack_detect(codec, 0x16);
+
+       if (hp1_pin_sense || hp2_pin_sense)
+               msleep(2);
+
+       if (hp1_pin_sense)
+               snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+       if (hp2_pin_sense)
+               snd_hda_codec_write(codec, 0x16, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+       if (hp1_pin_sense || hp2_pin_sense)
+               msleep(85);
+
+       if (hp1_pin_sense)
+               snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+       if (hp2_pin_sense)
+               snd_hda_codec_write(codec, 0x16, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+       if (hp1_pin_sense || hp2_pin_sense)
+               msleep(100);
+
+       alc_auto_setup_eapd(codec, false);
+       snd_hda_shutup_pins(codec);
+}
+
 static void alc_default_init(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
@@ -3718,6 +3812,7 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
        }
 }
 
+#if IS_REACHABLE(INPUT)
 static void gpio2_mic_hotkey_event(struct hda_codec *codec,
                                   struct hda_jack_callback *event)
 {
@@ -3850,6 +3945,10 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
                spec->kb_dev = NULL;
        }
 }
+#else /* INPUT */
+#define alc280_fixup_hp_gpio2_mic_hotkey       NULL
+#define alc233_fixup_lenovo_line2_mic_hotkey   NULL
+#endif /* INPUT */
 
 static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
@@ -3989,8 +4088,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
        case 0x10ec0668:
                alc_process_coef_fw(codec, coef0668);
                break;
+       case 0x10ec0215:
        case 0x10ec0225:
+       case 0x10ec0285:
        case 0x10ec0295:
+       case 0x10ec0289:
        case 0x10ec0299:
                alc_process_coef_fw(codec, coef0225);
                break;
@@ -4112,8 +4214,11 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
                alc_process_coef_fw(codec, coef0688);
                snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
                break;
+       case 0x10ec0215:
        case 0x10ec0225:
+       case 0x10ec0285:
        case 0x10ec0295:
+       case 0x10ec0289:
        case 0x10ec0299:
                alc_process_coef_fw(codec, alc225_pre_hsmode);
                alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
@@ -4184,8 +4289,11 @@ static void alc_headset_mode_default(struct hda_codec *codec)
        };
 
        switch (codec->core.vendor_id) {
+       case 0x10ec0215:
        case 0x10ec0225:
+       case 0x10ec0285:
        case 0x10ec0295:
+       case 0x10ec0289:
        case 0x10ec0299:
                alc_process_coef_fw(codec, alc225_pre_hsmode);
                alc_process_coef_fw(codec, coef0225);
@@ -4327,8 +4435,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
        case 0x10ec0668:
                alc_process_coef_fw(codec, coef0688);
                break;
+       case 0x10ec0215:
        case 0x10ec0225:
+       case 0x10ec0285:
        case 0x10ec0295:
+       case 0x10ec0289:
        case 0x10ec0299:
                val = alc_read_coef_idx(codec, 0x45);
                if (val & (1 << 9))
@@ -4431,8 +4542,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
        case 0x10ec0668:
                alc_process_coef_fw(codec, coef0688);
                break;
+       case 0x10ec0215:
        case 0x10ec0225:
+       case 0x10ec0285:
        case 0x10ec0295:
+       case 0x10ec0289:
        case 0x10ec0299:
                alc_process_coef_fw(codec, coef0225);
                break;
@@ -4561,9 +4675,18 @@ static void alc_determine_headset_type(struct hda_codec *codec)
                val = alc_read_coef_idx(codec, 0xbe);
                is_ctia = (val & 0x1c02) == 0x1c02;
                break;
+       case 0x10ec0215:
        case 0x10ec0225:
+       case 0x10ec0285:
        case 0x10ec0295:
+       case 0x10ec0289:
        case 0x10ec0299:
+               snd_hda_codec_write(codec, 0x21, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+               msleep(80);
+               snd_hda_codec_write(codec, 0x21, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
                alc_process_coef_fw(codec, alc225_pre_hsmode);
                alc_update_coef_idx(codec, 0x67, 0xf000, 0x1000);
                val = alc_read_coef_idx(codec, 0x45);
@@ -4583,6 +4706,12 @@ static void alc_determine_headset_type(struct hda_codec *codec)
                alc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6);
                alc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4);
                alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
+
+               snd_hda_codec_write(codec, 0x21, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+               msleep(80);
+               snd_hda_codec_write(codec, 0x21, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
                break;
        case 0x10ec0867:
                is_ctia = true;
@@ -5182,6 +5311,22 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
        }
 }
 
+/* Forcibly assign NID 0x03 to HP/LO and NID 0x02 to SPK for EQ */
+static void alc274_fixup_bind_dacs(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+       static hda_nid_t preferred_pairs[] = {
+               0x21, 0x03, 0x1b, 0x03, 0x16, 0x02,
+               0
+       };
+
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+
+       spec->gen.preferred_dacs = preferred_pairs;
+}
+
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
@@ -5299,6 +5444,8 @@ enum {
        ALC233_FIXUP_LENOVO_MULTI_CODECS,
        ALC294_FIXUP_LENOVO_MIC_LOCATION,
        ALC700_FIXUP_INTEL_REFERENCE,
+       ALC274_FIXUP_DELL_BIND_DACS,
+       ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6109,6 +6256,21 @@ static const struct hda_fixup alc269_fixups[] = {
                        {}
                }
        },
+       [ALC274_FIXUP_DELL_BIND_DACS] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc274_fixup_bind_dacs,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
+       [ALC274_FIXUP_DELL_AIO_LINEOUT_VERB] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x0401102f },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC274_FIXUP_DELL_BIND_DACS
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6158,6 +6320,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
        SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
+       SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6292,6 +6455,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -6549,6 +6713,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x1b, 0x01011020},
                {0x21, 0x02211010}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x1b, 0x01011020},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60160},
                {0x14, 0x90170120},
@@ -6575,7 +6744,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
                {0x21, 0x03211020}),
-       SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+       SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
                {0x12, 0xb7a60130},
                {0x13, 0xb8a61140},
                {0x16, 0x90170110},
@@ -6867,20 +7036,25 @@ static int patch_alc269(struct hda_codec *codec)
                spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path */
                break;
+       case 0x10ec0257:
+               spec->codec_variant = ALC269_TYPE_ALC257;
+               spec->gen.mixer_nid = 0;
+               break;
        case 0x10ec0215:
        case 0x10ec0285:
        case 0x10ec0289:
                spec->codec_variant = ALC269_TYPE_ALC215;
+               spec->shutup = alc225_shutup;
+               spec->init_hook = alc225_init;
                spec->gen.mixer_nid = 0;
                break;
        case 0x10ec0225:
        case 0x10ec0295:
-               spec->codec_variant = ALC269_TYPE_ALC225;
-               spec->gen.mixer_nid = 0; /* no loopback on ALC225 ALC295 */
-               break;
        case 0x10ec0299:
                spec->codec_variant = ALC269_TYPE_ALC225;
-               spec->gen.mixer_nid = 0; /* no loopback on ALC299 */
+               spec->shutup = alc225_shutup;
+               spec->init_hook = alc225_init;
+               spec->gen.mixer_nid = 0; /* no loopback on ALC225, ALC295 and ALC299 */
                break;
        case 0x10ec0234:
        case 0x10ec0274:
@@ -7914,6 +8088,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0257, "ALC257", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
        HDA_CODEC_ENTRY(0x10ec0262, "ALC262", patch_alc262),
        HDA_CODEC_ENTRY(0x10ec0267, "ALC267", patch_alc268),
index 2697402..8dabd4d 100644 (file)
@@ -965,13 +965,32 @@ static int prodigy_hd2_add_controls(struct snd_ice1712 *ice)
        return 0;
 }
 
+static void wm8766_init(struct snd_ice1712 *ice)
+{
+       static unsigned short wm8766_inits[] = {
+               WM8766_RESET,      0x0000,
+               WM8766_DAC_CTRL,        0x0120,
+               WM8766_INT_CTRL,        0x0022, /* I2S Normal Mode, 24 bit */
+               WM8766_DAC_CTRL2,       0x0001,
+               WM8766_DAC_CTRL3,       0x0080,
+               WM8766_LDA1,        0x0100,
+               WM8766_LDA2,        0x0100,
+               WM8766_LDA3,        0x0100,
+               WM8766_RDA1,        0x0100,
+               WM8766_RDA2,        0x0100,
+               WM8766_RDA3,        0x0100,
+               WM8766_MUTE1,      0x0000,
+               WM8766_MUTE2,      0x0000,
+       };
+       unsigned int i;
 
-/*
- * initialize the chip
- */
-static int prodigy_hifi_init(struct snd_ice1712 *ice)
+       for (i = 0; i < ARRAY_SIZE(wm8766_inits); i += 2)
+               wm8766_spi_write(ice, wm8766_inits[i], wm8766_inits[i + 1]);
+}
+
+static void wm8776_init(struct snd_ice1712 *ice)
 {
-       static unsigned short wm_inits[] = {
+       static unsigned short wm8776_inits[] = {
                /* These come first to reduce init pop noise */
                WM_ADC_MUX,     0x0003, /* ADC mute */
                /* 0x00c0 replaced by 0x0003 */
@@ -982,7 +1001,76 @@ static int prodigy_hifi_init(struct snd_ice1712 *ice)
                WM_POWERDOWN,   0x0008, /* All power-up except HP */
                WM_RESET,       0x0000, /* reset */
        };
-       static unsigned short wm_inits2[] = {
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(wm8776_inits); i += 2)
+               wm_put(ice, wm8776_inits[i], wm8776_inits[i + 1]);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int prodigy_hifi_resume(struct snd_ice1712 *ice)
+{
+       static unsigned short wm8776_reinit_registers[] = {
+               WM_MASTER_CTRL,
+               WM_DAC_INT,
+               WM_ADC_INT,
+               WM_OUT_MUX,
+               WM_HP_ATTEN_L,
+               WM_HP_ATTEN_R,
+               WM_PHASE_SWAP,
+               WM_DAC_CTRL2,
+               WM_ADC_ATTEN_L,
+               WM_ADC_ATTEN_R,
+               WM_ALC_CTRL1,
+               WM_ALC_CTRL2,
+               WM_ALC_CTRL3,
+               WM_NOISE_GATE,
+               WM_ADC_MUX,
+               /* no DAC attenuation here */
+       };
+       struct prodigy_hifi_spec *spec = ice->spec;
+       int i, ch;
+
+       mutex_lock(&ice->gpio_mutex);
+
+       /* reinitialize WM8776 and re-apply old register values */
+       wm8776_init(ice);
+       schedule_timeout_uninterruptible(1);
+       for (i = 0; i < ARRAY_SIZE(wm8776_reinit_registers); i++)
+               wm_put(ice, wm8776_reinit_registers[i],
+                      wm_get(ice, wm8776_reinit_registers[i]));
+
+       /* reinitialize WM8766 and re-apply volumes for all DACs */
+       wm8766_init(ice);
+       for (ch = 0; ch < 2; ch++) {
+               wm_set_vol(ice, WM_DAC_ATTEN_L + ch,
+                          spec->vol[2 + ch], spec->master[ch]);
+
+               wm8766_set_vol(ice, WM8766_LDA1 + ch,
+                              spec->vol[0 + ch], spec->master[ch]);
+
+               wm8766_set_vol(ice, WM8766_LDA2 + ch,
+                              spec->vol[4 + ch], spec->master[ch]);
+
+               wm8766_set_vol(ice, WM8766_LDA3 + ch,
+                              spec->vol[6 + ch], spec->master[ch]);
+       }
+
+       /* unmute WM8776 DAC */
+       wm_put(ice, WM_DAC_MUTE, 0x00);
+       wm_put(ice, WM_DAC_CTRL1, 0x90);
+
+       mutex_unlock(&ice->gpio_mutex);
+       return 0;
+}
+#endif
+
+/*
+ * initialize the chip
+ */
+static int prodigy_hifi_init(struct snd_ice1712 *ice)
+{
+       static unsigned short wm8776_defaults[] = {
                WM_MASTER_CTRL,  0x0022, /* 256fs, slave mode */
                WM_DAC_INT,     0x0022, /* I2S, normal polarity, 24bit */
                WM_ADC_INT,     0x0022, /* I2S, normal polarity, 24bit */
@@ -1010,22 +1098,6 @@ static int prodigy_hifi_init(struct snd_ice1712 *ice)
                WM_DAC_MUTE,    0x0000, /* DAC unmute */
                WM_ADC_MUX,     0x0003, /* ADC unmute, both CD/Line On */
        };
-       static unsigned short wm8766_inits[] = {
-               WM8766_RESET,      0x0000,
-               WM8766_DAC_CTRL,        0x0120,
-               WM8766_INT_CTRL,        0x0022, /* I2S Normal Mode, 24 bit */
-               WM8766_DAC_CTRL2,       0x0001,
-               WM8766_DAC_CTRL3,       0x0080,
-               WM8766_LDA1,        0x0100,
-               WM8766_LDA2,        0x0100,
-               WM8766_LDA3,        0x0100,
-               WM8766_RDA1,        0x0100,
-               WM8766_RDA2,        0x0100,
-               WM8766_RDA3,        0x0100,
-               WM8766_MUTE1,      0x0000,
-               WM8766_MUTE2,      0x0000,
-       };
-
        struct prodigy_hifi_spec *spec;
        unsigned int i;
 
@@ -1052,16 +1124,17 @@ static int prodigy_hifi_init(struct snd_ice1712 *ice)
        ice->spec = spec;
 
        /* initialize WM8776 codec */
-       for (i = 0; i < ARRAY_SIZE(wm_inits); i += 2)
-               wm_put(ice, wm_inits[i], wm_inits[i+1]);
+       wm8776_init(ice);
        schedule_timeout_uninterruptible(1);
-       for (i = 0; i < ARRAY_SIZE(wm_inits2); i += 2)
-               wm_put(ice, wm_inits2[i], wm_inits2[i+1]);
+       for (i = 0; i < ARRAY_SIZE(wm8776_defaults); i += 2)
+               wm_put(ice, wm8776_defaults[i], wm8776_defaults[i + 1]);
 
-       /* initialize WM8766 codec */
-       for (i = 0; i < ARRAY_SIZE(wm8766_inits); i += 2)
-               wm8766_spi_write(ice, wm8766_inits[i], wm8766_inits[i+1]);
+       wm8766_init(ice);
 
+#ifdef CONFIG_PM_SLEEP
+       ice->pm_resume = &prodigy_hifi_resume;
+       ice->pm_suspend_enabled = 1;
+#endif
 
        return 0;
 }
index c7b0071..4206ba4 100644 (file)
@@ -2348,7 +2348,6 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
 
        err = request_firmware(&dsp_code, "korg/k1212.dsp", &pci->dev);
        if (err < 0) {
-               release_firmware(dsp_code);
                snd_printk(KERN_ERR "firmware not available\n");
                snd_korg1212_free(korg1212);
                return err;
index d227581..84c3582 100644 (file)
@@ -71,6 +71,7 @@ source "sound/soc/stm/Kconfig"
 source "sound/soc/sunxi/Kconfig"
 source "sound/soc/tegra/Kconfig"
 source "sound/soc/txx9/Kconfig"
+source "sound/soc/uniphier/Kconfig"
 source "sound/soc/ux500/Kconfig"
 source "sound/soc/xtensa/Kconfig"
 source "sound/soc/zte/Kconfig"
index 5327f4d..74cd185 100644 (file)
@@ -55,6 +55,7 @@ obj-$(CONFIG_SND_SOC) += stm/
 obj-$(CONFIG_SND_SOC)  += sunxi/
 obj-$(CONFIG_SND_SOC)  += tegra/
 obj-$(CONFIG_SND_SOC)  += txx9/
+obj-$(CONFIG_SND_SOC)  += uniphier/
 obj-$(CONFIG_SND_SOC)  += ux500/
 obj-$(CONFIG_SND_SOC)  += xtensa/
 obj-$(CONFIG_SND_SOC)  += zte/
index 9f521a5..c33a512 100644 (file)
@@ -850,6 +850,9 @@ static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct audio_substream_data *rtd = runtime->private_data;
 
+       if (!rtd)
+               return -EINVAL;
+
        buffersize = frames_to_bytes(runtime, runtime->buffer_size);
        bytescount = acp_get_byte_count(rtd->acp_mmio, substream->stream);
 
@@ -875,6 +878,8 @@ static int acp_dma_prepare(struct snd_pcm_substream *substream)
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct audio_substream_data *rtd = runtime->private_data;
 
+       if (!rtd)
+               return -EINVAL;
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                config_acp_dma_channel(rtd->acp_mmio, SYSRAM_TO_ACP_CH_NUM,
                                        PLAYBACK_START_DMA_DESCR_CH12,
@@ -1051,6 +1056,11 @@ static int acp_audio_probe(struct platform_device *pdev)
        struct resource *res;
        const u32 *pdata = pdev->dev.platform_data;
 
+       if (!pdata) {
+               dev_err(&pdev->dev, "Missing platform data\n");
+               return -ENODEV;
+       }
+
        audio_drv_data = devm_kzalloc(&pdev->dev, sizeof(struct audio_drv_data),
                                        GFP_KERNEL);
        if (audio_drv_data == NULL)
@@ -1058,6 +1068,8 @@ static int acp_audio_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        audio_drv_data->acp_mmio = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(audio_drv_data->acp_mmio))
+               return PTR_ERR(audio_drv_data->acp_mmio);
 
        /* The following members get populated in device 'open'
         * function. Till then interrupts are disabled in 'acp_init'
@@ -1084,7 +1096,11 @@ static int acp_audio_probe(struct platform_device *pdev)
        dev_set_drvdata(&pdev->dev, audio_drv_data);
 
        /* Initialize the ACP */
-       acp_init(audio_drv_data->acp_mmio, audio_drv_data->asic_type);
+       status = acp_init(audio_drv_data->acp_mmio, audio_drv_data->asic_type);
+       if (status) {
+               dev_err(&pdev->dev, "ACP Init failed status:%d\n", status);
+               return status;
+       }
 
        status = snd_soc_register_platform(&pdev->dev, &acp_asoc_platform);
        if (status != 0) {
@@ -1101,9 +1117,12 @@ static int acp_audio_probe(struct platform_device *pdev)
 
 static int acp_audio_remove(struct platform_device *pdev)
 {
+       int status;
        struct audio_drv_data *adata = dev_get_drvdata(&pdev->dev);
 
-       acp_deinit(adata->acp_mmio);
+       status = acp_deinit(adata->acp_mmio);
+       if (status)
+               dev_err(&pdev->dev, "ACP Deinit failed status:%d\n", status);
        snd_soc_unregister_platform(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
@@ -1113,9 +1132,14 @@ static int acp_audio_remove(struct platform_device *pdev)
 static int acp_pcm_resume(struct device *dev)
 {
        u16 bank;
+       int status;
        struct audio_drv_data *adata = dev_get_drvdata(dev);
 
-       acp_init(adata->acp_mmio, adata->asic_type);
+       status = acp_init(adata->acp_mmio, adata->asic_type);
+       if (status) {
+               dev_err(dev, "ACP Init failed status:%d\n", status);
+               return status;
+       }
 
        if (adata->play_stream && adata->play_stream->runtime) {
                /* For Stoney, Memory gating is disabled, i.e. SRAM Banks
@@ -1147,18 +1171,26 @@ static int acp_pcm_resume(struct device *dev)
 
 static int acp_pcm_runtime_suspend(struct device *dev)
 {
+       int status;
        struct audio_drv_data *adata = dev_get_drvdata(dev);
 
-       acp_deinit(adata->acp_mmio);
+       status = acp_deinit(adata->acp_mmio);
+       if (status)
+               dev_err(dev, "ACP Deinit failed status:%d\n", status);
        acp_reg_write(0, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
        return 0;
 }
 
 static int acp_pcm_runtime_resume(struct device *dev)
 {
+       int status;
        struct audio_drv_data *adata = dev_get_drvdata(dev);
 
-       acp_init(adata->acp_mmio, adata->asic_type);
+       status = acp_init(adata->acp_mmio, adata->asic_type);
+       if (status) {
+               dev_err(dev, "ACP Init failed status:%d\n", status);
+               return status;
+       }
        acp_reg_write(1, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
        return 0;
 }
index 4a56f3d..dcee145 100644 (file)
@@ -64,7 +64,7 @@ config SND_AT91_SOC_SAM9X5_WM8731
 config SND_ATMEL_SOC_CLASSD
        tristate "Atmel ASoC driver for boards using CLASSD"
        depends on ARCH_AT91 || COMPILE_TEST
-       select SND_ATMEL_SOC_DMA
+       select SND_SOC_GENERIC_DMAENGINE_PCM
        select REGMAP_MMIO
        help
          Say Y if you want to add support for Atmel ASoC driver for boards using
index 8445edd..ebabed6 100644 (file)
@@ -308,15 +308,9 @@ static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
        return regcache_sync(dd->regmap);
 }
 
-static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
-{
-       return dev_get_regmap(dev, NULL);
-}
-
 static struct snd_soc_codec_driver soc_codec_dev_classd = {
        .probe          = atmel_classd_codec_probe,
        .resume         = atmel_classd_codec_resume,
-       .get_regmap     = atmel_classd_codec_get_remap,
        .component_driver = {
                .controls               = atmel_classd_snd_controls,
                .num_controls           = ARRAY_SIZE(atmel_classd_snd_controls),
index 29a97d5..66d6c52 100644 (file)
@@ -91,8 +91,8 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
        do {
                mutex_lock(&ctx->lock);
 
-               tmo = 5;
-               while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+               tmo = 6;
+               while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
                        udelay(21);     /* wait an ac97 frame time */
                if (!tmo) {
                        pr_debug("ac97rd timeout #1\n");
@@ -105,7 +105,7 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
                 * poll, Forrest, poll...
                 */
                tmo = 0x10000;
-               while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+               while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
                        asm volatile ("nop");
                data = RD(ctx, AC97_CMDRESP);
 
index 2e449d7..d5f73a8 100644 (file)
@@ -130,6 +130,7 @@ struct bcm2835_i2s_dev {
        struct regmap                           *i2s_regmap;
        struct clk                              *clk;
        bool                                    clk_prepared;
+       int                                     clk_rate;
 };
 
 static void bcm2835_i2s_start_clock(struct bcm2835_i2s_dev *dev)
@@ -419,10 +420,19 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
        }
 
        /* Clock should only be set up here if CPU is clock master */
-       if (bit_clock_master) {
-               ret = clk_set_rate(dev->clk, bclk_rate);
-               if (ret)
-                       return ret;
+       if (bit_clock_master &&
+           (!dev->clk_prepared || dev->clk_rate != bclk_rate)) {
+               if (dev->clk_prepared)
+                       bcm2835_i2s_stop_clock(dev);
+
+               if (dev->clk_rate != bclk_rate) {
+                       ret = clk_set_rate(dev->clk, bclk_rate);
+                       if (ret)
+                               return ret;
+                       dev->clk_rate = bclk_rate;
+               }
+
+               bcm2835_i2s_start_clock(dev);
        }
 
        /* Setup the frame format */
@@ -618,8 +628,6 @@ static int bcm2835_i2s_prepare(struct snd_pcm_substream *substream,
        struct bcm2835_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
        uint32_t cs_reg;
 
-       bcm2835_i2s_start_clock(dev);
-
        /*
         * Clear both FIFOs if the one that should be started
         * is not empty at the moment. This should only happen
index bbf7a92..cd5a939 100644 (file)
@@ -365,7 +365,7 @@ static int ep93xx_ac97_probe(struct platform_device *pdev)
 {
        struct ep93xx_ac97_info *info;
        struct resource *res;
-       unsigned int irq;
+       int irq;
        int ret;
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -378,8 +378,8 @@ static int ep93xx_ac97_probe(struct platform_device *pdev)
                return PTR_ERR(info->regs);
 
        irq = platform_get_irq(pdev, 0);
-       if (!irq)
-               return -ENODEV;
+       if (irq <= 0)
+               return irq < 0 ? irq : -ENODEV;
 
        ret = devm_request_irq(&pdev->dev, irq, ep93xx_ac97_interrupt,
                               IRQF_TRIGGER_HIGH, pdev->name, info);
index 848c5fe..be8ea72 100644 (file)
@@ -1319,6 +1319,7 @@ static int pm860x_probe(struct snd_soc_codec *codec)
        int i, ret;
 
        pm860x->codec = codec;
+       snd_soc_codec_init_regmap(codec,  pm860x->regmap);
 
        for (i = 0; i < 4; i++) {
                ret = request_threaded_irq(pm860x->irq[i], NULL,
@@ -1348,18 +1349,10 @@ static int pm860x_remove(struct snd_soc_codec *codec)
        return 0;
 }
 
-static struct regmap *pm860x_get_regmap(struct device *dev)
-{
-       struct pm860x_priv *pm860x = dev_get_drvdata(dev);
-
-       return pm860x->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_pm860x = {
        .probe          = pm860x_probe,
        .remove         = pm860x_remove,
        .set_bias_level = pm860x_set_bias_level,
-       .get_regmap     = pm860x_get_regmap,
 
        .component_driver = {
                .controls               = pm860x_snd_controls,
index a42ddbc..2b331f7 100644 (file)
@@ -95,6 +95,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_MAX98925 if I2C
        select SND_SOC_MAX98926 if I2C
        select SND_SOC_MAX98927 if I2C
+       select SND_SOC_MAX98373 if I2C
        select SND_SOC_MAX9850 if I2C
        select SND_SOC_MAX9860 if I2C
        select SND_SOC_MAX9768 if I2C
@@ -109,6 +110,8 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_PCM1681 if I2C
        select SND_SOC_PCM179X_I2C if I2C
        select SND_SOC_PCM179X_SPI if SPI_MASTER
+       select SND_SOC_PCM186X_I2C if I2C
+       select SND_SOC_PCM186X_SPI if SPI_MASTER
        select SND_SOC_PCM3008
        select SND_SOC_PCM3168A_I2C if I2C
        select SND_SOC_PCM3168A_SPI if SPI_MASTER
@@ -133,7 +136,6 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_SGTL5000 if I2C
        select SND_SOC_SI476X if MFD_SI476X_CORE
        select SND_SOC_SIRF_AUDIO_CODEC
-       select SND_SOC_SN95031 if INTEL_SCU_IPC
        select SND_SOC_SPDIF
        select SND_SOC_SSM2518 if I2C
        select SND_SOC_SSM2602_SPI if SPI_MASTER
@@ -148,6 +150,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_TAS5086 if I2C
        select SND_SOC_TAS571X if I2C
        select SND_SOC_TAS5720 if I2C
+       select SND_SOC_TAS6424 if I2C
        select SND_SOC_TFA9879 if I2C
        select SND_SOC_TLV320AIC23_I2C if I2C
        select SND_SOC_TLV320AIC23_SPI if SPI_MASTER
@@ -158,6 +161,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_TLV320AIC3X if I2C
        select SND_SOC_TPA6130A2 if I2C
        select SND_SOC_TLV320DAC33 if I2C
+       select SND_SOC_TSCS42XX if I2C
        select SND_SOC_TS3A227E if I2C
        select SND_SOC_TWL4030 if TWL4030_CORE
        select SND_SOC_TWL6040 if TWL6040_CORE
@@ -623,6 +627,10 @@ config SND_SOC_MAX98927
        tristate "Maxim Integrated MAX98927 Speaker Amplifier"
        depends on I2C
 
+config SND_SOC_MAX98373
+       tristate "Maxim Integrated MAX98373 Speaker Amplifier"
+       depends on I2C
+
 config SND_SOC_MAX9850
        tristate
 
@@ -661,6 +669,21 @@ config SND_SOC_PCM179X_SPI
          Enable support for Texas Instruments PCM179x CODEC.
          Select this if your PCM179x is connected via an SPI bus.
 
+config SND_SOC_PCM186X
+       tristate
+
+config SND_SOC_PCM186X_I2C
+       tristate "Texas Instruments PCM186x CODECs - I2C"
+       depends on I2C
+       select SND_SOC_PCM186X
+       select REGMAP_I2C
+
+config SND_SOC_PCM186X_SPI
+       tristate "Texas Instruments PCM186x CODECs - SPI"
+       depends on SPI_MASTER
+       select SND_SOC_PCM186X
+       select REGMAP_SPI
+
 config SND_SOC_PCM3008
        tristate
 
@@ -818,9 +841,6 @@ config SND_SOC_SIRF_AUDIO_CODEC
        tristate "SiRF SoC internal audio codec"
        select REGMAP_MMIO
 
-config SND_SOC_SN95031
-       tristate
-
 config SND_SOC_SPDIF
        tristate "S/PDIF CODEC"
 
@@ -883,6 +903,13 @@ config SND_SOC_TAS5720
          Enable support for Texas Instruments TAS5720L/M high-efficiency mono
          Class-D audio power amplifiers.
 
+config SND_SOC_TAS6424
+       tristate "Texas Instruments TAS6424 Quad-Channel Audio amplifier"
+       depends on I2C
+       help
+         Enable support for Texas Instruments TAS6424 high-efficiency
+         digital input quad-channel Class-D audio power amplifiers.
+
 config SND_SOC_TFA9879
        tristate "NXP Semiconductors TFA9879 amplifier"
        depends on I2C
@@ -913,12 +940,12 @@ config SND_SOC_TLV320AIC32X4
        tristate
 
 config SND_SOC_TLV320AIC32X4_I2C
-       tristate
+       tristate "Texas Instruments TLV320AIC32x4 audio CODECs - I2C"
        depends on I2C
        select SND_SOC_TLV320AIC32X4
 
 config SND_SOC_TLV320AIC32X4_SPI
-       tristate
+       tristate "Texas Instruments TLV320AIC32x4 audio CODECs - SPI"
        depends on SPI_MASTER
        select SND_SOC_TLV320AIC32X4
 
@@ -933,6 +960,13 @@ config SND_SOC_TS3A227E
        tristate "TI Headset/Mic detect and keypress chip"
        depends on I2C
 
+config SND_SOC_TSCS42XX
+       tristate "Tempo Semiconductor TSCS42xx CODEC"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         Add support for Tempo Semiconductor's TSCS42xx audio CODEC.
+
 config SND_SOC_TWL4030
        select MFD_TWL4030_AUDIO
        tristate
index 0001069..da15713 100644 (file)
@@ -90,6 +90,7 @@ snd-soc-max9867-objs := max9867.o
 snd-soc-max98925-objs := max98925.o
 snd-soc-max98926-objs := max98926.o
 snd-soc-max98927-objs := max98927.o
+snd-soc-max98373-objs := max98373.o
 snd-soc-max9850-objs := max9850.o
 snd-soc-max9860-objs := max9860.o
 snd-soc-mc13783-objs := mc13783.o
@@ -105,6 +106,9 @@ snd-soc-pcm1681-objs := pcm1681.o
 snd-soc-pcm179x-codec-objs := pcm179x.o
 snd-soc-pcm179x-i2c-objs := pcm179x-i2c.o
 snd-soc-pcm179x-spi-objs := pcm179x-spi.o
+snd-soc-pcm186x-objs := pcm186x.o
+snd-soc-pcm186x-i2c-objs := pcm186x-i2c.o
+snd-soc-pcm186x-spi-objs := pcm186x-spi.o
 snd-soc-pcm3008-objs := pcm3008.o
 snd-soc-pcm3168a-objs := pcm3168a.o
 snd-soc-pcm3168a-i2c-objs := pcm3168a-i2c.o
@@ -140,7 +144,6 @@ snd-soc-sigmadsp-i2c-objs := sigmadsp-i2c.o
 snd-soc-sigmadsp-regmap-objs := sigmadsp-regmap.o
 snd-soc-si476x-objs := si476x.o
 snd-soc-sirf-audio-codec-objs := sirf-audio-codec.o
-snd-soc-sn95031-objs := sn95031.o
 snd-soc-spdif-tx-objs := spdif_transmitter.o
 snd-soc-spdif-rx-objs := spdif_receiver.o
 snd-soc-ssm2518-objs := ssm2518.o
@@ -156,6 +159,7 @@ snd-soc-sti-sas-objs := sti-sas.o
 snd-soc-tas5086-objs := tas5086.o
 snd-soc-tas571x-objs := tas571x.o
 snd-soc-tas5720-objs := tas5720.o
+snd-soc-tas6424-objs := tas6424.o
 snd-soc-tfa9879-objs := tfa9879.o
 snd-soc-tlv320aic23-objs := tlv320aic23.o
 snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
@@ -167,6 +171,7 @@ snd-soc-tlv320aic32x4-i2c-objs := tlv320aic32x4-i2c.o
 snd-soc-tlv320aic32x4-spi-objs := tlv320aic32x4-spi.o
 snd-soc-tlv320aic3x-objs := tlv320aic3x.o
 snd-soc-tlv320dac33-objs := tlv320dac33.o
+snd-soc-tscs42xx-objs := tscs42xx.o
 snd-soc-ts3a227e-objs := ts3a227e.o
 snd-soc-twl4030-objs := twl4030.o
 snd-soc-twl6040-objs := twl6040.o
@@ -330,6 +335,7 @@ obj-$(CONFIG_SND_SOC_MAX9867)       += snd-soc-max9867.o
 obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o
 obj-$(CONFIG_SND_SOC_MAX98926) += snd-soc-max98926.o
 obj-$(CONFIG_SND_SOC_MAX98927) += snd-soc-max98927.o
+obj-$(CONFIG_SND_SOC_MAX98373) += snd-soc-max98373.o
 obj-$(CONFIG_SND_SOC_MAX9850)  += snd-soc-max9850.o
 obj-$(CONFIG_SND_SOC_MAX9860)  += snd-soc-max9860.o
 obj-$(CONFIG_SND_SOC_MC13783)  += snd-soc-mc13783.o
@@ -345,6 +351,9 @@ obj-$(CONFIG_SND_SOC_PCM1681)       += snd-soc-pcm1681.o
 obj-$(CONFIG_SND_SOC_PCM179X)  += snd-soc-pcm179x-codec.o
 obj-$(CONFIG_SND_SOC_PCM179X_I2C)      += snd-soc-pcm179x-i2c.o
 obj-$(CONFIG_SND_SOC_PCM179X_SPI)      += snd-soc-pcm179x-spi.o
+obj-$(CONFIG_SND_SOC_PCM186X)  += snd-soc-pcm186x.o
+obj-$(CONFIG_SND_SOC_PCM186X_I2C)      += snd-soc-pcm186x-i2c.o
+obj-$(CONFIG_SND_SOC_PCM186X_SPI)      += snd-soc-pcm186x-spi.o
 obj-$(CONFIG_SND_SOC_PCM3008)  += snd-soc-pcm3008.o
 obj-$(CONFIG_SND_SOC_PCM3168A) += snd-soc-pcm3168a.o
 obj-$(CONFIG_SND_SOC_PCM3168A_I2C)     += snd-soc-pcm3168a-i2c.o
@@ -395,6 +404,7 @@ obj-$(CONFIG_SND_SOC_TAS2552)       += snd-soc-tas2552.o
 obj-$(CONFIG_SND_SOC_TAS5086)  += snd-soc-tas5086.o
 obj-$(CONFIG_SND_SOC_TAS571X)  += snd-soc-tas571x.o
 obj-$(CONFIG_SND_SOC_TAS5720)  += snd-soc-tas5720.o
+obj-$(CONFIG_SND_SOC_TAS6424)  += snd-soc-tas6424.o
 obj-$(CONFIG_SND_SOC_TFA9879)  += snd-soc-tfa9879.o
 obj-$(CONFIG_SND_SOC_TLV320AIC23)      += snd-soc-tlv320aic23.o
 obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C)  += snd-soc-tlv320aic23-i2c.o
@@ -406,6 +416,7 @@ obj-$(CONFIG_SND_SOC_TLV320AIC32X4_I2C)     += snd-soc-tlv320aic32x4-i2c.o
 obj-$(CONFIG_SND_SOC_TLV320AIC32X4_SPI)        += snd-soc-tlv320aic32x4-spi.o
 obj-$(CONFIG_SND_SOC_TLV320AIC3X)      += snd-soc-tlv320aic3x.o
 obj-$(CONFIG_SND_SOC_TLV320DAC33)      += snd-soc-tlv320dac33.o
+obj-$(CONFIG_SND_SOC_TSCS42XX) += snd-soc-tscs42xx.o
 obj-$(CONFIG_SND_SOC_TS3A227E) += snd-soc-ts3a227e.o
 obj-$(CONFIG_SND_SOC_TWL4030)  += snd-soc-twl4030.o
 obj-$(CONFIG_SND_SOC_TWL6040)  += snd-soc-twl6040.o
index 6ed2cc3..3bf9365 100644 (file)
@@ -121,17 +121,19 @@ static struct snd_soc_dai_driver cq93vc_dai = {
        .ops = &cq93vc_dai_ops,
 };
 
-static struct regmap *cq93vc_get_regmap(struct device *dev)
+static int cq93vc_probe(struct snd_soc_component *component)
 {
-       struct davinci_vc *davinci_vc = dev->platform_data;
+       struct davinci_vc *davinci_vc = component->dev->platform_data;
 
-       return davinci_vc->regmap;
+       snd_soc_component_init_regmap(component, davinci_vc->regmap);
+
+       return 0;
 }
 
 static const struct snd_soc_codec_driver soc_codec_dev_cq93vc = {
        .set_bias_level = cq93vc_set_bias_level,
-       .get_regmap = cq93vc_get_regmap,
        .component_driver = {
+               .probe = cq93vc_probe,
                .controls = cq93vc_snd_controls,
                .num_controls = ARRAY_SIZE(cq93vc_snd_controls),
        },
index 7e98062..bc3a72e 100644 (file)
@@ -355,13 +355,9 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
        unsigned int devid = 0;
        unsigned int reg;
 
-
-       cs35l32 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs35l32_private),
-                              GFP_KERNEL);
-       if (!cs35l32) {
-               dev_err(&i2c_client->dev, "could not allocate codec\n");
+       cs35l32 = devm_kzalloc(&i2c_client->dev, sizeof(*cs35l32), GFP_KERNEL);
+       if (!cs35l32)
                return -ENOMEM;
-       }
 
        i2c_set_clientdata(i2c_client, cs35l32);
 
@@ -375,13 +371,11 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
        if (pdata) {
                cs35l32->pdata = *pdata;
        } else {
-               pdata = devm_kzalloc(&i2c_client->dev,
-                                    sizeof(struct cs35l32_platform_data),
-                               GFP_KERNEL);
-               if (!pdata) {
-                       dev_err(&i2c_client->dev, "could not allocate pdata\n");
+               pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+                                    GFP_KERNEL);
+               if (!pdata)
                        return -ENOMEM;
-               }
+
                if (i2c_client->dev.of_node) {
                        ret = cs35l32_handle_of_data(i2c_client,
                                                     &cs35l32->pdata);
index 1e05026..0600d52 100644 (file)
@@ -1004,13 +1004,9 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
        unsigned int devid = 0;
        unsigned int reg;
 
-       cs35l34 = devm_kzalloc(&i2c_client->dev,
-                              sizeof(struct cs35l34_private),
-                              GFP_KERNEL);
-       if (!cs35l34) {
-               dev_err(&i2c_client->dev, "could not allocate codec\n");
+       cs35l34 = devm_kzalloc(&i2c_client->dev, sizeof(*cs35l34), GFP_KERNEL);
+       if (!cs35l34)
                return -ENOMEM;
-       }
 
        i2c_set_clientdata(i2c_client, cs35l34);
        cs35l34->regmap = devm_regmap_init_i2c(i2c_client, &cs35l34_regmap);
@@ -1044,14 +1040,11 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
        if (pdata) {
                cs35l34->pdata = *pdata;
        } else {
-               pdata = devm_kzalloc(&i2c_client->dev,
-                               sizeof(struct cs35l34_platform_data),
-                               GFP_KERNEL);
-               if (!pdata) {
-                       dev_err(&i2c_client->dev,
-                               "could not allocate pdata\n");
+               pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+                                    GFP_KERNEL);
+               if (!pdata)
                        return -ENOMEM;
-               }
+
                if (i2c_client->dev.of_node) {
                        ret = cs35l34_handle_of_data(i2c_client, pdata);
                        if (ret != 0)
index 0d9c4a5..9731e5d 100644 (file)
@@ -1100,8 +1100,7 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
        unsigned int reg;
        u32 val32;
 
-       cs42l52 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l52_private),
-                              GFP_KERNEL);
+       cs42l52 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l52), GFP_KERNEL);
        if (cs42l52 == NULL)
                return -ENOMEM;
        cs42l52->dev = &i2c_client->dev;
@@ -1115,13 +1114,11 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
        if (pdata) {
                cs42l52->pdata = *pdata;
        } else {
-               pdata = devm_kzalloc(&i2c_client->dev,
-                                    sizeof(struct cs42l52_platform_data),
-                               GFP_KERNEL);
-               if (!pdata) {
-                       dev_err(&i2c_client->dev, "could not allocate pdata\n");
+               pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+                                    GFP_KERNEL);
+               if (!pdata)
                        return -ENOMEM;
-               }
+
                if (i2c_client->dev.of_node) {
                        if (of_property_read_bool(i2c_client->dev.of_node,
                                "cirrus,mica-differential-cfg"))
index cb6ca85..fd7b8d3 100644 (file)
@@ -1190,9 +1190,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
        unsigned int alpha_rev, metal_rev;
        unsigned int reg;
 
-       cs42l56 = devm_kzalloc(&i2c_client->dev,
-                              sizeof(struct cs42l56_private),
-                              GFP_KERNEL);
+       cs42l56 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l56), GFP_KERNEL);
        if (cs42l56 == NULL)
                return -ENOMEM;
        cs42l56->dev = &i2c_client->dev;
@@ -1207,14 +1205,11 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
        if (pdata) {
                cs42l56->pdata = *pdata;
        } else {
-               pdata = devm_kzalloc(&i2c_client->dev,
-                                    sizeof(struct cs42l56_platform_data),
+               pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
                                     GFP_KERNEL);
-               if (!pdata) {
-                       dev_err(&i2c_client->dev,
-                               "could not allocate pdata\n");
+               if (!pdata)
                        return -ENOMEM;
-               }
+
                if (i2c_client->dev.of_node) {
                        ret = cs42l56_handle_of_data(i2c_client,
                                                     &cs42l56->pdata);
index 3df2c47..aebaa97 100644 (file)
@@ -1289,8 +1289,7 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
        unsigned int reg;
        u32 val32;
 
-       cs42l73 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l73_private),
-                              GFP_KERNEL);
+       cs42l73 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l73), GFP_KERNEL);
        if (!cs42l73)
                return -ENOMEM;
 
@@ -1304,13 +1303,11 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
        if (pdata) {
                cs42l73->pdata = *pdata;
        } else {
-               pdata = devm_kzalloc(&i2c_client->dev,
-                                    sizeof(struct cs42l73_platform_data),
-                               GFP_KERNEL);
-               if (!pdata) {
-                       dev_err(&i2c_client->dev, "could not allocate pdata\n");
+               pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+                                    GFP_KERNEL);
+               if (!pdata)
                        return -ENOMEM;
-               }
+
                if (i2c_client->dev.of_node) {
                        if (of_property_read_u32(i2c_client->dev.of_node,
                                "chgfreq", &val32) >= 0)
@@ -1358,7 +1355,7 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
        ret = regmap_read(cs42l73->regmap, CS42L73_REVID, &reg);
        if (ret < 0) {
                dev_err(&i2c_client->dev, "Get Revision ID failed\n");
-               return ret;;
+               return ret;
        }
 
        dev_info(&i2c_client->dev,
index 94c0209..be27506 100644 (file)
@@ -1120,9 +1120,11 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
        struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
        struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
        struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(codec);
+       struct arizona *arizona = priv->core.arizona;
        int ret;
 
-       priv->core.arizona->dapm = dapm;
+       arizona->dapm = dapm;
+       snd_soc_codec_init_regmap(codec, arizona->regmap);
 
        ret = arizona_init_spk(codec);
        if (ret < 0)
@@ -1175,17 +1177,9 @@ static unsigned int cs47l24_digital_vu[] = {
        ARIZONA_DAC_DIGITAL_VOLUME_4L,
 };
 
-static struct regmap *cs47l24_get_regmap(struct device *dev)
-{
-       struct cs47l24_priv *priv = dev_get_drvdata(dev);
-
-       return priv->core.arizona->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_cs47l24 = {
        .probe = cs47l24_codec_probe,
        .remove = cs47l24_codec_remove,
-       .get_regmap = cs47l24_get_regmap,
 
        .idle_bias_off = true,
 
index 46b1fbb..95bb10b 100644 (file)
@@ -26,8 +26,9 @@
 
 
 struct cx20442_priv {
-       void *control_data;
+       struct tty_struct *tty;
        struct regulator *por;
+       u8 reg_cache;
 };
 
 #define CX20442_PM             0x0
@@ -89,14 +90,14 @@ static const struct snd_soc_dapm_route cx20442_audio_map[] = {
 };
 
 static unsigned int cx20442_read_reg_cache(struct snd_soc_codec *codec,
-                                                       unsigned int reg)
+                                          unsigned int reg)
 {
-       u8 *reg_cache = codec->reg_cache;
+       struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
 
-       if (reg >= codec->driver->reg_cache_size)
+       if (reg >= 1)
                return -EINVAL;
 
-       return reg_cache[reg];
+       return cx20442->reg_cache;
 }
 
 enum v253_vls {
@@ -156,20 +157,19 @@ static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg,
                                                        unsigned int value)
 {
        struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
-       u8 *reg_cache = codec->reg_cache;
        int vls, vsp, old, len;
        char buf[18];
 
-       if (reg >= codec->driver->reg_cache_size)
+       if (reg >= 1)
                return -EINVAL;
 
-       /* hw_write and control_data pointers required for talking to the modem
+       /* tty and write pointers required for talking to the modem
         * are expected to be set by the line discipline initialization code */
-       if (!codec->hw_write || !cx20442->control_data)
+       if (!cx20442->tty || !cx20442->tty->ops->write)
                return -EIO;
 
-       old = reg_cache[reg];
-       reg_cache[reg] = value;
+       old = cx20442->reg_cache;
+       cx20442->reg_cache = value;
 
        vls = cx20442_pm_to_v253_vls(value);
        if (vls < 0)
@@ -194,13 +194,12 @@ static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg,
                return -ENOMEM;
 
        dev_dbg(codec->dev, "%s: %s\n", __func__, buf);
-       if (codec->hw_write(cx20442->control_data, buf, len) != len)
+       if (cx20442->tty->ops->write(cx20442->tty, buf, len) != len)
                return -EIO;
 
        return 0;
 }
 
-
 /*
  * Line discipline related code
  *
@@ -252,8 +251,7 @@ static void v253_close(struct tty_struct *tty)
        cx20442 = snd_soc_codec_get_drvdata(codec);
 
        /* Prevent the codec driver from further accessing the modem */
-       codec->hw_write = NULL;
-       cx20442->control_data = NULL;
+       cx20442->tty = NULL;
        codec->component.card->pop_time = 0;
 }
 
@@ -276,12 +274,11 @@ static void v253_receive(struct tty_struct *tty,
 
        cx20442 = snd_soc_codec_get_drvdata(codec);
 
-       if (!cx20442->control_data) {
+       if (!cx20442->tty) {
                /* First modem response, complete setup procedure */
 
                /* Set up codec driver access to modem controls */
-               cx20442->control_data = tty;
-               codec->hw_write = (hw_write_t)tty->ops->write;
+               cx20442->tty = tty;
                codec->component.card->pop_time = 1;
        }
 }
@@ -367,10 +364,9 @@ static int cx20442_codec_probe(struct snd_soc_codec *codec)
        cx20442->por = regulator_get(codec->dev, "POR");
        if (IS_ERR(cx20442->por))
                dev_warn(codec->dev, "failed to get the regulator");
-       cx20442->control_data = NULL;
+       cx20442->tty = NULL;
 
        snd_soc_codec_set_drvdata(codec, cx20442);
-       codec->hw_write = NULL;
        codec->component.card->pop_time = 0;
 
        return 0;
@@ -381,8 +377,8 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
 {
        struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
 
-       if (cx20442->control_data) {
-               struct tty_struct *tty = cx20442->control_data;
+       if (cx20442->tty) {
+               struct tty_struct *tty = cx20442->tty;
                tty_hangup(tty);
        }
 
@@ -396,17 +392,13 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
        return 0;
 }
 
-static const u8 cx20442_reg;
-
 static const struct snd_soc_codec_driver cx20442_codec_dev = {
        .probe =        cx20442_codec_probe,
        .remove =       cx20442_codec_remove,
        .set_bias_level = cx20442_set_bias_level,
-       .reg_cache_default = &cx20442_reg,
-       .reg_cache_size = 1,
-       .reg_word_size = sizeof(u8),
        .read = cx20442_read_reg_cache,
        .write = cx20442_write,
+
        .component_driver = {
                .dapm_widgets           = cx20442_dapm_widgets,
                .num_dapm_widgets       = ARRAY_SIZE(cx20442_dapm_widgets),
index 41d9b1d..b2b4e90 100644 (file)
@@ -1654,10 +1654,8 @@ static struct da7213_platform_data
        u32 fw_val32;
 
        pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
-       if (!pdata) {
-               dev_warn(codec->dev, "Failed to allocate memory for pdata\n");
+       if (!pdata)
                return NULL;
-       }
 
        if (device_property_read_u32(dev, "dlg,micbias1-lvl", &fw_val32) >= 0)
                pdata->micbias1_lvl = da7213_of_micbias_lvl(codec, fw_val32);
@@ -1855,8 +1853,7 @@ static int da7213_i2c_probe(struct i2c_client *i2c,
        struct da7213_priv *da7213;
        int ret;
 
-       da7213 = devm_kzalloc(&i2c->dev, sizeof(struct da7213_priv),
-                             GFP_KERNEL);
+       da7213 = devm_kzalloc(&i2c->dev, sizeof(*da7213), GFP_KERNEL);
        if (!da7213)
                return -ENOMEM;
 
index b2d42ec..96c644a 100644 (file)
@@ -2455,10 +2455,8 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
        u32 of_val32;
 
        pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
-       if (!pdata) {
-               dev_warn(codec->dev, "Failed to allocate memory for pdata\n");
+       if (!pdata)
                return NULL;
-       }
 
        if (of_property_read_u32(np, "dlg,micbias1-lvl-millivolt", &of_val32) >= 0)
                pdata->micbias1_lvl = da7218_of_micbias_lvl(codec, of_val32);
@@ -2520,15 +2518,13 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
        }
 
        if (da7218->dev_id == DA7218_DEV_ID) {
-               hpldet_np = of_find_node_by_name(np, "da7218_hpldet");
+               hpldet_np = of_get_child_by_name(np, "da7218_hpldet");
                if (!hpldet_np)
                        return pdata;
 
                hpldet_pdata = devm_kzalloc(codec->dev, sizeof(*hpldet_pdata),
                                            GFP_KERNEL);
                if (!hpldet_pdata) {
-                       dev_warn(codec->dev,
-                                "Failed to allocate memory for hpldet pdata\n");
                        of_node_put(hpldet_np);
                        return pdata;
                }
@@ -3273,8 +3269,7 @@ static int da7218_i2c_probe(struct i2c_client *i2c,
        struct da7218_priv *da7218;
        int ret;
 
-       da7218 = devm_kzalloc(&i2c->dev, sizeof(struct da7218_priv),
-                             GFP_KERNEL);
+       da7218 = devm_kzalloc(&i2c->dev, sizeof(*da7218), GFP_KERNEL);
        if (!da7218)
                return -ENOMEM;
 
index b88a1ee..c88f974 100644 (file)
@@ -107,8 +107,30 @@ static const struct snd_soc_codec_driver soc_dmic = {
 
 static int dmic_dev_probe(struct platform_device *pdev)
 {
+       int err;
+       u32 chans;
+       struct snd_soc_dai_driver *dai_drv = &dmic_dai;
+
+       if (pdev->dev.of_node) {
+               err = of_property_read_u32(pdev->dev.of_node, "num-channels", &chans);
+               if (err && (err != -ENOENT))
+                       return err;
+
+               if (!err) {
+                       if (chans < 1 || chans > 8)
+                               return -EINVAL;
+
+                       dai_drv = devm_kzalloc(&pdev->dev, sizeof(*dai_drv), GFP_KERNEL);
+                       if (!dai_drv)
+                               return -ENOMEM;
+
+                       memcpy(dai_drv, &dmic_dai, sizeof(*dai_drv));
+                       dai_drv->capture.channels_max = chans;
+               }
+       }
+
        return snd_soc_register_codec(&pdev->dev,
-                       &soc_dmic, &dmic_dai, 1);
+                       &soc_dmic, dai_drv, 1);
 }
 
 static int dmic_dev_remove(struct platform_device *pdev)
index f3b4f4d..dba6f4c 100644 (file)
@@ -136,8 +136,11 @@ struct hdac_hdmi_priv {
        struct mutex pin_mutex;
        struct hdac_chmap chmap;
        struct hdac_hdmi_drv_data *drv_data;
+       struct snd_soc_dai_driver *dai_drv;
 };
 
+#define hdev_to_hdmi_priv(_hdev) ((to_ehdac_device(_hdev))->private_data)
+
 static struct hdac_hdmi_pcm *
 hdac_hdmi_get_pcm_from_cvt(struct hdac_hdmi_priv *hdmi,
                           struct hdac_hdmi_cvt *cvt)
@@ -169,7 +172,7 @@ static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
                 * ports.
                 */
                if (pcm->jack_event == 0) {
-                       dev_dbg(&edev->hdac.dev,
+                       dev_dbg(&edev->hdev.dev,
                                        "jack report for pcm=%d\n",
                                        pcm->pcm_id);
                        snd_soc_jack_report(pcm->jack, SND_JACK_AVOUT,
@@ -195,18 +198,18 @@ static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
 /*
  * Get the no devices that can be connected to a port on the Pin widget.
  */
-static int hdac_hdmi_get_port_len(struct hdac_ext_device *hdac, hda_nid_t nid)
+static int hdac_hdmi_get_port_len(struct hdac_ext_device *edev, hda_nid_t nid)
 {
        unsigned int caps;
        unsigned int type, param;
 
-       caps = get_wcaps(&hdac->hdac, nid);
+       caps = get_wcaps(&edev->hdev, nid);
        type = get_wcaps_type(caps);
 
        if (!(caps & AC_WCAP_DIGITAL) || (type != AC_WID_PIN))
                return 0;
 
-       param = snd_hdac_read_parm_uncached(&hdac->hdac, nid,
+       param = snd_hdac_read_parm_uncached(&edev->hdev, nid,
                                        AC_PAR_DEVLIST_LEN);
        if (param == -1)
                return param;
@@ -219,10 +222,10 @@ static int hdac_hdmi_get_port_len(struct hdac_ext_device *hdac, hda_nid_t nid)
  * id selected on the pin. Return 0 means the first port entry
  * is selected or MST is not supported.
  */
-static int hdac_hdmi_port_select_get(struct hdac_ext_device *hdac,
+static int hdac_hdmi_port_select_get(struct hdac_ext_device *edev,
                                        struct hdac_hdmi_port *port)
 {
-       return snd_hdac_codec_read(&hdac->hdac, port->pin->nid,
+       return snd_hdac_codec_read(&edev->hdev, port->pin->nid,
                                0, AC_VERB_GET_DEVICE_SEL, 0);
 }
 
@@ -230,7 +233,7 @@ static int hdac_hdmi_port_select_get(struct hdac_ext_device *hdac,
  * Sets the selected port entry for the configuring Pin widget verb.
  * returns error if port set is not equal to port get otherwise success
  */
-static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
+static int hdac_hdmi_port_select_set(struct hdac_ext_device *edev,
                                        struct hdac_hdmi_port *port)
 {
        int num_ports;
@@ -239,7 +242,7 @@ static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
                return 0;
 
        /* AC_PAR_DEVLIST_LEN is 0 based. */
-       num_ports = hdac_hdmi_get_port_len(hdac, port->pin->nid);
+       num_ports = hdac_hdmi_get_port_len(edev, port->pin->nid);
 
        if (num_ports < 0)
                return -EIO;
@@ -250,13 +253,13 @@ static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
        if (num_ports + 1  < port->id)
                return 0;
 
-       snd_hdac_codec_write(&hdac->hdac, port->pin->nid, 0,
+       snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
                        AC_VERB_SET_DEVICE_SEL, port->id);
 
-       if (port->id != hdac_hdmi_port_select_get(hdac, port))
+       if (port->id != hdac_hdmi_port_select_get(edev, port))
                return -EIO;
 
-       dev_dbg(&hdac->hdac.dev, "Selected the port=%d\n", port->id);
+       dev_dbg(&edev->hdev.dev, "Selected the port=%d\n", port->id);
 
        return 0;
 }
@@ -276,9 +279,9 @@ static struct hdac_hdmi_pcm *get_hdmi_pcm_from_id(struct hdac_hdmi_priv *hdmi,
 
 static inline struct hdac_ext_device *to_hda_ext_device(struct device *dev)
 {
-       struct hdac_device *hdac = dev_to_hdac_dev(dev);
+       struct hdac_device *hdev = dev_to_hdac_dev(dev);
 
-       return to_ehdac_device(hdac);
+       return to_ehdac_device(hdev);
 }
 
 static unsigned int sad_format(const u8 *sad)
@@ -321,14 +324,14 @@ format_constraint:
 }
 
 static void
-hdac_hdmi_set_dip_index(struct hdac_ext_device *hdac, hda_nid_t pin_nid,
+hdac_hdmi_set_dip_index(struct hdac_ext_device *edev, hda_nid_t pin_nid,
                                int packet_index, int byte_index)
 {
        int val;
 
        val = (packet_index << 5) | (byte_index & 0x1f);
 
-       snd_hdac_codec_write(&hdac->hdac, pin_nid, 0,
+       snd_hdac_codec_write(&edev->hdev, pin_nid, 0,
                                AC_VERB_SET_HDMI_DIP_INDEX, val);
 }
 
@@ -344,14 +347,14 @@ struct dp_audio_infoframe {
        u8 LFEPBL01_LSV36_DM_INH7;
 };
 
-static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
+static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *edev,
                   struct hdac_hdmi_pcm *pcm, struct hdac_hdmi_port *port)
 {
        uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
        struct hdmi_audio_infoframe frame;
        struct hdac_hdmi_pin *pin = port->pin;
        struct dp_audio_infoframe dp_ai;
-       struct hdac_hdmi_priv *hdmi = hdac->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_cvt *cvt = pcm->cvt;
        u8 *dip;
        int ret;
@@ -360,11 +363,11 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
        u8 conn_type;
        int channels, ca;
 
-       ca = snd_hdac_channel_allocation(&hdac->hdac, port->eld.info.spk_alloc,
+       ca = snd_hdac_channel_allocation(&edev->hdev, port->eld.info.spk_alloc,
                        pcm->channels, pcm->chmap_set, true, pcm->chmap);
 
        channels = snd_hdac_get_active_channels(ca);
-       hdmi->chmap.ops.set_channel_count(&hdac->hdac, cvt->nid, channels);
+       hdmi->chmap.ops.set_channel_count(&edev->hdev, cvt->nid, channels);
 
        snd_hdac_setup_channel_mapping(&hdmi->chmap, pin->nid, false, ca,
                                pcm->channels, pcm->chmap, pcm->chmap_set);
@@ -397,32 +400,32 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
                break;
 
        default:
-               dev_err(&hdac->hdac.dev, "Invalid connection type: %d\n",
+               dev_err(&edev->hdev.dev, "Invalid connection type: %d\n",
                                                conn_type);
                return -EIO;
        }
 
        /* stop infoframe transmission */
-       hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
-       snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+       hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
+       snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
                        AC_VERB_SET_HDMI_DIP_XMIT, AC_DIPXMIT_DISABLE);
 
 
        /*  Fill infoframe. Index auto-incremented */
-       hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
+       hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
        if (conn_type == DRM_ELD_CONN_TYPE_HDMI) {
                for (i = 0; i < sizeof(buffer); i++)
-                       snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+                       snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
                                AC_VERB_SET_HDMI_DIP_DATA, buffer[i]);
        } else {
                for (i = 0; i < sizeof(dp_ai); i++)
-                       snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+                       snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
                                AC_VERB_SET_HDMI_DIP_DATA, dip[i]);
        }
 
        /* Start infoframe */
-       hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
-       snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+       hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
+       snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
                        AC_VERB_SET_HDMI_DIP_XMIT, AC_DIPXMIT_BEST);
 
        return 0;
@@ -433,11 +436,11 @@ static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
                int slots, int slot_width)
 {
        struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_dai_port_map *dai_map;
        struct hdac_hdmi_pcm *pcm;
 
-       dev_dbg(&edev->hdac.dev, "%s: strm_tag: %d\n", __func__, tx_mask);
+       dev_dbg(&edev->hdev.dev, "%s: strm_tag: %d\n", __func__, tx_mask);
 
        dai_map = &hdmi->dai_map[dai->id];
 
@@ -452,8 +455,8 @@ static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
 static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
        struct snd_pcm_hw_params *hparams, struct snd_soc_dai *dai)
 {
-       struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
-       struct hdac_hdmi_priv *hdmi = hdac->private_data;
+       struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_dai_port_map *dai_map;
        struct hdac_hdmi_port *port;
        struct hdac_hdmi_pcm *pcm;
@@ -466,7 +469,7 @@ static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
                return -ENODEV;
 
        if ((!port->eld.monitor_present) || (!port->eld.eld_valid)) {
-               dev_err(&hdac->hdac.dev,
+               dev_err(&edev->hdev.dev,
                        "device is not configured for this pin:port%d:%d\n",
                                        port->pin->nid, port->id);
                return -ENODEV;
@@ -486,28 +489,28 @@ static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static int hdac_hdmi_query_port_connlist(struct hdac_ext_device *hdac,
+static int hdac_hdmi_query_port_connlist(struct hdac_ext_device *edev,
                                        struct hdac_hdmi_pin *pin,
                                        struct hdac_hdmi_port *port)
 {
-       if (!(get_wcaps(&hdac->hdac, pin->nid) & AC_WCAP_CONN_LIST)) {
-               dev_warn(&hdac->hdac.dev,
+       if (!(get_wcaps(&edev->hdev, pin->nid) & AC_WCAP_CONN_LIST)) {
+               dev_warn(&edev->hdev.dev,
                        "HDMI: pin %d wcaps %#x does not support connection list\n",
-                       pin->nid, get_wcaps(&hdac->hdac, pin->nid));
+                       pin->nid, get_wcaps(&edev->hdev, pin->nid));
                return -EINVAL;
        }
 
-       if (hdac_hdmi_port_select_set(hdac, port) < 0)
+       if (hdac_hdmi_port_select_set(edev, port) < 0)
                return -EIO;
 
-       port->num_mux_nids = snd_hdac_get_connections(&hdac->hdac, pin->nid,
+       port->num_mux_nids = snd_hdac_get_connections(&edev->hdev, pin->nid,
                        port->mux_nids, HDA_MAX_CONNECTIONS);
        if (port->num_mux_nids == 0)
-               dev_warn(&hdac->hdac.dev,
+               dev_warn(&edev->hdev.dev,
                        "No connections found for pin:port %d:%d\n",
                                                pin->nid, port->id);
 
-       dev_dbg(&hdac->hdac.dev, "num_mux_nids %d for pin:port %d:%d\n",
+       dev_dbg(&edev->hdev.dev, "num_mux_nids %d for pin:port %d:%d\n",
                        port->num_mux_nids, pin->nid, port->id);
 
        return port->num_mux_nids;
@@ -565,8 +568,8 @@ static struct hdac_hdmi_port *hdac_hdmi_get_port_from_cvt(
 static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
                        struct snd_soc_dai *dai)
 {
-       struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
-       struct hdac_hdmi_priv *hdmi = hdac->private_data;
+       struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_dai_port_map *dai_map;
        struct hdac_hdmi_cvt *cvt;
        struct hdac_hdmi_port *port;
@@ -575,7 +578,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
        dai_map = &hdmi->dai_map[dai->id];
 
        cvt = dai_map->cvt;
-       port = hdac_hdmi_get_port_from_cvt(hdac, hdmi, cvt);
+       port = hdac_hdmi_get_port_from_cvt(edev, hdmi, cvt);
 
        /*
         * To make PA and other userland happy.
@@ -586,7 +589,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
        if ((!port->eld.monitor_present) ||
                        (!port->eld.eld_valid)) {
 
-               dev_warn(&hdac->hdac.dev,
+               dev_warn(&edev->hdev.dev,
                        "Failed: present?:%d ELD valid?:%d pin:port: %d:%d\n",
                        port->eld.monitor_present, port->eld.eld_valid,
                        port->pin->nid, port->id);
@@ -608,8 +611,8 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
 static void hdac_hdmi_pcm_close(struct snd_pcm_substream *substream,
                struct snd_soc_dai *dai)
 {
-       struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
-       struct hdac_hdmi_priv *hdmi = hdac->private_data;
+       struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_dai_port_map *dai_map;
        struct hdac_hdmi_pcm *pcm;
 
@@ -630,14 +633,13 @@ static void hdac_hdmi_pcm_close(struct snd_pcm_substream *substream,
 }
 
 static int
-hdac_hdmi_query_cvt_params(struct hdac_device *hdac, struct hdac_hdmi_cvt *cvt)
+hdac_hdmi_query_cvt_params(struct hdac_device *hdev, struct hdac_hdmi_cvt *cvt)
 {
        unsigned int chans;
-       struct hdac_ext_device *edev = to_ehdac_device(hdac);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
        int err;
 
-       chans = get_wcaps(hdac, cvt->nid);
+       chans = get_wcaps(hdev, cvt->nid);
        chans = get_wcaps_channels(chans);
 
        cvt->params.channels_min = 2;
@@ -646,12 +648,12 @@ hdac_hdmi_query_cvt_params(struct hdac_device *hdac, struct hdac_hdmi_cvt *cvt)
        if (chans > hdmi->chmap.channels_max)
                hdmi->chmap.channels_max = chans;
 
-       err = snd_hdac_query_supported_pcm(hdac, cvt->nid,
+       err = snd_hdac_query_supported_pcm(hdev, cvt->nid,
                        &cvt->params.rates,
                        &cvt->params.formats,
                        &cvt->params.maxbps);
        if (err < 0)
-               dev_err(&hdac->dev,
+               dev_err(&hdev->dev,
                        "Failed to query pcm params for nid %d: %d\n",
                        cvt->nid, err);
 
@@ -696,7 +698,7 @@ static void hdac_hdmi_fill_route(struct snd_soc_dapm_route *route,
 static struct hdac_hdmi_pcm *hdac_hdmi_get_pcm(struct hdac_ext_device *edev,
                                        struct hdac_hdmi_port *port)
 {
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_pcm *pcm = NULL;
        struct hdac_hdmi_port *p;
 
@@ -716,9 +718,9 @@ static struct hdac_hdmi_pcm *hdac_hdmi_get_pcm(struct hdac_ext_device *edev,
 static void hdac_hdmi_set_power_state(struct hdac_ext_device *edev,
                             hda_nid_t nid, unsigned int pwr_state)
 {
-       if (get_wcaps(&edev->hdac, nid) & AC_WCAP_POWER) {
-               if (!snd_hdac_check_power_state(&edev->hdac, nid, pwr_state))
-                       snd_hdac_codec_write(&edev->hdac, nid, 0,
+       if (get_wcaps(&edev->hdev, nid) & AC_WCAP_POWER) {
+               if (!snd_hdac_check_power_state(&edev->hdev, nid, pwr_state))
+                       snd_hdac_codec_write(&edev->hdev, nid, 0,
                                AC_VERB_SET_POWER_STATE, pwr_state);
        }
 }
@@ -726,8 +728,8 @@ static void hdac_hdmi_set_power_state(struct hdac_ext_device *edev,
 static void hdac_hdmi_set_amp(struct hdac_ext_device *edev,
                                   hda_nid_t nid, int val)
 {
-       if (get_wcaps(&edev->hdac, nid) & AC_WCAP_OUT_AMP)
-               snd_hdac_codec_write(&edev->hdac, nid, 0,
+       if (get_wcaps(&edev->hdev, nid) & AC_WCAP_OUT_AMP)
+               snd_hdac_codec_write(&edev->hdev, nid, 0,
                                        AC_VERB_SET_AMP_GAIN_MUTE, val);
 }
 
@@ -739,7 +741,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
        struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
        struct hdac_hdmi_pcm *pcm;
 
-       dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+       dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
                        __func__, w->name, event);
 
        pcm = hdac_hdmi_get_pcm(edev, port);
@@ -755,7 +757,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
                hdac_hdmi_set_power_state(edev, port->pin->nid, AC_PWRST_D0);
 
                /* Enable out path for this pin widget */
-               snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+               snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
                                AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
 
                hdac_hdmi_set_amp(edev, port->pin->nid, AMP_OUT_UNMUTE);
@@ -766,7 +768,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
                hdac_hdmi_set_amp(edev, port->pin->nid, AMP_OUT_MUTE);
 
                /* Disable out path for this pin widget */
-               snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+               snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
                                AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
 
                hdac_hdmi_set_power_state(edev, port->pin->nid, AC_PWRST_D3);
@@ -782,10 +784,10 @@ static int hdac_hdmi_cvt_output_widget_event(struct snd_soc_dapm_widget *w,
 {
        struct hdac_hdmi_cvt *cvt = w->priv;
        struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_pcm *pcm;
 
-       dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+       dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
                        __func__, w->name, event);
 
        pcm = hdac_hdmi_get_pcm_from_cvt(hdmi, cvt);
@@ -797,23 +799,23 @@ static int hdac_hdmi_cvt_output_widget_event(struct snd_soc_dapm_widget *w,
                hdac_hdmi_set_power_state(edev, cvt->nid, AC_PWRST_D0);
 
                /* Enable transmission */
-               snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+               snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
                        AC_VERB_SET_DIGI_CONVERT_1, 1);
 
                /* Category Code (CC) to zero */
-               snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+               snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
                        AC_VERB_SET_DIGI_CONVERT_2, 0);
 
-               snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+               snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
                                AC_VERB_SET_CHANNEL_STREAMID, pcm->stream_tag);
-               snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+               snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
                                AC_VERB_SET_STREAM_FORMAT, pcm->format);
                break;
 
        case SND_SOC_DAPM_POST_PMD:
-               snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+               snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
                                AC_VERB_SET_CHANNEL_STREAMID, 0);
-               snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+               snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
                                AC_VERB_SET_STREAM_FORMAT, 0);
 
                hdac_hdmi_set_power_state(edev, cvt->nid, AC_PWRST_D3);
@@ -831,7 +833,7 @@ static int hdac_hdmi_pin_mux_widget_event(struct snd_soc_dapm_widget *w,
        struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
        int mux_idx;
 
-       dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+       dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
                        __func__, w->name, event);
 
        if (!kc)
@@ -844,7 +846,7 @@ static int hdac_hdmi_pin_mux_widget_event(struct snd_soc_dapm_widget *w,
                return -EIO;
 
        if (mux_idx > 0) {
-               snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+               snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
                        AC_VERB_SET_CONNECT_SEL, (mux_idx - 1));
        }
 
@@ -864,7 +866,7 @@ static int hdac_hdmi_set_pin_port_mux(struct snd_kcontrol *kcontrol,
        struct snd_soc_dapm_context *dapm = w->dapm;
        struct hdac_hdmi_port *port = w->priv;
        struct hdac_ext_device *edev = to_hda_ext_device(dapm->dev);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_pcm *pcm = NULL;
        const char *cvt_name =  e->texts[ucontrol->value.enumerated.item[0]];
 
@@ -922,7 +924,7 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
                                struct snd_soc_dapm_widget *widget,
                                const char *widget_name)
 {
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_pin *pin = port->pin;
        struct snd_kcontrol_new *kc;
        struct hdac_hdmi_cvt *cvt;
@@ -934,17 +936,17 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
        int i = 0;
        int num_items = hdmi->num_cvt + 1;
 
-       kc = devm_kzalloc(&edev->hdac.dev, sizeof(*kc), GFP_KERNEL);
+       kc = devm_kzalloc(&edev->hdev.dev, sizeof(*kc), GFP_KERNEL);
        if (!kc)
                return -ENOMEM;
 
-       se = devm_kzalloc(&edev->hdac.dev, sizeof(*se), GFP_KERNEL);
+       se = devm_kzalloc(&edev->hdev.dev, sizeof(*se), GFP_KERNEL);
        if (!se)
                return -ENOMEM;
 
        snprintf(kc_name, NAME_SIZE, "Pin %d port %d Input",
                                                pin->nid, port->id);
-       kc->name = devm_kstrdup(&edev->hdac.dev, kc_name, GFP_KERNEL);
+       kc->name = devm_kstrdup(&edev->hdev.dev, kc_name, GFP_KERNEL);
        if (!kc->name)
                return -ENOMEM;
 
@@ -962,24 +964,24 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
        se->mask = roundup_pow_of_two(se->items) - 1;
 
        sprintf(mux_items, "NONE");
-       items[i] = devm_kstrdup(&edev->hdac.dev, mux_items, GFP_KERNEL);
+       items[i] = devm_kstrdup(&edev->hdev.dev, mux_items, GFP_KERNEL);
        if (!items[i])
                return -ENOMEM;
 
        list_for_each_entry(cvt, &hdmi->cvt_list, head) {
                i++;
                sprintf(mux_items, "cvt %d", cvt->nid);
-               items[i] = devm_kstrdup(&edev->hdac.dev, mux_items, GFP_KERNEL);
+               items[i] = devm_kstrdup(&edev->hdev.dev, mux_items, GFP_KERNEL);
                if (!items[i])
                        return -ENOMEM;
        }
 
-       se->texts = devm_kmemdup(&edev->hdac.dev, items,
+       se->texts = devm_kmemdup(&edev->hdev.dev, items,
                        (num_items  * sizeof(char *)), GFP_KERNEL);
        if (!se->texts)
                return -ENOMEM;
 
-       return hdac_hdmi_fill_widget_info(&edev->hdac.dev, widget,
+       return hdac_hdmi_fill_widget_info(&edev->hdev.dev, widget,
                        snd_soc_dapm_mux, port, widget_name, NULL, kc, 1,
                        hdac_hdmi_pin_mux_widget_event,
                        SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_REG);
@@ -990,7 +992,7 @@ static void hdac_hdmi_add_pinmux_cvt_route(struct hdac_ext_device *edev,
                        struct snd_soc_dapm_widget *widgets,
                        struct snd_soc_dapm_route *route, int rindex)
 {
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        const struct snd_kcontrol_new *kc;
        struct soc_enum *se;
        int mux_index = hdmi->num_cvt + hdmi->num_ports;
@@ -1033,8 +1035,8 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
        struct snd_soc_dapm_widget *widgets;
        struct snd_soc_dapm_route *route;
        struct hdac_ext_device *edev = to_hda_ext_device(dapm->dev);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
-       struct snd_soc_dai_driver *dai_drv = dapm->component->dai_drv;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+       struct snd_soc_dai_driver *dai_drv = hdmi->dai_drv;
        char widget_name[NAME_SIZE];
        struct hdac_hdmi_cvt *cvt;
        struct hdac_hdmi_pin *pin;
@@ -1134,7 +1136,7 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
 
 static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
 {
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_dai_port_map *dai_map;
        struct hdac_hdmi_cvt *cvt;
        int dai_id = 0;
@@ -1150,7 +1152,7 @@ static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
                dai_id++;
 
                if (dai_id == HDA_MAX_CVTS) {
-                       dev_warn(&edev->hdac.dev,
+                       dev_warn(&edev->hdev.dev,
                                "Max dais supported: %d\n", dai_id);
                        break;
                }
@@ -1161,7 +1163,7 @@ static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
 
 static int hdac_hdmi_add_cvt(struct hdac_ext_device *edev, hda_nid_t nid)
 {
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_cvt *cvt;
        char name[NAME_SIZE];
 
@@ -1176,7 +1178,7 @@ static int hdac_hdmi_add_cvt(struct hdac_ext_device *edev, hda_nid_t nid)
        list_add_tail(&cvt->head, &hdmi->cvt_list);
        hdmi->num_cvt++;
 
-       return hdac_hdmi_query_cvt_params(&edev->hdac, cvt);
+       return hdac_hdmi_query_cvt_params(&edev->hdev, cvt);
 }
 
 static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
@@ -1188,7 +1190,7 @@ static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
                                                >> DRM_ELD_VER_SHIFT;
 
        if (ver != ELD_VER_CEA_861D && ver != ELD_VER_PARTIAL) {
-               dev_err(&edev->hdac.dev, "HDMI: Unknown ELD version %d\n", ver);
+               dev_err(&edev->hdev.dev, "HDMI: Unknown ELD version %d\n", ver);
                return -EINVAL;
        }
 
@@ -1196,7 +1198,7 @@ static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
                DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
 
        if (mnl > ELD_MAX_MNL) {
-               dev_err(&edev->hdac.dev, "HDMI: MNL Invalid %d\n", mnl);
+               dev_err(&edev->hdev.dev, "HDMI: MNL Invalid %d\n", mnl);
                return -EINVAL;
        }
 
@@ -1209,7 +1211,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
                                    struct hdac_hdmi_port *port)
 {
        struct hdac_ext_device *edev = pin->edev;
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_pcm *pcm;
        int size = 0;
        int port_id = -1;
@@ -1227,7 +1229,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
        if (pin->mst_capable)
                port_id = port->id;
 
-       size = snd_hdac_acomp_get_eld(&edev->hdac, pin->nid, port_id,
+       size = snd_hdac_acomp_get_eld(&edev->hdev, pin->nid, port_id,
                                &port->eld.monitor_present,
                                port->eld.eld_buffer,
                                ELD_MAX_SIZE);
@@ -1250,7 +1252,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
 
        if (!port->eld.monitor_present || !port->eld.eld_valid) {
 
-               dev_err(&edev->hdac.dev, "%s: disconnect for pin:port %d:%d\n",
+               dev_err(&edev->hdev.dev, "%s: disconnect for pin:port %d:%d\n",
                                                __func__, pin->nid, port->id);
 
                /*
@@ -1304,7 +1306,7 @@ static int hdac_hdmi_add_ports(struct hdac_hdmi_priv *hdmi,
 
 static int hdac_hdmi_add_pin(struct hdac_ext_device *edev, hda_nid_t nid)
 {
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_pin *pin;
        int ret;
 
@@ -1333,40 +1335,38 @@ static int hdac_hdmi_add_pin(struct hdac_ext_device *edev, hda_nid_t nid)
 #define INTEL_EN_DP12                  0x02 /* enable DP 1.2 features */
 #define INTEL_EN_ALL_PIN_CVTS  0x01 /* enable 2nd & 3rd pins and convertors */
 
-static void hdac_hdmi_skl_enable_all_pins(struct hdac_device *hdac)
+static void hdac_hdmi_skl_enable_all_pins(struct hdac_device *hdev)
 {
        unsigned int vendor_param;
-       struct hdac_ext_device *edev = to_ehdac_device(hdac);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
        unsigned int vendor_nid = hdmi->drv_data->vendor_nid;
 
-       vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+       vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
                                INTEL_GET_VENDOR_VERB, 0);
        if (vendor_param == -1 || vendor_param & INTEL_EN_ALL_PIN_CVTS)
                return;
 
        vendor_param |= INTEL_EN_ALL_PIN_CVTS;
-       vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+       vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
                                INTEL_SET_VENDOR_VERB, vendor_param);
        if (vendor_param == -1)
                return;
 }
 
-static void hdac_hdmi_skl_enable_dp12(struct hdac_device *hdac)
+static void hdac_hdmi_skl_enable_dp12(struct hdac_device *hdev)
 {
        unsigned int vendor_param;
-       struct hdac_ext_device *edev = to_ehdac_device(hdac);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
        unsigned int vendor_nid = hdmi->drv_data->vendor_nid;
 
-       vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+       vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
                                INTEL_GET_VENDOR_VERB, 0);
        if (vendor_param == -1 || vendor_param & INTEL_EN_DP12)
                return;
 
        /* enable DP1.2 mode */
        vendor_param |= INTEL_EN_DP12;
-       vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+       vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
                                INTEL_SET_VENDOR_VERB, vendor_param);
        if (vendor_param == -1)
                return;
@@ -1384,7 +1384,7 @@ static const struct snd_soc_dai_ops hdmi_dai_ops = {
  * Each converter can support a stream independently. So a dai is created
  * based on the number of converter queried.
  */
-static int hdac_hdmi_create_dais(struct hdac_device *hdac,
+static int hdac_hdmi_create_dais(struct hdac_device *hdev,
                struct snd_soc_dai_driver **dais,
                struct hdac_hdmi_priv *hdmi, int num_dais)
 {
@@ -1397,20 +1397,20 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
        u64 formats;
        int ret;
 
-       hdmi_dais = devm_kzalloc(&hdac->dev,
+       hdmi_dais = devm_kzalloc(&hdev->dev,
                        (sizeof(*hdmi_dais) * num_dais),
                        GFP_KERNEL);
        if (!hdmi_dais)
                return -ENOMEM;
 
        list_for_each_entry(cvt, &hdmi->cvt_list, head) {
-               ret = snd_hdac_query_supported_pcm(hdac, cvt->nid,
+               ret = snd_hdac_query_supported_pcm(hdev, cvt->nid,
                                        &rates, &formats, &bps);
                if (ret)
                        return ret;
 
                sprintf(dai_name, "intel-hdmi-hifi%d", i+1);
-               hdmi_dais[i].name = devm_kstrdup(&hdac->dev,
+               hdmi_dais[i].name = devm_kstrdup(&hdev->dev,
                                        dai_name, GFP_KERNEL);
 
                if (!hdmi_dais[i].name)
@@ -1418,7 +1418,7 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
 
                snprintf(name, sizeof(name), "hifi%d", i+1);
                hdmi_dais[i].playback.stream_name =
-                               devm_kstrdup(&hdac->dev, name, GFP_KERNEL);
+                               devm_kstrdup(&hdev->dev, name, GFP_KERNEL);
                if (!hdmi_dais[i].playback.stream_name)
                        return -ENOMEM;
 
@@ -1438,6 +1438,7 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
        }
 
        *dais = hdmi_dais;
+       hdmi->dai_drv = hdmi_dais;
 
        return 0;
 }
@@ -1451,29 +1452,26 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
 {
        hda_nid_t nid;
        int i, num_nodes;
-       struct hdac_device *hdac = &edev->hdac;
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
        struct hdac_hdmi_cvt *temp_cvt, *cvt_next;
        struct hdac_hdmi_pin *temp_pin, *pin_next;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+       struct hdac_device *hdev = &edev->hdev;
        int ret;
 
-       hdac_hdmi_skl_enable_all_pins(hdac);
-       hdac_hdmi_skl_enable_dp12(hdac);
+       hdac_hdmi_skl_enable_all_pins(hdev);
+       hdac_hdmi_skl_enable_dp12(hdev);
 
-       num_nodes = snd_hdac_get_sub_nodes(hdac, hdac->afg, &nid);
+       num_nodes = snd_hdac_get_sub_nodes(hdev, hdev->afg, &nid);
        if (!nid || num_nodes <= 0) {
-               dev_warn(&hdac->dev, "HDMI: failed to get afg sub nodes\n");
+               dev_warn(&hdev->dev, "HDMI: failed to get afg sub nodes\n");
                return -EINVAL;
        }
 
-       hdac->num_nodes = num_nodes;
-       hdac->start_nid = nid;
-
-       for (i = 0; i < hdac->num_nodes; i++, nid++) {
+       for (i = 0; i < num_nodes; i++, nid++) {
                unsigned int caps;
                unsigned int type;
 
-               caps = get_wcaps(hdac, nid);
+               caps = get_wcaps(hdev, nid);
                type = get_wcaps_type(caps);
 
                if (!(caps & AC_WCAP_DIGITAL))
@@ -1495,16 +1493,14 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
                }
        }
 
-       hdac->end_nid = nid;
-
        if (!hdmi->num_pin || !hdmi->num_cvt) {
                ret = -EIO;
                goto free_widgets;
        }
 
-       ret = hdac_hdmi_create_dais(hdac, dais, hdmi, hdmi->num_cvt);
+       ret = hdac_hdmi_create_dais(hdev, dais, hdmi, hdmi->num_cvt);
        if (ret) {
-               dev_err(&hdac->dev, "Failed to create dais with err: %d\n",
+               dev_err(&hdev->dev, "Failed to create dais with err: %d\n",
                                                        ret);
                goto free_widgets;
        }
@@ -1537,7 +1533,7 @@ free_widgets:
 static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
 {
        struct hdac_ext_device *edev = aptr;
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_pin *pin = NULL;
        struct hdac_hdmi_port *hport = NULL;
        struct snd_soc_codec *codec = edev->scodec;
@@ -1546,7 +1542,7 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
        /* Don't know how this mapping is derived */
        hda_nid_t pin_nid = port + 0x04;
 
-       dev_dbg(&edev->hdac.dev, "%s: for pin:%d port=%d\n", __func__,
+       dev_dbg(&edev->hdev.dev, "%s: for pin:%d port=%d\n", __func__,
                                                        pin_nid, pipe);
 
        /*
@@ -1559,7 +1555,7 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
                        SNDRV_CTL_POWER_D0)
                return;
 
-       if (atomic_read(&edev->hdac.in_pm))
+       if (atomic_read(&edev->hdev.in_pm))
                return;
 
        list_for_each_entry(pin, &hdmi->pin_list, head) {
@@ -1614,7 +1610,7 @@ static int create_fill_jack_kcontrols(struct snd_soc_card *card,
        char *name;
        int i = 0, j;
        struct snd_soc_codec *codec = edev->scodec;
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 
        kc = devm_kcalloc(codec->dev, hdmi->num_ports,
                                sizeof(*kc), GFP_KERNEL);
@@ -1652,7 +1648,7 @@ int hdac_hdmi_jack_port_init(struct snd_soc_codec *codec,
                        struct snd_soc_dapm_context *dapm)
 {
        struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_pin *pin;
        struct snd_soc_dapm_widget *widgets;
        struct snd_soc_dapm_route *route;
@@ -1728,7 +1724,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
 {
        struct snd_soc_codec *codec = dai->codec;
        struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_pcm *pcm;
        struct snd_pcm *snd_pcm;
        int err;
@@ -1750,7 +1746,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
        if (snd_pcm) {
                err = snd_hdac_add_chmap_ctls(snd_pcm, device, &hdmi->chmap);
                if (err < 0) {
-                       dev_err(&edev->hdac.dev,
+                       dev_err(&edev->hdev.dev,
                                "chmap control add failed with err: %d for pcm: %d\n",
                                err, device);
                        kfree(pcm);
@@ -1791,7 +1787,7 @@ static void hdac_hdmi_present_sense_all_pins(struct hdac_ext_device *edev,
 static int hdmi_codec_probe(struct snd_soc_codec *codec)
 {
        struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct snd_soc_dapm_context *dapm =
                snd_soc_component_get_dapm(&codec->component);
        struct hdac_ext_link *hlink = NULL;
@@ -1803,9 +1799,9 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
         * hold the ref while we probe, also no need to drop the ref on
         * exit, we call pm_runtime_suspend() so that will do for us
         */
-       hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+       hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdev.dev));
        if (!hlink) {
-               dev_err(&edev->hdac.dev, "hdac link not found\n");
+               dev_err(&edev->hdev.dev, "hdac link not found\n");
                return -EIO;
        }
 
@@ -1818,7 +1814,7 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
        aops.audio_ptr = edev;
        ret = snd_hdac_i915_register_notifier(&aops);
        if (ret < 0) {
-               dev_err(&edev->hdac.dev, "notifier register failed: err: %d\n",
+               dev_err(&edev->hdev.dev, "notifier register failed: err: %d\n",
                                ret);
                return ret;
        }
@@ -1831,9 +1827,9 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
         * hdac_device core already sets the state to active and calls
         * get_noresume. So enable runtime and set the device to suspend.
         */
-       pm_runtime_enable(&edev->hdac.dev);
-       pm_runtime_put(&edev->hdac.dev);
-       pm_runtime_suspend(&edev->hdac.dev);
+       pm_runtime_enable(&edev->hdev.dev);
+       pm_runtime_put(&edev->hdev.dev);
+       pm_runtime_suspend(&edev->hdev.dev);
 
        return 0;
 }
@@ -1842,7 +1838,7 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
 {
        struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
 
-       pm_runtime_disable(&edev->hdac.dev);
+       pm_runtime_disable(&edev->hdev.dev);
        return 0;
 }
 
@@ -1850,9 +1846,9 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
 static int hdmi_codec_prepare(struct device *dev)
 {
        struct hdac_ext_device *edev = to_hda_ext_device(dev);
-       struct hdac_device *hdac = &edev->hdac;
+       struct hdac_device *hdev = &edev->hdev;
 
-       pm_runtime_get_sync(&edev->hdac.dev);
+       pm_runtime_get_sync(&edev->hdev.dev);
 
        /*
         * Power down afg.
@@ -1861,7 +1857,7 @@ static int hdmi_codec_prepare(struct device *dev)
         * is received. So setting power state is ensured without using loop
         * to read the state.
         */
-       snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+       snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
                                                        AC_PWRST_D3);
 
        return 0;
@@ -1870,15 +1866,15 @@ static int hdmi_codec_prepare(struct device *dev)
 static void hdmi_codec_complete(struct device *dev)
 {
        struct hdac_ext_device *edev = to_hda_ext_device(dev);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
-       struct hdac_device *hdac = &edev->hdac;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+       struct hdac_device *hdev = &edev->hdev;
 
        /* Power up afg */
-       snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+       snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
                                                        AC_PWRST_D0);
 
-       hdac_hdmi_skl_enable_all_pins(&edev->hdac);
-       hdac_hdmi_skl_enable_dp12(&edev->hdac);
+       hdac_hdmi_skl_enable_all_pins(&edev->hdev);
+       hdac_hdmi_skl_enable_dp12(&edev->hdev);
 
        /*
         * As the ELD notify callback request is not entertained while the
@@ -1888,7 +1884,7 @@ static void hdmi_codec_complete(struct device *dev)
         */
        hdac_hdmi_present_sense_all_pins(edev, hdmi, false);
 
-       pm_runtime_put_sync(&edev->hdac.dev);
+       pm_runtime_put_sync(&edev->hdev.dev);
 }
 #else
 #define hdmi_codec_prepare NULL
@@ -1901,21 +1897,20 @@ static const struct snd_soc_codec_driver hdmi_hda_codec = {
        .idle_bias_off  = true,
 };
 
-static void hdac_hdmi_get_chmap(struct hdac_device *hdac, int pcm_idx,
+static void hdac_hdmi_get_chmap(struct hdac_device *hdev, int pcm_idx,
                                        unsigned char *chmap)
 {
-       struct hdac_ext_device *edev = to_ehdac_device(hdac);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
        struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
 
        memcpy(chmap, pcm->chmap, ARRAY_SIZE(pcm->chmap));
 }
 
-static void hdac_hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
+static void hdac_hdmi_set_chmap(struct hdac_device *hdev, int pcm_idx,
                                unsigned char *chmap, int prepared)
 {
-       struct hdac_ext_device *edev = to_ehdac_device(hdac);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_ext_device *edev = to_ehdac_device(hdev);
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
        struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
        struct hdac_hdmi_port *port;
 
@@ -1934,10 +1929,9 @@ static void hdac_hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
        mutex_unlock(&pcm->lock);
 }
 
-static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
+static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdev, int pcm_idx)
 {
-       struct hdac_ext_device *edev = to_ehdac_device(hdac);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
        struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
 
        if (!pcm)
@@ -1949,10 +1943,9 @@ static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
        return true;
 }
 
-static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdac, int pcm_idx)
+static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdev, int pcm_idx)
 {
-       struct hdac_ext_device *edev = to_ehdac_device(hdac);
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
        struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
        struct hdac_hdmi_port *port;
 
@@ -1983,30 +1976,30 @@ static struct hdac_hdmi_drv_data intel_drv_data  = {
 
 static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 {
-       struct hdac_device *codec = &edev->hdac;
+       struct hdac_device *hdev = &edev->hdev;
        struct hdac_hdmi_priv *hdmi_priv;
        struct snd_soc_dai_driver *hdmi_dais = NULL;
        struct hdac_ext_link *hlink = NULL;
        int num_dais = 0;
        int ret = 0;
-       struct hdac_driver *hdrv = drv_to_hdac_driver(codec->dev.driver);
-       const struct hda_device_id *hdac_id = hdac_get_device_id(codec, hdrv);
+       struct hdac_driver *hdrv = drv_to_hdac_driver(hdev->dev.driver);
+       const struct hda_device_id *hdac_id = hdac_get_device_id(hdev, hdrv);
 
        /* hold the ref while we probe */
-       hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+       hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdev.dev));
        if (!hlink) {
-               dev_err(&edev->hdac.dev, "hdac link not found\n");
+               dev_err(&edev->hdev.dev, "hdac link not found\n");
                return -EIO;
        }
 
        snd_hdac_ext_bus_link_get(edev->ebus, hlink);
 
-       hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
+       hdmi_priv = devm_kzalloc(&hdev->dev, sizeof(*hdmi_priv), GFP_KERNEL);
        if (hdmi_priv == NULL)
                return -ENOMEM;
 
        edev->private_data = hdmi_priv;
-       snd_hdac_register_chmap_ops(codec, &hdmi_priv->chmap);
+       snd_hdac_register_chmap_ops(hdev, &hdmi_priv->chmap);
        hdmi_priv->chmap.ops.get_chmap = hdac_hdmi_get_chmap;
        hdmi_priv->chmap.ops.set_chmap = hdac_hdmi_set_chmap;
        hdmi_priv->chmap.ops.is_pcm_attached = is_hdac_hdmi_pcm_attached;
@@ -2021,7 +2014,7 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
        else
                hdmi_priv->drv_data = &intel_drv_data;
 
-       dev_set_drvdata(&codec->dev, edev);
+       dev_set_drvdata(&hdev->dev, edev);
 
        INIT_LIST_HEAD(&hdmi_priv->pin_list);
        INIT_LIST_HEAD(&hdmi_priv->cvt_list);
@@ -2032,9 +2025,9 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
         * Turned off in the runtime_suspend during the first explicit
         * pm_runtime_suspend call.
         */
-       ret = snd_hdac_display_power(edev->hdac.bus, true);
+       ret = snd_hdac_display_power(edev->hdev.bus, true);
        if (ret < 0) {
-               dev_err(&edev->hdac.dev,
+               dev_err(&edev->hdev.dev,
                        "Cannot turn on display power on i915 err: %d\n",
                        ret);
                return ret;
@@ -2042,13 +2035,14 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 
        ret = hdac_hdmi_parse_and_map_nid(edev, &hdmi_dais, &num_dais);
        if (ret < 0) {
-               dev_err(&codec->dev,
+               dev_err(&hdev->dev,
                        "Failed in parse and map nid with err: %d\n", ret);
                return ret;
        }
+       snd_hdac_refresh_widgets(hdev, true);
 
        /* ASoC specific initialization */
-       ret = snd_soc_register_codec(&codec->dev, &hdmi_hda_codec,
+       ret = snd_soc_register_codec(&hdev->dev, &hdmi_hda_codec,
                                        hdmi_dais, num_dais);
 
        snd_hdac_ext_bus_link_put(edev->ebus, hlink);
@@ -2058,14 +2052,14 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 
 static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
 {
-       struct hdac_hdmi_priv *hdmi = edev->private_data;
+       struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
        struct hdac_hdmi_pin *pin, *pin_next;
        struct hdac_hdmi_cvt *cvt, *cvt_next;
        struct hdac_hdmi_pcm *pcm, *pcm_next;
        struct hdac_hdmi_port *port, *port_next;
        int i;
 
-       snd_soc_unregister_codec(&edev->hdac.dev);
+       snd_soc_unregister_codec(&edev->hdev.dev);
 
        list_for_each_entry_safe(pcm, pcm_next, &hdmi->pcm_list, head) {
                pcm->cvt = NULL;
@@ -2101,8 +2095,8 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
 static int hdac_hdmi_runtime_suspend(struct device *dev)
 {
        struct hdac_ext_device *edev = to_hda_ext_device(dev);
-       struct hdac_device *hdac = &edev->hdac;
-       struct hdac_bus *bus = hdac->bus;
+       struct hdac_device *hdev = &edev->hdev;
+       struct hdac_bus *bus = hdev->bus;
        struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
        struct hdac_ext_link *hlink = NULL;
        int err;
@@ -2120,7 +2114,7 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
         * is received. So setting power state is ensured without using loop
         * to read the state.
         */
-       snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+       snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
                                                        AC_PWRST_D3);
        err = snd_hdac_display_power(bus, false);
        if (err < 0) {
@@ -2142,8 +2136,8 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
 static int hdac_hdmi_runtime_resume(struct device *dev)
 {
        struct hdac_ext_device *edev = to_hda_ext_device(dev);
-       struct hdac_device *hdac = &edev->hdac;
-       struct hdac_bus *bus = hdac->bus;
+       struct hdac_device *hdev = &edev->hdev;
+       struct hdac_bus *bus = hdev->bus;
        struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
        struct hdac_ext_link *hlink = NULL;
        int err;
@@ -2168,11 +2162,11 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
                return err;
        }
 
-       hdac_hdmi_skl_enable_all_pins(&edev->hdac);
-       hdac_hdmi_skl_enable_dp12(&edev->hdac);
+       hdac_hdmi_skl_enable_all_pins(&edev->hdev);
+       hdac_hdmi_skl_enable_dp12(&edev->hdev);
 
        /* Power up afg */
-       snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+       snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
                                                        AC_PWRST_D0);
 
        return 0;
@@ -2192,6 +2186,8 @@ static const struct hda_device_id hdmi_list[] = {
        HDA_CODEC_EXT_ENTRY(0x80862809, 0x100000, "Skylake HDMI", 0),
        HDA_CODEC_EXT_ENTRY(0x8086280a, 0x100000, "Broxton HDMI", 0),
        HDA_CODEC_EXT_ENTRY(0x8086280b, 0x100000, "Kabylake HDMI", 0),
+       HDA_CODEC_EXT_ENTRY(0x8086280c, 0x100000, "Cannonlake HDMI",
+                                                  &intel_glk_drv_data),
        HDA_CODEC_EXT_ENTRY(0x8086280d, 0x100000, "Geminilake HDMI",
                                                   &intel_glk_drv_data),
        {}
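
Most of the hdac_hdmi diff above is a mechanical rename: the hdac_ext_device's embedded hdac_device member changes from ->hdac to ->hdev, local variables follow suit (hdac becomes hdev or edev), and open-coded edev->private_data lookups go through the new hdev_to_hdmi_priv() helper so the container layout is spelled out in one place. The functional changes are small: the DAI table created by hdac_hdmi_create_dais() is cached in hdmi->dai_drv instead of being fetched back from dapm->component->dai_drv, the driver stops hand-maintaining num_nodes/start_nid/end_nid and calls snd_hdac_refresh_widgets() after parsing, and a Cannonlake HDMI codec ID joins the device table. A minimal sketch of the accessor pattern; the macro matches the diff, while the caller and its unused-variable cast are purely illustrative:

#include <sound/hdaudio_ext.h>

/* same definition as the helper added in the diff */
#define hdev_to_hdmi_priv(_hdev) ((to_ehdac_device(_hdev))->private_data)

static void foo_show_accessor(struct hdac_device *hdev)
{
        /* hdac_hdmi_priv is the driver-private type declared in the file */
        struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);

        /* every former edev->private_data dereference now reads like this */
        (void)hdmi;
}
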
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
new file mode 100644
index 0000000..31b0864
--- /dev/null
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017, Maxim Integrated */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <sound/tlv.h>
+#include "max98373.h"
+
+static struct reg_default max98373_reg[] = {
+       {MAX98373_R2000_SW_RESET, 0x00},
+       {MAX98373_R2001_INT_RAW1, 0x00},
+       {MAX98373_R2002_INT_RAW2, 0x00},
+       {MAX98373_R2003_INT_RAW3, 0x00},
+       {MAX98373_R2004_INT_STATE1, 0x00},
+       {MAX98373_R2005_INT_STATE2, 0x00},
+       {MAX98373_R2006_INT_STATE3, 0x00},
+       {MAX98373_R2007_INT_FLAG1, 0x00},
+       {MAX98373_R2008_INT_FLAG2, 0x00},
+       {MAX98373_R2009_INT_FLAG3, 0x00},
+       {MAX98373_R200A_INT_EN1, 0x00},
+       {MAX98373_R200B_INT_EN2, 0x00},
+       {MAX98373_R200C_INT_EN3, 0x00},
+       {MAX98373_R200D_INT_FLAG_CLR1, 0x00},
+       {MAX98373_R200E_INT_FLAG_CLR2, 0x00},
+       {MAX98373_R200F_INT_FLAG_CLR3, 0x00},
+       {MAX98373_R2010_IRQ_CTRL, 0x00},
+       {MAX98373_R2014_THERM_WARN_THRESH, 0x10},
+       {MAX98373_R2015_THERM_SHDN_THRESH, 0x27},
+       {MAX98373_R2016_THERM_HYSTERESIS, 0x01},
+       {MAX98373_R2017_THERM_FOLDBACK_SET, 0xC0},
+       {MAX98373_R2018_THERM_FOLDBACK_EN, 0x00},
+       {MAX98373_R201E_PIN_DRIVE_STRENGTH, 0x55},
+       {MAX98373_R2020_PCM_TX_HIZ_EN_1, 0xFE},
+       {MAX98373_R2021_PCM_TX_HIZ_EN_2, 0xFF},
+       {MAX98373_R2022_PCM_TX_SRC_1, 0x00},
+       {MAX98373_R2023_PCM_TX_SRC_2, 0x00},
+       {MAX98373_R2024_PCM_DATA_FMT_CFG, 0xC0},
+       {MAX98373_R2025_AUDIO_IF_MODE, 0x00},
+       {MAX98373_R2026_PCM_CLOCK_RATIO, 0x04},
+       {MAX98373_R2027_PCM_SR_SETUP_1, 0x08},
+       {MAX98373_R2028_PCM_SR_SETUP_2, 0x88},
+       {MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1, 0x00},
+       {MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2, 0x00},
+       {MAX98373_R202B_PCM_RX_EN, 0x00},
+       {MAX98373_R202C_PCM_TX_EN, 0x00},
+       {MAX98373_R202E_ICC_RX_CH_EN_1, 0x00},
+       {MAX98373_R202F_ICC_RX_CH_EN_2, 0x00},
+       {MAX98373_R2030_ICC_TX_HIZ_EN_1, 0xFF},
+       {MAX98373_R2031_ICC_TX_HIZ_EN_2, 0xFF},
+       {MAX98373_R2032_ICC_LINK_EN_CFG, 0x30},
+       {MAX98373_R2034_ICC_TX_CNTL, 0x00},
+       {MAX98373_R2035_ICC_TX_EN, 0x00},
+       {MAX98373_R2036_SOUNDWIRE_CTRL, 0x05},
+       {MAX98373_R203D_AMP_DIG_VOL_CTRL, 0x00},
+       {MAX98373_R203E_AMP_PATH_GAIN, 0x08},
+       {MAX98373_R203F_AMP_DSP_CFG, 0x02},
+       {MAX98373_R2040_TONE_GEN_CFG, 0x00},
+       {MAX98373_R2041_AMP_CFG, 0x03},
+       {MAX98373_R2042_AMP_EDGE_RATE_CFG, 0x00},
+       {MAX98373_R2043_AMP_EN, 0x00},
+       {MAX98373_R2046_IV_SENSE_ADC_DSP_CFG, 0x04},
+       {MAX98373_R2047_IV_SENSE_ADC_EN, 0x00},
+       {MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0x00},
+       {MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG, 0x00},
+       {MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG, 0x00},
+       {MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0x00},
+       {MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0x00},
+       {MAX98373_R2056_MEAS_ADC_PVDD_CH_EN, 0x00},
+       {MAX98373_R2090_BDE_LVL_HOLD, 0x00},
+       {MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0x00},
+       {MAX98373_R2092_BDE_CLIPPER_MODE, 0x00},
+       {MAX98373_R2097_BDE_L1_THRESH, 0x00},
+       {MAX98373_R2098_BDE_L2_THRESH, 0x00},
+       {MAX98373_R2099_BDE_L3_THRESH, 0x00},
+       {MAX98373_R209A_BDE_L4_THRESH, 0x00},
+       {MAX98373_R209B_BDE_THRESH_HYST, 0x00},
+       {MAX98373_R20A8_BDE_L1_CFG_1, 0x00},
+       {MAX98373_R20A9_BDE_L1_CFG_2, 0x00},
+       {MAX98373_R20AA_BDE_L1_CFG_3, 0x00},
+       {MAX98373_R20AB_BDE_L2_CFG_1, 0x00},
+       {MAX98373_R20AC_BDE_L2_CFG_2, 0x00},
+       {MAX98373_R20AD_BDE_L2_CFG_3, 0x00},
+       {MAX98373_R20AE_BDE_L3_CFG_1, 0x00},
+       {MAX98373_R20AF_BDE_L3_CFG_2, 0x00},
+       {MAX98373_R20B0_BDE_L3_CFG_3, 0x00},
+       {MAX98373_R20B1_BDE_L4_CFG_1, 0x00},
+       {MAX98373_R20B2_BDE_L4_CFG_2, 0x00},
+       {MAX98373_R20B3_BDE_L4_CFG_3, 0x00},
+       {MAX98373_R20B4_BDE_INFINITE_HOLD_RELEASE, 0x00},
+       {MAX98373_R20B5_BDE_EN, 0x00},
+       {MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0x00},
+       {MAX98373_R20D1_DHT_CFG, 0x01},
+       {MAX98373_R20D2_DHT_ATTACK_CFG, 0x02},
+       {MAX98373_R20D3_DHT_RELEASE_CFG, 0x03},
+       {MAX98373_R20D4_DHT_EN, 0x00},
+       {MAX98373_R20E0_LIMITER_THRESH_CFG, 0x00},
+       {MAX98373_R20E1_LIMITER_ATK_REL_RATES, 0x00},
+       {MAX98373_R20E2_LIMITER_EN, 0x00},
+       {MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG, 0x00},
+       {MAX98373_R20FF_GLOBAL_SHDN, 0x00},
+       {MAX98373_R21FF_REV_ID, 0x42},
+};
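
The max98373_reg table above provides the power-on default value for each register, which regmap uses to seed its cache. The sketch below shows how such a table is typically referenced from a regmap_config; the register/value widths and cache type are assumptions for illustration, and the driver's real configuration is presumably defined further down in max98373.c:

#include <linux/regmap.h>

static const struct regmap_config foo_regmap_config = {
        .reg_bits = 16,                 /* assumed register address width */
        .val_bits = 8,                  /* assumed register value width */
        .max_register = MAX98373_R21FF_REV_ID,
        .reg_defaults = max98373_reg,
        .num_reg_defaults = ARRAY_SIZE(max98373_reg),
        .cache_type = REGCACHE_RBTREE,  /* assumed cache type */
};
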
+
+static int max98373_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+       struct snd_soc_codec *codec = codec_dai->codec;
+       struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+       unsigned int format = 0;
+       unsigned int invert = 0;
+
+       dev_dbg(codec->dev, "%s: fmt 0x%08X\n", __func__, fmt);
+
+       switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_NF:
+               break;
+       case SND_SOC_DAIFMT_IB_NF:
+               invert = MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE;
+               break;
+       default:
+               dev_err(codec->dev, "DAI invert mode unsupported\n");
+               return -EINVAL;
+       }
+
+       regmap_update_bits(max98373->regmap,
+               MAX98373_R2026_PCM_CLOCK_RATIO,
+               MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE,
+               invert);
+
+       /* interface format */
+       switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_I2S:
+               format = MAX98373_PCM_FORMAT_I2S;
+               break;
+       case SND_SOC_DAIFMT_LEFT_J:
+               format = MAX98373_PCM_FORMAT_LJ;
+               break;
+       case SND_SOC_DAIFMT_DSP_A:
+               format = MAX98373_PCM_FORMAT_TDM_MODE1;
+               break;
+       case SND_SOC_DAIFMT_DSP_B:
+               format = MAX98373_PCM_FORMAT_TDM_MODE0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       regmap_update_bits(max98373->regmap,
+               MAX98373_R2024_PCM_DATA_FMT_CFG,
+               MAX98373_PCM_MODE_CFG_FORMAT_MASK,
+               format << MAX98373_PCM_MODE_CFG_FORMAT_SHIFT);
+
+       return 0;
+}
+
+/* BCLKs per LRCLK */
+static const int bclk_sel_table[] = {
+       32, 48, 64, 96, 128, 192, 256, 384, 512, 320,
+};
+
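+/*
+ * Map a BCLK-per-LRCLK ratio to its BSEL field value. The register
+ * encoding appears to start at 2 for 32 BCLKs per LRCLK, so index + 2
+ * is returned; 0 signals an unsupported ratio.
+ */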
+static int max98373_get_bclk_sel(int bclk)
+{
+       int i;
+       /* match BCLKs per LRCLK */
+       for (i = 0; i < ARRAY_SIZE(bclk_sel_table); i++) {
+               if (bclk_sel_table[i] == bclk)
+                       return i + 2;
+       }
+       return 0;
+}
+
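+/*
+ * Program the BCLK/LRCLK ratio for non-TDM operation; in TDM mode the
+ * ratio has already been set from the slot configuration in
+ * max98373_dai_tdm_slot().
+ */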
+static int max98373_set_clock(struct snd_soc_codec *codec,
+       struct snd_pcm_hw_params *params)
+{
+       struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+       /* BCLK/LRCLK ratio calculation */
+       int blr_clk_ratio = params_channels(params) * max98373->ch_size;
+       int value;
+
+       if (!max98373->tdm_mode) {
+               /* BCLK configuration */
+               value = max98373_get_bclk_sel(blr_clk_ratio);
+               if (!value) {
+                       dev_err(codec->dev, "BCLK ratio %d not supported\n",
+                               blr_clk_ratio);
+                       return -EINVAL;
+               }
+
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R2026_PCM_CLOCK_RATIO,
+                       MAX98373_PCM_CLK_SETUP_BSEL_MASK,
+                       value);
+       }
+       return 0;
+}
+
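+/*
+ * hw_params: program the channel size and LRCLK sample rate, pick the
+ * IV-sense ADC rate (reduced when interleave mode is active above
+ * 16 kHz), then derive the BCLK ratio in max98373_set_clock().
+ */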
+static int max98373_dai_hw_params(struct snd_pcm_substream *substream,
+       struct snd_pcm_hw_params *params,
+       struct snd_soc_dai *dai)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+       unsigned int sampling_rate = 0;
+       unsigned int chan_sz = 0;
+
+       /* pcm mode configuration */
+       switch (snd_pcm_format_width(params_format(params))) {
+       case 16:
+               chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_16;
+               break;
+       case 24:
+               chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_24;
+               break;
+       case 32:
+               chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_32;
+               break;
+       default:
+               dev_err(codec->dev, "format unsupported %d\n",
+                       params_format(params));
+               goto err;
+       }
+
+       max98373->ch_size = snd_pcm_format_width(params_format(params));
+
+       regmap_update_bits(max98373->regmap,
+               MAX98373_R2024_PCM_DATA_FMT_CFG,
+               MAX98373_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+       dev_dbg(codec->dev, "format supported %d\n",
+               params_format(params));
+
+       /* sampling rate configuration */
+       switch (params_rate(params)) {
+       case 8000:
+               sampling_rate = MAX98373_PCM_SR_SET1_SR_8000;
+               break;
+       case 11025:
+               sampling_rate = MAX98373_PCM_SR_SET1_SR_11025;
+               break;
+       case 12000:
+               sampling_rate = MAX98373_PCM_SR_SET1_SR_12000;
+               break;
+       case 16000:
+               sampling_rate = MAX98373_PCM_SR_SET1_SR_16000;
+               break;
+       case 22050:
+               sampling_rate = MAX98373_PCM_SR_SET1_SR_22050;
+               break;
+       case 24000:
+               sampling_rate = MAX98373_PCM_SR_SET1_SR_24000;
+               break;
+       case 32000:
+               sampling_rate = MAX98373_PCM_SR_SET1_SR_32000;
+               break;
+       case 44100:
+               sampling_rate = MAX98373_PCM_SR_SET1_SR_44100;
+               break;
+       case 48000:
+               sampling_rate = MAX98373_PCM_SR_SET1_SR_48000;
+               break;
+       default:
+               dev_err(codec->dev, "rate %d not supported\n",
+                       params_rate(params));
+               goto err;
+       }
+
+       /* set DAI_SR to correct LRCLK frequency */
+       regmap_update_bits(max98373->regmap,
+               MAX98373_R2027_PCM_SR_SETUP_1,
+               MAX98373_PCM_SR_SET1_SR_MASK,
+               sampling_rate);
+       regmap_update_bits(max98373->regmap,
+               MAX98373_R2028_PCM_SR_SETUP_2,
+               MAX98373_PCM_SR_SET2_SR_MASK,
+               sampling_rate << MAX98373_PCM_SR_SET2_SR_SHIFT);
+
+       /* set sampling rate of IV */
+       if (max98373->interleave_mode &&
+           sampling_rate > MAX98373_PCM_SR_SET1_SR_16000)
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R2028_PCM_SR_SETUP_2,
+                       MAX98373_PCM_SR_SET2_IVADC_SR_MASK,
+                       sampling_rate - 3);
+       else
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R2028_PCM_SR_SETUP_2,
+                       MAX98373_PCM_SR_SET2_IVADC_SR_MASK,
+                       sampling_rate);
+
+       return max98373_set_clock(codec, params);
+err:
+       return -EINVAL;
+}
+
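+/*
+ * TDM slot setup: derive the BCLK ratio from slots * slot_width, route
+ * the first two active Rx slots to the mono mixer and tri-state every
+ * Tx slot that is not claimed in tx_mask.
+ */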
+static int max98373_dai_tdm_slot(struct snd_soc_dai *dai,
+       unsigned int tx_mask, unsigned int rx_mask,
+       int slots, int slot_width)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+       int bsel = 0;
+       unsigned int chan_sz = 0;
+       unsigned int mask;
+       int x, slot_found;
+
+       if (!tx_mask && !rx_mask && !slots && !slot_width)
+               max98373->tdm_mode = false;
+       else
+               max98373->tdm_mode = true;
+
+       /* BCLK configuration */
+       bsel = max98373_get_bclk_sel(slots * slot_width);
+       if (bsel == 0) {
+               dev_err(codec->dev, "BCLK %d not supported\n",
+                       slots * slot_width);
+               return -EINVAL;
+       }
+
+       regmap_update_bits(max98373->regmap,
+               MAX98373_R2026_PCM_CLOCK_RATIO,
+               MAX98373_PCM_CLK_SETUP_BSEL_MASK,
+               bsel);
+
+       /* Channel size configuration */
+       switch (slot_width) {
+       case 16:
+               chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_16;
+               break;
+       case 24:
+               chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_24;
+               break;
+       case 32:
+               chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_32;
+               break;
+       default:
+               dev_err(codec->dev, "format unsupported %d\n",
+                       slot_width);
+               return -EINVAL;
+       }
+
+       regmap_update_bits(max98373->regmap,
+               MAX98373_R2024_PCM_DATA_FMT_CFG,
+               MAX98373_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+       /* Rx slot configuration */
+       slot_found = 0;
+       mask = rx_mask;
+       for (x = 0 ; x < 16 ; x++, mask >>= 1) {
+               if (mask & 0x1) {
+                       if (slot_found == 0)
+                               regmap_update_bits(max98373->regmap,
+                                       MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+                                       MAX98373_PCM_TO_SPK_CH0_SRC_MASK, x);
+                       else
+                               regmap_write(max98373->regmap,
+                                       MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2,
+                                       x);
+                       slot_found++;
+                       if (slot_found > 1)
+                               break;
+               }
+       }
+
+       /* Tx slot Hi-Z configuration */
+       regmap_write(max98373->regmap,
+               MAX98373_R2020_PCM_TX_HIZ_EN_1,
+               ~tx_mask & 0xFF);
+       regmap_write(max98373->regmap,
+               MAX98373_R2021_PCM_TX_HIZ_EN_2,
+               (~tx_mask & 0xFF00) >> 8);
+
+       return 0;
+}
+
+#define MAX98373_RATES SNDRV_PCM_RATE_8000_96000
+
+#define MAX98373_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+       SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops max98373_dai_ops = {
+       .set_fmt = max98373_dai_set_fmt,
+       .hw_params = max98373_dai_hw_params,
+       .set_tdm_slot = max98373_dai_tdm_slot,
+};
+
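+/*
+ * Toggle the global enable bit around DAC power events; the TDM flag
+ * is cleared again on power-down.
+ */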
+static int max98373_dac_event(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+       struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+
+       switch (event) {
+       case SND_SOC_DAPM_POST_PMU:
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R20FF_GLOBAL_SHDN,
+                       MAX98373_GLOBAL_EN_MASK, 1);
+               break;
+       case SND_SOC_DAPM_POST_PMD:
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R20FF_GLOBAL_SHDN,
+                       MAX98373_GLOBAL_EN_MASK, 0);
+               max98373->tdm_mode = 0;
+               break;
+       default:
+               return 0;
+       }
+       return 0;
+}
+
+static const char * const max98373_switch_text[] = {
+       "Left", "Right", "LeftRight"};
+
+static const struct soc_enum dai_sel_enum =
+       SOC_ENUM_SINGLE(MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+               MAX98373_PCM_TO_SPK_MONOMIX_CFG_SHIFT,
+               3, max98373_switch_text);
+
+static const struct snd_kcontrol_new max98373_dai_controls =
+       SOC_DAPM_ENUM("DAI Sel", dai_sel_enum);
+
+static const struct snd_kcontrol_new max98373_vi_control =
+       SOC_DAPM_SINGLE("Switch", MAX98373_R202C_PCM_TX_EN, 0, 1, 0);
+
+static const struct snd_kcontrol_new max98373_spkfb_control =
+       SOC_DAPM_SINGLE("Switch", MAX98373_R2043_AMP_EN, 1, 1, 0);
+
+static const struct snd_soc_dapm_widget max98373_dapm_widgets[] = {
+SND_SOC_DAPM_DAC_E("Amp Enable", "HiFi Playback",
+       MAX98373_R202B_PCM_RX_EN, 0, 0, max98373_dac_event,
+       SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_MUX("DAI Sel Mux", SND_SOC_NOPM, 0, 0,
+       &max98373_dai_controls),
+SND_SOC_DAPM_OUTPUT("BE_OUT"),
+SND_SOC_DAPM_AIF_OUT("Voltage Sense", "HiFi Capture", 0,
+       MAX98373_R2047_IV_SENSE_ADC_EN, 0, 0),
+SND_SOC_DAPM_AIF_OUT("Current Sense", "HiFi Capture", 0,
+       MAX98373_R2047_IV_SENSE_ADC_EN, 1, 0),
+SND_SOC_DAPM_AIF_OUT("Speaker FB Sense", "HiFi Capture", 0,
+       SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_SWITCH("VI Sense", SND_SOC_NOPM, 0, 0,
+       &max98373_vi_control),
+SND_SOC_DAPM_SWITCH("SpkFB Sense", SND_SOC_NOPM, 0, 0,
+       &max98373_spkfb_control),
+SND_SOC_DAPM_SIGGEN("VMON"),
+SND_SOC_DAPM_SIGGEN("IMON"),
+SND_SOC_DAPM_SIGGEN("FBMON"),
+};
+
+static DECLARE_TLV_DB_SCALE(max98373_digital_tlv, 0, -50, 0);
+static const DECLARE_TLV_DB_RANGE(max98373_spk_tlv,
+       0, 8, TLV_DB_SCALE_ITEM(0, 50, 0),
+       9, 10, TLV_DB_SCALE_ITEM(500, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_spkgain_max_tlv,
+       0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_step_size_tlv,
+       0, 1, TLV_DB_SCALE_ITEM(25, 25, 0),
+       2, 4, TLV_DB_SCALE_ITEM(100, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_spkgain_min_tlv,
+       0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_rotation_point_tlv,
+       0, 1, TLV_DB_SCALE_ITEM(-50, -50, 0),
+       2, 7, TLV_DB_SCALE_ITEM(-200, -100, 0),
+       8, 9, TLV_DB_SCALE_ITEM(-1000, -200, 0),
+       10, 11, TLV_DB_SCALE_ITEM(-1500, -300, 0),
+       12, 13, TLV_DB_SCALE_ITEM(-2000, -200, 0),
+       14, 15, TLV_DB_SCALE_ITEM(-2500, -500, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_limiter_thresh_tlv,
+       0, 15, TLV_DB_SCALE_ITEM(0, -100, 0),
+);
+
+static const DECLARE_TLV_DB_RANGE(max98373_bde_gain_tlv,
+       0, 60, TLV_DB_SCALE_ITEM(0, -25, 0),
+);
+
+static bool max98373_readable_register(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case MAX98373_R2001_INT_RAW1 ... MAX98373_R200C_INT_EN3:
+       case MAX98373_R2010_IRQ_CTRL:
+       case MAX98373_R2014_THERM_WARN_THRESH
+               ... MAX98373_R2018_THERM_FOLDBACK_EN:
+       case MAX98373_R201E_PIN_DRIVE_STRENGTH
+               ... MAX98373_R2036_SOUNDWIRE_CTRL:
+       case MAX98373_R203D_AMP_DIG_VOL_CTRL ... MAX98373_R2043_AMP_EN:
+       case MAX98373_R2046_IV_SENSE_ADC_DSP_CFG
+               ... MAX98373_R2047_IV_SENSE_ADC_EN:
+       case MAX98373_R2051_MEAS_ADC_SAMPLING_RATE
+               ... MAX98373_R2056_MEAS_ADC_PVDD_CH_EN:
+       case MAX98373_R2090_BDE_LVL_HOLD ... MAX98373_R2092_BDE_CLIPPER_MODE:
+       case MAX98373_R2097_BDE_L1_THRESH
+               ... MAX98373_R209B_BDE_THRESH_HYST:
+       case MAX98373_R20A8_BDE_L1_CFG_1 ... MAX98373_R20B3_BDE_L4_CFG_3:
+       case MAX98373_R20B5_BDE_EN ... MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+       case MAX98373_R20D1_DHT_CFG ... MAX98373_R20D4_DHT_EN:
+       case MAX98373_R20E0_LIMITER_THRESH_CFG ... MAX98373_R20E2_LIMITER_EN:
+       case MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG
+               ... MAX98373_R20FF_GLOBAL_SHDN:
+       case MAX98373_R21FF_REV_ID:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
+       case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
+       case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
+       case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+       case MAX98373_R21FF_REV_ID:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static const char * const max98373_output_voltage_lvl_text[] = {
+       "5.43V", "6.09V", "6.83V", "7.67V", "8.60V",
+       "9.65V", "10.83V", "12.15V", "13.63V", "15.29V"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_out_volt_enum,
+                           MAX98373_R203E_AMP_PATH_GAIN, 0,
+                           max98373_output_voltage_lvl_text);
+
+static const char * const max98373_dht_attack_rate_text[] = {
+       "17.5us", "35us", "70us", "140us",
+       "280us", "560us", "1120us", "2240us"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_dht_attack_rate_enum,
+                           MAX98373_R20D2_DHT_ATTACK_CFG, 0,
+                           max98373_dht_attack_rate_text);
+
+static const char * const max98373_dht_release_rate_text[] = {
+       "45ms", "225ms", "450ms", "1150ms",
+       "2250ms", "3100ms", "4500ms", "6750ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_dht_release_rate_enum,
+                           MAX98373_R20D3_DHT_RELEASE_CFG, 0,
+                           max98373_dht_release_rate_text);
+
+static const char * const max98373_limiter_attack_rate_text[] = {
+       "10us", "20us", "40us", "80us",
+       "160us", "320us", "640us", "1.28ms",
+       "2.56ms", "5.12ms", "10.24ms", "20.48ms",
+       "40.96ms", "81.92ms", "163.84ms", "327.68ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_limiter_attack_rate_enum,
+                           MAX98373_R20E1_LIMITER_ATK_REL_RATES, 4,
+                           max98373_limiter_attack_rate_text);
+
+static const char * const max98373_limiter_release_rate_text[] = {
+       "40us", "80us", "160us", "320us",
+       "640us", "1.28ms", "2.56ms", "5.120ms",
+       "10.24ms", "20.48ms", "40.96ms", "81.92ms",
+       "163.84ms", "327.68ms", "655.36ms", "1310.72ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_limiter_release_rate_enum,
+                           MAX98373_R20E1_LIMITER_ATK_REL_RATES, 0,
+                           max98373_limiter_release_rate_text);
+
+static const char * const max98373_ADC_samplerate_text[] = {
+       "333kHz", "192kHz", "64kHz", "48kHz"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_adc_samplerate_enum,
+                           MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0,
+                           max98373_ADC_samplerate_text);
+
+static const struct snd_kcontrol_new max98373_snd_controls[] = {
+SOC_SINGLE("Digital Vol Sel Switch", MAX98373_R203F_AMP_DSP_CFG,
+       MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
+SOC_SINGLE("Volume Location Switch", MAX98373_R203F_AMP_DSP_CFG,
+       MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
+SOC_SINGLE("Ramp Up Switch", MAX98373_R203F_AMP_DSP_CFG,
+       MAX98373_AMP_DSP_CFG_RMP_UP_SHIFT, 1, 0),
+SOC_SINGLE("Ramp Down Switch", MAX98373_R203F_AMP_DSP_CFG,
+       MAX98373_AMP_DSP_CFG_RMP_DN_SHIFT, 1, 0),
+SOC_SINGLE("CLK Monitor Switch", MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG,
+       MAX98373_CLOCK_MON_SHIFT, 1, 0),
+SOC_SINGLE("Dither Switch", MAX98373_R203F_AMP_DSP_CFG,
+       MAX98373_AMP_DSP_CFG_DITH_SHIFT, 1, 0),
+SOC_SINGLE("DC Blocker Switch", MAX98373_R203F_AMP_DSP_CFG,
+       MAX98373_AMP_DSP_CFG_DCBLK_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Digital Volume", MAX98373_R203D_AMP_DIG_VOL_CTRL,
+       0, 0x7F, 0, max98373_digital_tlv),
+SOC_SINGLE_TLV("Speaker Volume", MAX98373_R203E_AMP_PATH_GAIN,
+       MAX98373_SPK_DIGI_GAIN_SHIFT, 10, 0, max98373_spk_tlv),
+SOC_SINGLE_TLV("FS Max Volume", MAX98373_R203E_AMP_PATH_GAIN,
+       MAX98373_FS_GAIN_MAX_SHIFT, 9, 0, max98373_spkgain_max_tlv),
+SOC_ENUM("Output Voltage", max98373_out_volt_enum),
+/* Dynamic Headroom Tracking */
+SOC_SINGLE("DHT Switch", MAX98373_R20D4_DHT_EN,
+       MAX98373_DHT_EN_SHIFT, 1, 0),
+SOC_SINGLE_TLV("DHT Min Volume", MAX98373_R20D1_DHT_CFG,
+       MAX98373_DHT_SPK_GAIN_MIN_SHIFT, 9, 0, max98373_dht_spkgain_min_tlv),
+SOC_SINGLE_TLV("DHT Rot Pnt Volume", MAX98373_R20D1_DHT_CFG,
+       MAX98373_DHT_ROT_PNT_SHIFT, 15, 0, max98373_dht_rotation_point_tlv),
+SOC_SINGLE_TLV("DHT Attack Step Volume", MAX98373_R20D2_DHT_ATTACK_CFG,
+       MAX98373_DHT_ATTACK_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
+SOC_SINGLE_TLV("DHT Release Step Volume", MAX98373_R20D3_DHT_RELEASE_CFG,
+       MAX98373_DHT_RELEASE_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
+SOC_ENUM("DHT Attack Rate", max98373_dht_attack_rate_enum),
+SOC_ENUM("DHT Release Rate", max98373_dht_release_rate_enum),
+/* ADC configuration */
+SOC_SINGLE("ADC PVDD CH Switch", MAX98373_R2056_MEAS_ADC_PVDD_CH_EN, 0, 1, 0),
+SOC_SINGLE("ADC PVDD FLT Switch", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
+       MAX98373_FLT_EN_SHIFT, 1, 0),
+SOC_SINGLE("ADC TEMP FLT Switch", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
+       MAX98373_FLT_EN_SHIFT, 1, 0),
+SOC_SINGLE("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE("ADC PVDD FLT Coeff", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
+       0, 0x3, 0),
+SOC_SINGLE("ADC TEMP FLT Coeff", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
+       0, 0x3, 0),
+SOC_ENUM("ADC SampleRate", max98373_adc_samplerate_enum),
+/* Brownout Detection Engine */
+SOC_SINGLE("BDE Switch", MAX98373_R20B5_BDE_EN, MAX98373_BDE_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL4 Mute Switch", MAX98373_R20B2_BDE_L4_CFG_2,
+       MAX98373_LVL4_MUTE_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL4 Hold Switch", MAX98373_R20B2_BDE_L4_CFG_2,
+       MAX98373_LVL4_HOLD_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL1 Thresh", MAX98373_R2097_BDE_L1_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL2 Thresh", MAX98373_R2098_BDE_L2_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL3 Thresh", MAX98373_R2099_BDE_L3_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL4 Thresh", MAX98373_R209A_BDE_L4_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0),
+SOC_SINGLE("BDE Clip Mode Switch", MAX98373_R2092_BDE_CLIPPER_MODE, 0, 1, 0),
+SOC_SINGLE("BDE Thresh Hysteresis", MAX98373_R209B_BDE_THRESH_HYST, 0, 0xFF, 0),
+SOC_SINGLE("BDE Hold Time", MAX98373_R2090_BDE_LVL_HOLD, 0, 0xFF, 0),
+SOC_SINGLE("BDE Attack Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 4, 0xF, 0),
+SOC_SINGLE("BDE Release Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0, 0xF, 0),
+SOC_SINGLE_TLV("BDE LVL1 Clip Thresh Volume", MAX98373_R20A9_BDE_L1_CFG_2,
+       0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Clip Thresh Volume", MAX98373_R20AC_BDE_L2_CFG_2,
+       0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Clip Thresh Volume", MAX98373_R20AF_BDE_L3_CFG_2,
+       0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Clip Thresh Volume", MAX98373_R20B2_BDE_L4_CFG_2,
+       0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL1 Clip Reduction Volume", MAX98373_R20AA_BDE_L1_CFG_3,
+       0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Clip Reduction Volume", MAX98373_R20AD_BDE_L2_CFG_3,
+       0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Clip Reduction Volume", MAX98373_R20B0_BDE_L3_CFG_3,
+       0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Clip Reduction Volume", MAX98373_R20B3_BDE_L4_CFG_3,
+       0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL1 Limiter Thresh Volume", MAX98373_R20A8_BDE_L1_CFG_1,
+       0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Limiter Thresh Volume", MAX98373_R20AB_BDE_L2_CFG_1,
+       0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Limiter Thresh Volume", MAX98373_R20AE_BDE_L3_CFG_1,
+       0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Limiter Thresh Volume", MAX98373_R20B1_BDE_L4_CFG_1,
+       0, 0xF, 0, max98373_limiter_thresh_tlv),
+/* Limiter */
+SOC_SINGLE("Limiter Switch", MAX98373_R20E2_LIMITER_EN,
+       MAX98373_LIMITER_EN_SHIFT, 1, 0),
+SOC_SINGLE("Limiter Src Switch", MAX98373_R20E0_LIMITER_THRESH_CFG,
+       MAX98373_LIMITER_THRESH_SRC_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Limiter Thresh Volume", MAX98373_R20E0_LIMITER_THRESH_CFG,
+       MAX98373_LIMITER_THRESH_SHIFT, 15, 0, max98373_limiter_thresh_tlv),
+SOC_ENUM("Limiter Attack Rate", max98373_limiter_attack_rate_enum),
+SOC_ENUM("Limiter Release Rate", max98373_limiter_release_rate_enum),
+};
+
+static const struct snd_soc_dapm_route max98373_audio_map[] = {
+       /* Playback */
+       {"DAI Sel Mux", "Left", "Amp Enable"},
+       {"DAI Sel Mux", "Right", "Amp Enable"},
+       {"DAI Sel Mux", "LeftRight", "Amp Enable"},
+       {"BE_OUT", NULL, "DAI Sel Mux"},
+       /* Capture */
+       { "VI Sense", "Switch", "VMON" },
+       { "VI Sense", "Switch", "IMON" },
+       { "SpkFB Sense", "Switch", "FBMON" },
+       { "Voltage Sense", NULL, "VI Sense" },
+       { "Current Sense", NULL, "VI Sense" },
+       { "Speaker FB Sense", NULL, "SpkFB Sense" },
+};
+
+static struct snd_soc_dai_driver max98373_dai[] = {
+       {
+               .name = "max98373-aif1",
+               .playback = {
+                       .stream_name = "HiFi Playback",
+                       .channels_min = 1,
+                       .channels_max = 2,
+                       .rates = MAX98373_RATES,
+                       .formats = MAX98373_FORMATS,
+               },
+               .capture = {
+                       .stream_name = "HiFi Capture",
+                       .channels_min = 1,
+                       .channels_max = 2,
+                       .rates = MAX98373_RATES,
+                       .formats = MAX98373_FORMATS,
+               },
+               .ops = &max98373_dai_ops,
+       }
+};
+
+static int max98373_probe(struct snd_soc_codec *codec)
+{
+       struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+
+       codec->control_data = max98373->regmap;
+
+       /* Software Reset */
+       regmap_write(max98373->regmap,
+               MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+
+       /* IV default slot configuration */
+       regmap_write(max98373->regmap,
+               MAX98373_R2020_PCM_TX_HIZ_EN_1,
+               0xFF);
+       regmap_write(max98373->regmap,
+               MAX98373_R2021_PCM_TX_HIZ_EN_2,
+               0xFF);
+       /* L/R mix configuration */
+       regmap_write(max98373->regmap,
+               MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+               0x80);
+       regmap_write(max98373->regmap,
+               MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2,
+               0x1);
+       /* Set initial volume (0dB) */
+       regmap_write(max98373->regmap,
+               MAX98373_R203D_AMP_DIG_VOL_CTRL,
+               0x00);
+       regmap_write(max98373->regmap,
+               MAX98373_R203E_AMP_PATH_GAIN,
+               0x00);
+       /* Enable DC blocker */
+       regmap_write(max98373->regmap,
+               MAX98373_R203F_AMP_DSP_CFG,
+               0x3);
+       /* Enable IMON VMON DC blocker */
+       regmap_write(max98373->regmap,
+               MAX98373_R2046_IV_SENSE_ADC_DSP_CFG,
+               0x7);
+       /* voltage, current slot configuration */
+       regmap_write(max98373->regmap,
+               MAX98373_R2022_PCM_TX_SRC_1,
+               (max98373->i_slot << MAX98373_PCM_TX_CH_SRC_A_I_SHIFT |
+               max98373->v_slot) & 0xFF);
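+       /* Release Hi-Z on the Tx slots carrying voltage and current sense */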
+       if (max98373->v_slot < 8)
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R2020_PCM_TX_HIZ_EN_1,
+                       1 << max98373->v_slot, 0);
+       else
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R2021_PCM_TX_HIZ_EN_2,
+                       1 << (max98373->v_slot - 8), 0);
+
+       if (max98373->i_slot < 8)
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R2020_PCM_TX_HIZ_EN_1,
+                       1 << max98373->i_slot, 0);
+       else
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R2021_PCM_TX_HIZ_EN_2,
+                       1 << (max98373->i_slot - 8), 0);
+
+       /* speaker feedback slot configuration */
+       regmap_write(max98373->regmap,
+               MAX98373_R2023_PCM_TX_SRC_2,
+               max98373->spkfb_slot & 0xFF);
+
+       /* Set interleave mode */
+       if (max98373->interleave_mode)
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R2024_PCM_DATA_FMT_CFG,
+                       MAX98373_PCM_TX_CH_INTERLEAVE_MASK,
+                       MAX98373_PCM_TX_CH_INTERLEAVE_MASK);
+
+       /* Speaker enable */
+       regmap_update_bits(max98373->regmap,
+               MAX98373_R2043_AMP_EN,
+               MAX98373_SPK_EN_MASK, 1);
+
+       return 0;
+}
+
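+/*
+ * System sleep: switch the register cache to cache-only and mark it
+ * dirty; resume soft-resets the device and replays the cache.
+ */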
+#ifdef CONFIG_PM_SLEEP
+static int max98373_suspend(struct device *dev)
+{
+       struct max98373_priv *max98373 = dev_get_drvdata(dev);
+
+       regcache_cache_only(max98373->regmap, true);
+       regcache_mark_dirty(max98373->regmap);
+       return 0;
+}
+static int max98373_resume(struct device *dev)
+{
+       struct max98373_priv *max98373 = dev_get_drvdata(dev);
+
+       regmap_write(max98373->regmap,
+               MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+       regcache_cache_only(max98373->regmap, false);
+       regcache_sync(max98373->regmap);
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops max98373_pm = {
+       SET_SYSTEM_SLEEP_PM_OPS(max98373_suspend, max98373_resume)
+};
+
+static const struct snd_soc_codec_driver soc_codec_dev_max98373 = {
+       .probe = max98373_probe,
+       .component_driver = {
+               .controls = max98373_snd_controls,
+               .num_controls = ARRAY_SIZE(max98373_snd_controls),
+               .dapm_widgets = max98373_dapm_widgets,
+               .num_dapm_widgets = ARRAY_SIZE(max98373_dapm_widgets),
+               .dapm_routes = max98373_audio_map,
+               .num_dapm_routes = ARRAY_SIZE(max98373_audio_map),
+       },
+};
+
+static const struct regmap_config max98373_regmap = {
+       .reg_bits = 16,
+       .val_bits = 8,
+       .max_register = MAX98373_R21FF_REV_ID,
+       .reg_defaults  = max98373_reg,
+       .num_reg_defaults = ARRAY_SIZE(max98373_reg),
+       .readable_reg = max98373_readable_register,
+       .volatile_reg = max98373_volatile_reg,
+       .cache_type = REGCACHE_RBTREE,
+};
+
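+/*
+ * Read the PCM Tx slot numbers for voltage, current and speaker
+ * feedback sense from DT/ACPI properties, defaulting to slots 0, 1
+ * and 2 respectively.
+ */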
+static void max98373_slot_config(struct i2c_client *i2c,
+       struct max98373_priv *max98373)
+{
+       int value;
+       struct device *dev = &i2c->dev;
+
+       if (!device_property_read_u32(dev, "maxim,vmon-slot-no", &value))
+               max98373->v_slot = value & 0xF;
+       else
+               max98373->v_slot = 0;
+
+       if (!device_property_read_u32(dev, "maxim,imon-slot-no", &value))
+               max98373->i_slot = value & 0xF;
+       else
+               max98373->i_slot = 1;
+
+       if (!device_property_read_u32(dev, "maxim,spkfb-slot-no", &value))
+               max98373->spkfb_slot = value & 0xF;
+       else
+               max98373->spkfb_slot = 2;
+}
+
+static int max98373_i2c_probe(struct i2c_client *i2c,
+       const struct i2c_device_id *id)
+{
+       int ret = 0;
+       int reg = 0;
+       struct max98373_priv *max98373 = NULL;
+
+       max98373 = devm_kzalloc(&i2c->dev, sizeof(*max98373), GFP_KERNEL);
+
+       if (!max98373) {
+               ret = -ENOMEM;
+               return ret;
+       }
+       i2c_set_clientdata(i2c, max98373);
+
+       /* update interleave mode info */
+       if (device_property_read_bool(&i2c->dev, "maxim,interleave_mode"))
+               max98373->interleave_mode = 1;
+       else
+               max98373->interleave_mode = 0;
+
+       /* regmap initialization */
+       max98373->regmap
+               = devm_regmap_init_i2c(i2c, &max98373_regmap);
+       if (IS_ERR(max98373->regmap)) {
+               ret = PTR_ERR(max98373->regmap);
+               dev_err(&i2c->dev,
+                       "Failed to allocate regmap: %d\n", ret);
+               return ret;
+       }
+
+       /* Check Revision ID */
+       ret = regmap_read(max98373->regmap,
+               MAX98373_R21FF_REV_ID, &reg);
+       if (ret < 0) {
+               dev_err(&i2c->dev,
+                       "Failed to read: 0x%02X\n", MAX98373_R21FF_REV_ID);
+               return ret;
+       }
+       dev_info(&i2c->dev, "MAX98373 revision ID: 0x%02X\n", reg);
+
+       /* voltage/current slot configuration */
+       max98373_slot_config(i2c, max98373);
+
+       /* codec registration */
+       ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98373,
+               max98373_dai, ARRAY_SIZE(max98373_dai));
+       if (ret < 0)
+               dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
+
+       return ret;
+}
+
+static int max98373_i2c_remove(struct i2c_client *client)
+{
+       snd_soc_unregister_codec(&client->dev);
+       return 0;
+}
+
+static const struct i2c_device_id max98373_i2c_id[] = {
+       { "max98373", 0},
+       { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max98373_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id max98373_of_match[] = {
+       { .compatible = "maxim,max98373", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, max98373_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id max98373_acpi_match[] = {
+       { "MX98373", 0 },
+       {},
+};
+MODULE_DEVICE_TABLE(acpi, max98373_acpi_match);
+#endif
+
+static struct i2c_driver max98373_i2c_driver = {
+       .driver = {
+               .name = "max98373",
+               .of_match_table = of_match_ptr(max98373_of_match),
+               .acpi_match_table = ACPI_PTR(max98373_acpi_match),
+               .pm = &max98373_pm,
+       },
+       .probe = max98373_i2c_probe,
+       .remove = max98373_i2c_remove,
+       .id_table = max98373_i2c_id,
+};
+
+module_i2c_driver(max98373_i2c_driver);
+
+MODULE_DESCRIPTION("ALSA SoC MAX98373 driver");
+MODULE_AUTHOR("Ryan Lee <ryans.lee@maximintegrated.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h
new file mode 100644 (file)
index 0000000..d0b359d
--- /dev/null
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017, Maxim Integrated */
+#ifndef _MAX98373_H
+#define _MAX98373_H
+
+#define MAX98373_R2000_SW_RESET 0x2000
+#define MAX98373_R2001_INT_RAW1 0x2001
+#define MAX98373_R2002_INT_RAW2 0x2002
+#define MAX98373_R2003_INT_RAW3 0x2003
+#define MAX98373_R2004_INT_STATE1 0x2004
+#define MAX98373_R2005_INT_STATE2 0x2005
+#define MAX98373_R2006_INT_STATE3 0x2006
+#define MAX98373_R2007_INT_FLAG1 0x2007
+#define MAX98373_R2008_INT_FLAG2 0x2008
+#define MAX98373_R2009_INT_FLAG3 0x2009
+#define MAX98373_R200A_INT_EN1 0x200A
+#define MAX98373_R200B_INT_EN2 0x200B
+#define MAX98373_R200C_INT_EN3 0x200C
+#define MAX98373_R200D_INT_FLAG_CLR1 0x200D
+#define MAX98373_R200E_INT_FLAG_CLR2 0x200E
+#define MAX98373_R200F_INT_FLAG_CLR3 0x200F
+#define MAX98373_R2010_IRQ_CTRL 0x2010
+#define MAX98373_R2014_THERM_WARN_THRESH 0x2014
+#define MAX98373_R2015_THERM_SHDN_THRESH 0x2015
+#define MAX98373_R2016_THERM_HYSTERESIS 0x2016
+#define MAX98373_R2017_THERM_FOLDBACK_SET 0x2017
+#define MAX98373_R2018_THERM_FOLDBACK_EN 0x2018
+#define MAX98373_R201E_PIN_DRIVE_STRENGTH 0x201E
+#define MAX98373_R2020_PCM_TX_HIZ_EN_1 0x2020
+#define MAX98373_R2021_PCM_TX_HIZ_EN_2 0x2021
+#define MAX98373_R2022_PCM_TX_SRC_1 0x2022
+#define MAX98373_R2023_PCM_TX_SRC_2 0x2023
+#define MAX98373_R2024_PCM_DATA_FMT_CFG        0x2024
+#define MAX98373_R2025_AUDIO_IF_MODE 0x2025
+#define MAX98373_R2026_PCM_CLOCK_RATIO 0x2026
+#define MAX98373_R2027_PCM_SR_SETUP_1 0x2027
+#define MAX98373_R2028_PCM_SR_SETUP_2 0x2028
+#define MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1 0x2029
+#define MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2 0x202A
+#define MAX98373_R202B_PCM_RX_EN 0x202B
+#define MAX98373_R202C_PCM_TX_EN 0x202C
+#define MAX98373_R202E_ICC_RX_CH_EN_1 0x202E
+#define MAX98373_R202F_ICC_RX_CH_EN_2 0x202F
+#define MAX98373_R2030_ICC_TX_HIZ_EN_1 0x2030
+#define MAX98373_R2031_ICC_TX_HIZ_EN_2 0x2031
+#define MAX98373_R2032_ICC_LINK_EN_CFG 0x2032
+#define MAX98373_R2034_ICC_TX_CNTL 0x2034
+#define MAX98373_R2035_ICC_TX_EN 0x2035
+#define MAX98373_R2036_SOUNDWIRE_CTRL 0x2036
+#define MAX98373_R203D_AMP_DIG_VOL_CTRL 0x203D
+#define MAX98373_R203E_AMP_PATH_GAIN 0x203E
+#define MAX98373_R203F_AMP_DSP_CFG 0x203F
+#define MAX98373_R2040_TONE_GEN_CFG 0x2040
+#define MAX98373_R2041_AMP_CFG 0x2041
+#define MAX98373_R2042_AMP_EDGE_RATE_CFG 0x2042
+#define MAX98373_R2043_AMP_EN 0x2043
+#define MAX98373_R2046_IV_SENSE_ADC_DSP_CFG 0x2046
+#define MAX98373_R2047_IV_SENSE_ADC_EN 0x2047
+#define MAX98373_R2051_MEAS_ADC_SAMPLING_RATE 0x2051
+#define MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG 0x2052
+#define MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG 0x2053
+#define MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK 0x2054
+#define MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK 0x2055
+#define MAX98373_R2056_MEAS_ADC_PVDD_CH_EN 0x2056
+#define MAX98373_R2090_BDE_LVL_HOLD 0x2090
+#define MAX98373_R2091_BDE_GAIN_ATK_REL_RATE 0x2091
+#define MAX98373_R2092_BDE_CLIPPER_MODE 0x2092
+#define MAX98373_R2097_BDE_L1_THRESH 0x2097
+#define MAX98373_R2098_BDE_L2_THRESH 0x2098
+#define MAX98373_R2099_BDE_L3_THRESH 0x2099
+#define MAX98373_R209A_BDE_L4_THRESH 0x209A
+#define MAX98373_R209B_BDE_THRESH_HYST 0x209B
+#define MAX98373_R20A8_BDE_L1_CFG_1 0x20A8
+#define MAX98373_R20A9_BDE_L1_CFG_2 0x20A9
+#define MAX98373_R20AA_BDE_L1_CFG_3 0x20AA
+#define MAX98373_R20AB_BDE_L2_CFG_1 0x20AB
+#define MAX98373_R20AC_BDE_L2_CFG_2 0x20AC
+#define MAX98373_R20AD_BDE_L2_CFG_3 0x20AD
+#define MAX98373_R20AE_BDE_L3_CFG_1 0x20AE
+#define MAX98373_R20AF_BDE_L3_CFG_2 0x20AF
+#define MAX98373_R20B0_BDE_L3_CFG_3 0x20B0
+#define MAX98373_R20B1_BDE_L4_CFG_1 0x20B1
+#define MAX98373_R20B2_BDE_L4_CFG_2 0x20B2
+#define MAX98373_R20B3_BDE_L4_CFG_3 0x20B3
+#define MAX98373_R20B4_BDE_INFINITE_HOLD_RELEASE 0x20B4
+#define MAX98373_R20B5_BDE_EN 0x20B5
+#define MAX98373_R20B6_BDE_CUR_STATE_READBACK 0x20B6
+#define MAX98373_R20D1_DHT_CFG 0x20D1
+#define MAX98373_R20D2_DHT_ATTACK_CFG 0x20D2
+#define MAX98373_R20D3_DHT_RELEASE_CFG 0x20D3
+#define MAX98373_R20D4_DHT_EN 0x20D4
+#define MAX98373_R20E0_LIMITER_THRESH_CFG 0x20E0
+#define MAX98373_R20E1_LIMITER_ATK_REL_RATES 0x20E1
+#define MAX98373_R20E2_LIMITER_EN 0x20E2
+#define MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG 0x20FE
+#define MAX98373_R20FF_GLOBAL_SHDN 0x20FF
+#define MAX98373_R21FF_REV_ID 0x21FF
+
+/* MAX98373_R2022_PCM_TX_SRC_1 */
+#define MAX98373_PCM_TX_CH_SRC_A_V_SHIFT (0)
+#define MAX98373_PCM_TX_CH_SRC_A_I_SHIFT (4)
+
+/* MAX98373_R2024_PCM_DATA_FMT_CFG */
+#define MAX98373_PCM_MODE_CFG_FORMAT_MASK (0x7 << 3)
+#define MAX98373_PCM_MODE_CFG_FORMAT_SHIFT (3)
+#define MAX98373_PCM_TX_CH_INTERLEAVE_MASK (0x1 << 2)
+#define MAX98373_PCM_FORMAT_I2S (0x0 << 0)
+#define MAX98373_PCM_FORMAT_LJ (0x1 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE0 (0x3 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE1 (0x4 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE2 (0x5 << 0)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_MASK (0x3 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_16 (0x1 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_24 (0x2 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_32 (0x3 << 6)
+
+/* MAX98373_R2026_PCM_CLOCK_RATIO */
+#define MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE (0x1 << 4)
+#define MAX98373_PCM_CLK_SETUP_BSEL_MASK (0xF << 0)
+
+/* MAX98373_R2027_PCM_SR_SETUP_1 */
+#define MAX98373_PCM_SR_SET1_SR_MASK (0xF << 0)
+#define MAX98373_PCM_SR_SET1_SR_8000 (0x0 << 0)
+#define MAX98373_PCM_SR_SET1_SR_11025 (0x1 << 0)
+#define MAX98373_PCM_SR_SET1_SR_12000 (0x2 << 0)
+#define MAX98373_PCM_SR_SET1_SR_16000 (0x3 << 0)
+#define MAX98373_PCM_SR_SET1_SR_22050 (0x4 << 0)
+#define MAX98373_PCM_SR_SET1_SR_24000 (0x5 << 0)
+#define MAX98373_PCM_SR_SET1_SR_32000 (0x6 << 0)
+#define MAX98373_PCM_SR_SET1_SR_44100 (0x7 << 0)
+#define MAX98373_PCM_SR_SET1_SR_48000 (0x8 << 0)
+
+/* MAX98373_R2028_PCM_SR_SETUP_2 */
+#define MAX98373_PCM_SR_SET2_SR_MASK (0xF << 4)
+#define MAX98373_PCM_SR_SET2_SR_SHIFT (4)
+#define MAX98373_PCM_SR_SET2_IVADC_SR_MASK (0xF << 0)
+
+/* MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1 */
+#define MAX98373_PCM_TO_SPK_MONOMIX_CFG_MASK (0x3 << 6)
+#define MAX98373_PCM_TO_SPK_MONOMIX_CFG_SHIFT (6)
+#define MAX98373_PCM_TO_SPK_CH0_SRC_MASK (0xF << 0)
+
+/* MAX98373_R203E_AMP_PATH_GAIN */
+#define MAX98373_SPK_DIGI_GAIN_MASK (0xF << 4)
+#define MAX98373_SPK_DIGI_GAIN_SHIFT (4)
+#define MAX98373_FS_GAIN_MAX_MASK (0xF << 0)
+#define MAX98373_FS_GAIN_MAX_SHIFT (0)
+
+/* MAX98373_R203F_AMP_DSP_CFG */
+#define MAX98373_AMP_DSP_CFG_DCBLK_SHIFT (0)
+#define MAX98373_AMP_DSP_CFG_DITH_SHIFT (1)
+#define MAX98373_AMP_DSP_CFG_RMP_UP_SHIFT (2)
+#define MAX98373_AMP_DSP_CFG_RMP_DN_SHIFT (3)
+#define MAX98373_AMP_DSP_CFG_DAC_INV_SHIFT (5)
+#define MAX98373_AMP_VOL_SEL_SHIFT (7)
+
+/* MAX98373_R2043_AMP_EN */
+#define MAX98373_SPKFB_EN_MASK (0x1 << 1)
+#define MAX98373_SPK_EN_MASK (0x1 << 0)
+#define MAX98373_SPKFB_EN_SHIFT (1)
+
+/*MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG */
+#define MAX98373_FLT_EN_SHIFT (4)
+
+/* MAX98373_R20B2_BDE_L4_CFG_2 */
+#define MAX98373_LVL4_MUTE_EN_SHIFT (7)
+#define MAX98373_LVL4_HOLD_EN_SHIFT (6)
+
+/* MAX98373_R20B5_BDE_EN */
+#define MAX98373_BDE_EN_SHIFT (0)
+
+/* MAX98373_R20D1_DHT_CFG */
+#define MAX98373_DHT_SPK_GAIN_MIN_SHIFT        (4)
+#define MAX98373_DHT_ROT_PNT_SHIFT     (0)
+
+/* MAX98373_R20D2_DHT_ATTACK_CFG */
+#define MAX98373_DHT_ATTACK_STEP_SHIFT (3)
+#define MAX98373_DHT_ATTACK_RATE_SHIFT (0)
+
+/* MAX98373_R20D3_DHT_RELEASE_CFG */
+#define MAX98373_DHT_RELEASE_STEP_SHIFT (3)
+#define MAX98373_DHT_RELEASE_RATE_SHIFT (0)
+
+/* MAX98373_R20D4_DHT_EN */
+#define MAX98373_DHT_EN_SHIFT (0)
+
+/* MAX98373_R20E0_LIMITER_THRESH_CFG */
+#define MAX98373_LIMITER_THRESH_SHIFT (2)
+#define MAX98373_LIMITER_THRESH_SRC_SHIFT (0)
+
+/* MAX98373_R20E2_LIMITER_EN */
+#define MAX98373_LIMITER_EN_SHIFT (0)
+
+/* MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG */
+#define MAX98373_CLOCK_MON_SHIFT (0)
+
+/* MAX98373_R20FF_GLOBAL_SHDN */
+#define MAX98373_GLOBAL_EN_MASK (0x1 << 0)
+
+/* MAX98373_R2000_SW_RESET */
+#define MAX98373_SOFT_RESET (0x1 << 0)
+
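+/*
+ * v_slot, i_slot and spkfb_slot are the PCM Tx slots used for voltage,
+ * current and speaker feedback sense.
+ */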
+struct max98373_priv {
+       struct regmap *regmap;
+       unsigned int v_slot;
+       unsigned int i_slot;
+       unsigned int spkfb_slot;
+       bool interleave_mode;
+       unsigned int ch_size;
+       bool tdm_mode;
+};
+#endif
index 03d07bf..7b1d1b0 100644 (file)
@@ -490,7 +490,7 @@ static int max98926_probe(struct snd_soc_codec *codec)
        struct max98926_priv *max98926 = snd_soc_codec_get_drvdata(codec);
 
        max98926->codec = codec;
-       codec->control_data = max98926->regmap;
+
        /* Hi-Z all the slots */
        regmap_write(max98926->regmap, MAX98926_DOUT_HIZ_CFG4, 0xF0);
        return 0;
index a1d3935..f701fdc 100644 (file)
@@ -682,7 +682,6 @@ static int max98927_probe(struct snd_soc_codec *codec)
        struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
 
        max98927->codec = codec;
-       codec->control_data = max98927->regmap;
 
        /* Software Reset */
        regmap_write(max98927->regmap,
index 4fd8d1d..be7a45f 100644 (file)
@@ -610,6 +610,9 @@ static int mc13783_probe(struct snd_soc_codec *codec)
 {
        struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
 
+       snd_soc_codec_init_regmap(codec,
+                                 dev_get_regmap(codec->dev->parent, NULL));
+
        /* these are the reset values */
        mc13xxx_reg_write(priv->mc13xxx, MC13783_AUDIO_RX0, 0x25893);
        mc13xxx_reg_write(priv->mc13xxx, MC13783_AUDIO_RX1, 0x00d35A);
@@ -728,15 +731,9 @@ static struct snd_soc_dai_driver mc13783_dai_sync[] = {
        }
 };
 
-static struct regmap *mc13783_get_regmap(struct device *dev)
-{
-       return dev_get_regmap(dev->parent, NULL);
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_mc13783 = {
        .probe          = mc13783_probe,
        .remove         = mc13783_remove,
-       .get_regmap     = mc13783_get_regmap,
        .component_driver = {
                .controls               = mc13783_control_list,
                .num_controls           = ARRAY_SIZE(mc13783_control_list),
index 5f3c42c..44062bb 100644 (file)
 #define MSM8916_WCD_ANALOG_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
                        SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
 #define MSM8916_WCD_ANALOG_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
-                                   SNDRV_PCM_FMTBIT_S24_LE)
+                                   SNDRV_PCM_FMTBIT_S32_LE)
 
 static int btn_mask = SND_JACK_BTN_0 | SND_JACK_BTN_1 |
               SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_BTN_4;
@@ -712,6 +712,8 @@ static int pm8916_wcd_analog_probe(struct snd_soc_codec *codec)
                return err;
        }
 
+       snd_soc_codec_init_regmap(codec,
+                                 dev_get_regmap(codec->dev->parent, NULL));
        snd_soc_codec_set_drvdata(codec, priv);
        priv->pmic_rev = snd_soc_read(codec, CDC_D_REVISION1);
        priv->codec_version = snd_soc_read(codec, CDC_D_PERPH_SUBTYPE);
@@ -943,11 +945,6 @@ static int pm8916_wcd_analog_set_jack(struct snd_soc_codec *codec,
        return 0;
 }
 
-static struct regmap *pm8916_get_regmap(struct device *dev)
-{
-       return dev_get_regmap(dev->parent, NULL);
-}
-
 static irqreturn_t mbhc_btn_release_irq_handler(int irq, void *arg)
 {
        struct pm8916_wcd_analog_priv *priv = arg;
@@ -1082,7 +1079,6 @@ static const struct snd_soc_codec_driver pm8916_wcd_analog = {
        .probe = pm8916_wcd_analog_probe,
        .remove = pm8916_wcd_analog_remove,
        .set_jack = pm8916_wcd_analog_set_jack,
-       .get_regmap = pm8916_get_regmap,
        .component_driver = {
                .controls = pm8916_wcd_analog_snd_controls,
                .num_controls = ARRAY_SIZE(pm8916_wcd_analog_snd_controls),
index a10a724..13354d6 100644 (file)
                                   SNDRV_PCM_RATE_32000 | \
                                   SNDRV_PCM_RATE_48000)
 #define MSM8916_WCD_DIGITAL_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
-                                    SNDRV_PCM_FMTBIT_S24_LE)
+                                    SNDRV_PCM_FMTBIT_S32_LE)
 
 struct msm8916_wcd_digital_priv {
        struct clk *ahbclk, *mclk;
@@ -645,7 +645,7 @@ static int msm8916_wcd_digital_hw_params(struct snd_pcm_substream *substream,
                                    RX_I2S_CTL_RX_I2S_MODE_MASK,
                                    RX_I2S_CTL_RX_I2S_MODE_16);
                break;
-       case SNDRV_PCM_FORMAT_S24_LE:
+       case SNDRV_PCM_FORMAT_S32_LE:
                snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL,
                                    TX_I2S_CTL_TX_I2S_MODE_MASK,
                                    TX_I2S_CTL_TX_I2S_MODE_32);
index f9c9933..b08fb7e 100644 (file)
@@ -233,6 +233,41 @@ static SOC_ENUM_SINGLE_DECL(
 static const struct snd_kcontrol_new digital_ch1_mux =
        SOC_DAPM_ENUM("Digital CH1 Select", digital_ch1_enum);
 
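+/*
+ * Enable the DO12/DO34 pad outputs once the ADCs are powered (after a
+ * 300 ms delay) and tri-state them again before power-down.
+ */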
+static int adc_power_control(struct snd_soc_dapm_widget *w,
+               struct snd_kcontrol *k, int  event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+       struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
+
+       if (SND_SOC_DAPM_EVENT_ON(event)) {
+               msleep(300);
+               /* DO12 and DO34 pad output enable */
+               regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL1,
+                       NAU8540_I2S_DO12_TRI, 0);
+               regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL2,
+                       NAU8540_I2S_DO34_TRI, 0);
+       } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
+               regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL1,
+                       NAU8540_I2S_DO12_TRI, NAU8540_I2S_DO12_TRI);
+               regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL2,
+                       NAU8540_I2S_DO34_TRI, NAU8540_I2S_DO34_TRI);
+       }
+       return 0;
+}
+
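+/* Soft-reset the device when the capture interface powers down */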
+static int aiftx_power_control(struct snd_soc_dapm_widget *w,
+               struct snd_kcontrol *k, int  event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+       struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
+
+       if (SND_SOC_DAPM_EVENT_OFF(event)) {
+               regmap_write(nau8540->regmap, NAU8540_REG_RST, 0x0001);
+               regmap_write(nau8540->regmap, NAU8540_REG_RST, 0x0000);
+       }
+       return 0;
+}
+
 static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("MICBIAS2", NAU8540_REG_MIC_BIAS, 11, 0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("MICBIAS1", NAU8540_REG_MIC_BIAS, 10, 0, NULL, 0),
@@ -247,14 +282,18 @@ static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
        SND_SOC_DAPM_PGA("Frontend PGA3", NAU8540_REG_PWR, 14, 0, NULL, 0),
        SND_SOC_DAPM_PGA("Frontend PGA4", NAU8540_REG_PWR, 15, 0, NULL, 0),
 
-       SND_SOC_DAPM_ADC("ADC1", NULL,
-               NAU8540_REG_POWER_MANAGEMENT, 0, 0),
-       SND_SOC_DAPM_ADC("ADC2", NULL,
-               NAU8540_REG_POWER_MANAGEMENT, 1, 0),
-       SND_SOC_DAPM_ADC("ADC3", NULL,
-               NAU8540_REG_POWER_MANAGEMENT, 2, 0),
-       SND_SOC_DAPM_ADC("ADC4", NULL,
-               NAU8540_REG_POWER_MANAGEMENT, 3, 0),
+       SND_SOC_DAPM_ADC_E("ADC1", NULL,
+               NAU8540_REG_POWER_MANAGEMENT, 0, 0, adc_power_control,
+               SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+       SND_SOC_DAPM_ADC_E("ADC2", NULL,
+               NAU8540_REG_POWER_MANAGEMENT, 1, 0, adc_power_control,
+               SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+       SND_SOC_DAPM_ADC_E("ADC3", NULL,
+               NAU8540_REG_POWER_MANAGEMENT, 2, 0, adc_power_control,
+               SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+       SND_SOC_DAPM_ADC_E("ADC4", NULL,
+               NAU8540_REG_POWER_MANAGEMENT, 3, 0, adc_power_control,
+               SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
        SND_SOC_DAPM_PGA("ADC CH1", NAU8540_REG_ANALOG_PWR, 0, 0, NULL, 0),
        SND_SOC_DAPM_PGA("ADC CH2", NAU8540_REG_ANALOG_PWR, 1, 0, NULL, 0),
@@ -270,7 +309,8 @@ static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
        SND_SOC_DAPM_MUX("Digital CH1 Mux",
                SND_SOC_NOPM, 0, 0, &digital_ch1_mux),
 
-       SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT_E("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0,
+               aiftx_power_control, SND_SOC_DAPM_POST_PMD),
 };
 
 static const struct snd_soc_dapm_route nau8540_dapm_routes[] = {
@@ -575,7 +615,8 @@ static void nau8540_fll_apply(struct regmap *regmap,
                NAU8540_CLK_SRC_MASK | NAU8540_CLK_MCLK_SRC_MASK,
                NAU8540_CLK_SRC_MCLK | fll_param->mclk_src);
        regmap_update_bits(regmap, NAU8540_REG_FLL1,
-               NAU8540_FLL_RATIO_MASK, fll_param->ratio);
+               NAU8540_FLL_RATIO_MASK | NAU8540_ICTRL_LATCH_MASK,
+               fll_param->ratio | (0x6 << NAU8540_ICTRL_LATCH_SFT));
        /* FLL 16-bit fractional input */
        regmap_write(regmap, NAU8540_REG_FLL2, fll_param->fll_frac);
        /* FLL 10-bit integer input */
@@ -596,13 +637,14 @@ static void nau8540_fll_apply(struct regmap *regmap,
                        NAU8540_FLL_PDB_DAC_EN | NAU8540_FLL_LOOP_FTR_EN |
                        NAU8540_FLL_FTR_SW_FILTER);
                regmap_update_bits(regmap, NAU8540_REG_FLL6,
-                       NAU8540_SDM_EN, NAU8540_SDM_EN);
+                       NAU8540_SDM_EN | NAU8540_CUTOFF500,
+                       NAU8540_SDM_EN | NAU8540_CUTOFF500);
        } else {
                regmap_update_bits(regmap, NAU8540_REG_FLL5,
                        NAU8540_FLL_PDB_DAC_EN | NAU8540_FLL_LOOP_FTR_EN |
                        NAU8540_FLL_FTR_SW_MASK, NAU8540_FLL_FTR_SW_ACCU);
-               regmap_update_bits(regmap,
-                       NAU8540_REG_FLL6, NAU8540_SDM_EN, 0);
+               regmap_update_bits(regmap, NAU8540_REG_FLL6,
+                       NAU8540_SDM_EN | NAU8540_CUTOFF500, 0);
        }
 }
 
@@ -617,17 +659,22 @@ static int nau8540_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
        switch (pll_id) {
        case NAU8540_CLK_FLL_MCLK:
                regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
-                       NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_MCLK);
+                       NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+                       NAU8540_FLL_CLK_SRC_MCLK | 0);
                break;
 
        case NAU8540_CLK_FLL_BLK:
                regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
-                       NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_BLK);
+                       NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+                       NAU8540_FLL_CLK_SRC_BLK |
+                       (0xf << NAU8540_GAIN_ERR_SFT));
                break;
 
        case NAU8540_CLK_FLL_FS:
                regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
-                       NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_FS);
+                       NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+                       NAU8540_FLL_CLK_SRC_FS |
+                       (0xf << NAU8540_GAIN_ERR_SFT));
                break;
 
        default:
@@ -710,9 +757,24 @@ static void nau8540_init_regs(struct nau8540 *nau8540)
        regmap_update_bits(regmap, NAU8540_REG_CLOCK_CTRL,
                NAU8540_CLK_ADC_EN | NAU8540_CLK_I2S_EN,
                NAU8540_CLK_ADC_EN | NAU8540_CLK_I2S_EN);
-       /* ADC OSR selection, CLK_ADC = Fs * OSR */
+       /* ADC OSR selection, CLK_ADC = Fs * OSR;
+        * Channel time alignment enable.
+        */
        regmap_update_bits(regmap, NAU8540_REG_ADC_SAMPLE_RATE,
-               NAU8540_ADC_OSR_MASK, NAU8540_ADC_OSR_64);
+               NAU8540_CH_SYNC | NAU8540_ADC_OSR_MASK,
+               NAU8540_CH_SYNC | NAU8540_ADC_OSR_64);
+       /* PGA input mode selection */
+       regmap_update_bits(regmap, NAU8540_REG_FEPGA1,
+               NAU8540_FEPGA1_MODCH2_SHT | NAU8540_FEPGA1_MODCH1_SHT,
+               NAU8540_FEPGA1_MODCH2_SHT | NAU8540_FEPGA1_MODCH1_SHT);
+       regmap_update_bits(regmap, NAU8540_REG_FEPGA2,
+               NAU8540_FEPGA2_MODCH4_SHT | NAU8540_FEPGA2_MODCH3_SHT,
+               NAU8540_FEPGA2_MODCH4_SHT | NAU8540_FEPGA2_MODCH3_SHT);
+       /* DO12 and DO34 pad output disable */
+       regmap_update_bits(regmap, NAU8540_REG_PCM_CTRL1,
+               NAU8540_I2S_DO12_TRI, NAU8540_I2S_DO12_TRI);
+       regmap_update_bits(regmap, NAU8540_REG_PCM_CTRL2,
+               NAU8540_I2S_DO34_TRI, NAU8540_I2S_DO34_TRI);
 }
 
 static int __maybe_unused nau8540_suspend(struct snd_soc_codec *codec)
index 5db5b22..732b490 100644 (file)
 #define NAU8540_CLK_MCLK_SRC_MASK      0xf
 
 /* FLL1 (0x04) */
+#define NAU8540_ICTRL_LATCH_SFT        10
+#define NAU8540_ICTRL_LATCH_MASK       (0x7 << NAU8540_ICTRL_LATCH_SFT)
 #define NAU8540_FLL_RATIO_MASK 0x7f
 
 /* FLL3 (0x06) */
+#define NAU8540_GAIN_ERR_SFT           12
+#define NAU8540_GAIN_ERR_MASK          (0xf << NAU8540_GAIN_ERR_SFT)
 #define NAU8540_FLL_CLK_SRC_SFT        10
 #define NAU8540_FLL_CLK_SRC_MASK       (0x3 << NAU8540_FLL_CLK_SRC_SFT)
 #define NAU8540_FLL_CLK_SRC_MCLK       (0 << NAU8540_FLL_CLK_SRC_SFT)
 /* FLL6 (0x9) */
 #define NAU8540_DCO_EN                 (0x1 << 15)
 #define NAU8540_SDM_EN                 (0x1 << 14)
+#define NAU8540_CUTOFF500              (0x1 << 13)
 
 /* PCM_CTRL0 (0x10) */
 #define NAU8540_I2S_BP_SFT             7
 #define NAU8540_I2S_DF_PCM_AB          0x3
 
 /* PCM_CTRL1 (0x11) */
+#define NAU8540_I2S_DO12_TRI           (0x1 << 15)
 #define NAU8540_I2S_LRC_DIV_SFT        12
 #define NAU8540_I2S_LRC_DIV_MASK       (0x3 << NAU8540_I2S_LRC_DIV_SFT)
 #define NAU8540_I2S_DO12_OE            (0x1 << 4)
 #define NAU8540_I2S_BLK_DIV_MASK       0x7
 
 /* PCM_CTRL1 (0x12) */
+#define NAU8540_I2S_DO34_TRI           (0x1 << 15)
 #define NAU8540_I2S_DO34_OE            (0x1 << 11)
 #define NAU8540_I2S_TSLOT_L_MASK       0x3ff
 
 #define NAU8540_TDM_TX_MASK            0xf
 
 /* ADC_SAMPLE_RATE (0x3A) */
+#define NAU8540_CH_SYNC                (0x1 << 14)
 #define NAU8540_ADC_OSR_MASK           0x3
 #define NAU8540_ADC_OSR_256            0x3
 #define NAU8540_ADC_OSR_128            0x2
 #define NAU8540_PRECHARGE_DIS          (0x1 << 13)
 #define NAU8540_GLOBAL_BIAS_EN (0x1 << 12)
 
+/* FEPGA1 (0x69) */
+#define NAU8540_FEPGA1_MODCH2_SHT_SFT  7
+#define NAU8540_FEPGA1_MODCH2_SHT      (0x1 << NAU8540_FEPGA1_MODCH2_SHT_SFT)
+#define NAU8540_FEPGA1_MODCH1_SHT_SFT  3
+#define NAU8540_FEPGA1_MODCH1_SHT      (0x1 << NAU8540_FEPGA1_MODCH1_SHT_SFT)
+
+/* FEPGA2 (0x6A) */
+#define NAU8540_FEPGA2_MODCH4_SHT_SFT  7
+#define NAU8540_FEPGA2_MODCH4_SHT      (0x1 << NAU8540_FEPGA2_MODCH4_SHT_SFT)
+#define NAU8540_FEPGA2_MODCH3_SHT_SFT  3
+#define NAU8540_FEPGA2_MODCH3_SHT      (0x1 << NAU8540_FEPGA2_MODCH3_SHT_SFT)
+
 
 /* System Clock Source */
 enum {
index 0240759..088e0ce 100644 (file)
@@ -43,7 +43,7 @@ static bool nau8824_is_jack_inserted(struct nau8824 *nau8824);
 
 /* the parameter threshold of FLL */
 #define NAU_FREF_MAX 13500000
-#define NAU_FVCO_MAX 124000000
+#define NAU_FVCO_MAX 100000000
 #define NAU_FVCO_MIN 90000000
 
 /* scaling for mclk from sysclk_src output */
@@ -811,7 +811,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
                NAU8824_JD_SLEEP_MODE, NAU8824_JD_SLEEP_MODE);
 
        /* Close clock for jack type detection at manual mode */
-       nau8824_config_sysclk(nau8824, NAU8824_CLK_DIS, 0);
+       if (dapm->bias_level < SND_SOC_BIAS_PREPARE)
+               nau8824_config_sysclk(nau8824, NAU8824_CLK_DIS, 0);
 }
 
 static void nau8824_jdet_work(struct work_struct *work)
@@ -843,6 +844,11 @@ static void nau8824_jdet_work(struct work_struct *work)
        event_mask |= SND_JACK_HEADSET;
        snd_soc_jack_report(nau8824->jack, event, event_mask);
 
+       /* Enable short key press and release interruption. */
+       regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING,
+               NAU8824_IRQ_KEY_RELEASE_DIS |
+               NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
+
        nau8824_sema_release(nau8824);
 }
 
@@ -850,15 +856,15 @@ static void nau8824_setup_auto_irq(struct nau8824 *nau8824)
 {
        struct regmap *regmap = nau8824->regmap;
 
-       /* Enable jack ejection, short key press and release interruption. */
+       /* Enable the jack ejection interrupt. */
        regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING_1,
                NAU8824_IRQ_INSERT_EN | NAU8824_IRQ_EJECT_EN,
                NAU8824_IRQ_EJECT_EN);
        regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING,
-               NAU8824_IRQ_EJECT_DIS | NAU8824_IRQ_KEY_RELEASE_DIS |
-               NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
+               NAU8824_IRQ_EJECT_DIS, 0);
        /* Enable internal VCO needed for interruptions */
-       nau8824_config_sysclk(nau8824, NAU8824_CLK_INTERNAL, 0);
+       if (nau8824->dapm->bias_level < SND_SOC_BIAS_PREPARE)
+               nau8824_config_sysclk(nau8824, NAU8824_CLK_INTERNAL, 0);
        regmap_update_bits(regmap, NAU8824_REG_ENA_CTRL,
                NAU8824_JD_SLEEP_MODE, 0);
 }
index 714ce17..a1b697b 100644 (file)
@@ -194,10 +194,10 @@ static const struct reg_default nau8825_reg_defaults[] = {
 
 /* register backup table when cross talk detection */
 static struct reg_default nau8825_xtalk_baktab[] = {
-       { NAU8825_REG_ADC_DGAIN_CTRL, 0 },
+       { NAU8825_REG_ADC_DGAIN_CTRL, 0x00cf },
        { NAU8825_REG_HSVOL_CTRL, 0 },
-       { NAU8825_REG_DACL_CTRL, 0 },
-       { NAU8825_REG_DACR_CTRL, 0 },
+       { NAU8825_REG_DACL_CTRL, 0x00cf },
+       { NAU8825_REG_DACR_CTRL, 0x02cf },
 };
 
 static const unsigned short logtable[256] = {
@@ -245,13 +245,14 @@ static const unsigned short logtable[256] = {
  * tasks are allowed to acquire the semaphore, calling this function will
  * put the task to sleep. If the semaphore is not released within the
  * specified number of jiffies, this function returns.
- * Acquires the semaphore without jiffies. If no more tasks are allowed
- * to acquire the semaphore, calling this function will put the task to
- * sleep until the semaphore is released.
  * If the semaphore is not released within the specified number of jiffies,
- * this function returns -ETIME.
- * If the sleep is interrupted by a signal, this function will return -EINTR.
- * It returns 0 if the semaphore was acquired successfully.
+ * this function returns -ETIME, otherwise it returns 0 once the
+ * semaphore has been acquired.
+ *
+ * When no timeout is given, the function instead tries to acquire the
+ * semaphore atomically (trylock). In that case it returns 0 if the
+ * semaphore has been acquired successfully, or 1 if it cannot be
+ * acquired.
  */
 static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
 {
@@ -262,8 +263,8 @@ static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
                if (ret < 0)
                        dev_warn(nau8825->dev, "Acquire semaphore timeout\n");
        } else {
-               ret = down_interruptible(&nau8825->xtalk_sem);
-               if (ret < 0)
+               ret = down_trylock(&nau8825->xtalk_sem);
+               if (ret)
                        dev_warn(nau8825->dev, "Acquire semaphore fail\n");
        }
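
The rewritten acquire path above pairs a bounded sleeping acquire (down_timeout(), which reports -ETIME on expiry) with a non-blocking trylock (down_trylock(), which reports 1 when the semaphore is busy), which is why the callers elsewhere in this patch were switched from checking ret < 0 to checking ret. A minimal userspace analogy of the same two-mode pattern, written against POSIX semaphores rather than the kernel API, purely to show the two return conventions being normalized:

    /* Sketch only: POSIX stand-ins for down_timeout()/down_trylock(). */
    #include <semaphore.h>
    #include <stdio.h>
    #include <time.h>

    static int sema_acquire(sem_t *sem, int timeout_sec)
    {
            if (timeout_sec > 0) {
                    struct timespec ts;

                    clock_gettime(CLOCK_REALTIME, &ts);
                    ts.tv_sec += timeout_sec;
                    /* Bounded wait: 0 on success, -1 (ETIMEDOUT) on expiry. */
                    return sem_timedwait(sem, &ts) ? -1 : 0;
            }
            /* Non-blocking try: 0 on success, 1 if it cannot be acquired now. */
            return sem_trywait(sem) ? 1 : 0;
    }

    int main(void)
    {
            sem_t sem;

            sem_init(&sem, 0, 1);
            printf("%d\n", sema_acquire(&sem, 0));  /* 0: acquired */
            printf("%d\n", sema_acquire(&sem, 0));  /* 1: already held */
            return 0;
    }
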
 
@@ -454,22 +455,32 @@ static void nau8825_xtalk_backup(struct nau8825 *nau8825)
 {
        int i;
 
+       if (nau8825->xtalk_baktab_initialized)
+               return;
+
        /* Backup some register values to backup table */
        for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++)
                regmap_read(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
                                &nau8825_xtalk_baktab[i].def);
+
+       nau8825->xtalk_baktab_initialized = true;
 }
 
-static void nau8825_xtalk_restore(struct nau8825 *nau8825)
+static void nau8825_xtalk_restore(struct nau8825 *nau8825, bool cause_cancel)
 {
        int i, volume;
 
+       if (!nau8825->xtalk_baktab_initialized)
+               return;
+
        /* Restore register values from backup table; When the driver restores
-        * the headphone volumem, it needs recover to original level gradually
-        * with 3dB per step for less pop noise.
+        * the headphone volume in the XTALK_DONE state, it needs to recover
+        * to the original level gradually, 3 dB per step, to reduce pop noise.
+        * Otherwise, the restore should happen immediately.
         */
        for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++) {
-               if (nau8825_xtalk_baktab[i].reg == NAU8825_REG_HSVOL_CTRL) {
+               if (!cause_cancel && nau8825_xtalk_baktab[i].reg ==
+                       NAU8825_REG_HSVOL_CTRL) {
                        /* Ramping up the volume change to reduce pop noise */
                        volume = nau8825_xtalk_baktab[i].def &
                                NAU8825_HPR_VOL_MASK;
@@ -479,6 +490,8 @@ static void nau8825_xtalk_restore(struct nau8825 *nau8825)
                regmap_write(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
                                nau8825_xtalk_baktab[i].def);
        }
+
+       nau8825->xtalk_baktab_initialized = false;
 }
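
The comment above explains why the headphone volume is not restored with a single register write: stepping back to the saved level a little at a time avoids an audible pop. The sketch below only illustrates that ramp idea with made-up values and timing; it is not the nau8825's actual ramp helper:

    #include <stdio.h>
    #include <unistd.h>

    /* Walk 'cur' toward 'target' in small steps with a short delay between
     * writes instead of jumping there in one write.
     */
    static void ramp_volume(unsigned int cur, unsigned int target,
                            unsigned int step, unsigned int delay_us)
    {
            while (cur != target) {
                    if (cur < target)
                            cur += (target - cur < step) ? target - cur : step;
                    else
                            cur -= (cur - target < step) ? cur - target : step;
                    printf("volume <- %u\n", cur);  /* stand-in for regmap_write() */
                    usleep(delay_us);
            }
    }

    int main(void)
    {
            ramp_volume(45, 21, 2, 10000);  /* arbitrary example values */
            return 0;
    }
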
 
 static void nau8825_xtalk_prepare_dac(struct nau8825 *nau8825)
@@ -644,7 +657,7 @@ static void nau8825_xtalk_clean_adc(struct nau8825 *nau8825)
                NAU8825_POWERUP_ADCL | NAU8825_ADC_VREFSEL_MASK, 0);
 }
 
-static void nau8825_xtalk_clean(struct nau8825 *nau8825)
+static void nau8825_xtalk_clean(struct nau8825 *nau8825, bool cause_cancel)
 {
        /* Enable internal VCO needed for interruptions */
        nau8825_configure_sysclk(nau8825, NAU8825_CLK_INTERNAL, 0);
@@ -660,7 +673,7 @@ static void nau8825_xtalk_clean(struct nau8825 *nau8825)
                NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
                NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE);
        /* Restore value of specific register for cross talk */
-       nau8825_xtalk_restore(nau8825);
+       nau8825_xtalk_restore(nau8825, cause_cancel);
 }
 
 static void nau8825_xtalk_imm_start(struct nau8825 *nau8825, int vol)
@@ -779,7 +792,7 @@ static void nau8825_xtalk_measure(struct nau8825 *nau8825)
                dev_dbg(nau8825->dev, "cross talk sidetone: %x\n", sidetone);
                regmap_write(nau8825->regmap, NAU8825_REG_DAC_DGAIN_CTRL,
                                        (sidetone << 8) | sidetone);
-               nau8825_xtalk_clean(nau8825);
+               nau8825_xtalk_clean(nau8825, false);
                nau8825->xtalk_state = NAU8825_XTALK_DONE;
                break;
        default:
@@ -815,13 +828,14 @@ static void nau8825_xtalk_work(struct work_struct *work)
 
 static void nau8825_xtalk_cancel(struct nau8825 *nau8825)
 {
-       /* If the xtalk_protect is true, that means the process is still
-        * on going. The driver forces to cancel the cross talk task and
+       /* If crosstalk is enabled and the process is ongoing,
+        * the driver forcibly cancels the crosstalk task and
         * restores the configuration to original status.
         */
-       if (nau8825->xtalk_protect) {
+       if (nau8825->xtalk_enable && nau8825->xtalk_state !=
+               NAU8825_XTALK_DONE) {
                cancel_work_sync(&nau8825->xtalk_work);
-               nau8825_xtalk_clean(nau8825);
+               nau8825_xtalk_clean(nau8825, true);
        }
        /* Reset parameters for cross talk suppression function */
        nau8825_sema_reset(nau8825);
@@ -905,6 +919,7 @@ static int nau8825_adc_event(struct snd_soc_dapm_widget *w,
 
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
+               msleep(125);
                regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
                        NAU8825_ENABLE_ADC, NAU8825_ENABLE_ADC);
                break;
@@ -1245,8 +1260,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
                regmap_read(nau8825->regmap, NAU8825_REG_DAC_CTRL1, &osr);
                osr &= NAU8825_DAC_OVERSAMPLE_MASK;
                if (nau8825_clock_check(nau8825, substream->stream,
-                       params_rate(params), osr))
+                       params_rate(params), osr)) {
+                       nau8825_sema_release(nau8825);
                        return -EINVAL;
+               }
                regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
                        NAU8825_CLK_DAC_SRC_MASK,
                        osr_dac_sel[osr].clk_src << NAU8825_CLK_DAC_SRC_SFT);
@@ -1254,8 +1271,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
                regmap_read(nau8825->regmap, NAU8825_REG_ADC_RATE, &osr);
                osr &= NAU8825_ADC_SYNC_DOWN_MASK;
                if (nau8825_clock_check(nau8825, substream->stream,
-                       params_rate(params), osr))
+                       params_rate(params), osr)) {
+                       nau8825_sema_release(nau8825);
                        return -EINVAL;
+               }
                regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
                        NAU8825_CLK_ADC_SRC_MASK,
                        osr_adc_sel[osr].clk_src << NAU8825_CLK_ADC_SRC_SFT);
@@ -1272,8 +1291,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
                        bclk_div = 1;
                else if (bclk_fs <= 128)
                        bclk_div = 0;
-               else
+               else {
+                       nau8825_sema_release(nau8825);
                        return -EINVAL;
+               }
                regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
                        NAU8825_I2S_LRC_DIV_MASK | NAU8825_I2S_BLK_DIV_MASK,
                        ((bclk_div + 1) << NAU8825_I2S_LRC_DIV_SFT) | bclk_div);
@@ -1293,6 +1314,7 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
                val_len |= NAU8825_I2S_DL_32;
                break;
        default:
+               nau8825_sema_release(nau8825);
                return -EINVAL;
        }
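
Each early error return added to nau8825_hw_params() above is now preceded by nau8825_sema_release(), so the semaphore taken at the top of the function is not leaked when a rate, oversampling or format check fails. A common alternative arrangement (shown only as the usual C idiom, not what this driver does) funnels all failures through a single unlock label:

    #include <errno.h>
    #include <stdio.h>

    static int locked;

    static void lock(void)   { locked = 1; }
    static void unlock(void) { locked = 0; }

    /* goto-based variant: every failure path drops the lock in one place. */
    static int configure(int rate_ok, int width_ok)
    {
            int ret = 0;

            lock();

            if (!rate_ok) {
                    ret = -EINVAL;
                    goto err_unlock;
            }
            if (!width_ok) {
                    ret = -EINVAL;
                    goto err_unlock;
            }
            /* ... program clock dividers here ... */

    err_unlock:
            unlock();
            return ret;
    }

    int main(void)
    {
            printf("%d %d\n", configure(1, 1), configure(1, 0));
            return 0;
    }
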
 
@@ -1311,8 +1333,6 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
        unsigned int ctrl1_val = 0, ctrl2_val = 0;
 
-       nau8825_sema_acquire(nau8825, 3 * HZ);
-
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
        case SND_SOC_DAIFMT_CBM_CFM:
                ctrl2_val |= NAU8825_I2S_MS_MASTER;
@@ -1354,6 +1374,8 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
                return -EINVAL;
        }
 
+       nau8825_sema_acquire(nau8825, 3 * HZ);
+
        regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1,
                NAU8825_I2S_DL_MASK | NAU8825_I2S_DF_MASK |
                NAU8825_I2S_BP_MASK | NAU8825_I2S_PCMB_MASK,
@@ -1686,7 +1708,7 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
        } else if (active_irq & NAU8825_HEADSET_COMPLETION_IRQ) {
                if (nau8825_is_jack_inserted(regmap)) {
                        event |= nau8825_jack_insert(nau8825);
-                       if (!nau8825->xtalk_bypass && !nau8825->high_imped) {
+                       if (nau8825->xtalk_enable && !nau8825->high_imped) {
                                /* Apply the cross talk suppression in the
                                 * headset without high impedance.
                                 */
@@ -1700,12 +1722,15 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
                                        int ret;
                                        nau8825->xtalk_protect = true;
                                        ret = nau8825_sema_acquire(nau8825, 0);
-                                       if (ret < 0)
+                                       if (ret)
                                                nau8825->xtalk_protect = false;
                                }
                                /* Startup cross talk detection process */
-                               nau8825->xtalk_state = NAU8825_XTALK_PREPARE;
-                               schedule_work(&nau8825->xtalk_work);
+                               if (nau8825->xtalk_protect) {
+                                       nau8825->xtalk_state =
+                                               NAU8825_XTALK_PREPARE;
+                                       schedule_work(&nau8825->xtalk_work);
+                               }
                        } else {
                                /* The cross talk suppression shouldn't apply
                                 * in the headset with high impedance. Thus,
@@ -1732,7 +1757,9 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
                        nau8825->xtalk_event_mask = event_mask;
                }
        } else if (active_irq & NAU8825_IMPEDANCE_MEAS_IRQ) {
-               schedule_work(&nau8825->xtalk_work);
+               /* crosstalk detection enabled and process ongoing */
+               if (nau8825->xtalk_enable && nau8825->xtalk_protect)
+                       schedule_work(&nau8825->xtalk_work);
                clear_irq = NAU8825_IMPEDANCE_MEAS_IRQ;
        } else if ((active_irq & NAU8825_JACK_INSERTION_IRQ_MASK) ==
                NAU8825_JACK_INSERTION_DETECTED) {
@@ -2381,7 +2408,7 @@ static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec)
        regcache_sync(nau8825->regmap);
        nau8825->xtalk_protect = true;
        ret = nau8825_sema_acquire(nau8825, 0);
-       if (ret < 0)
+       if (ret)
                nau8825->xtalk_protect = false;
        enable_irq(nau8825->irq);
 
@@ -2440,8 +2467,8 @@ static void nau8825_print_device_properties(struct nau8825 *nau8825)
                        nau8825->jack_insert_debounce);
        dev_dbg(dev, "jack-eject-debounce:  %d\n",
                        nau8825->jack_eject_debounce);
-       dev_dbg(dev, "crosstalk-bypass:     %d\n",
-                       nau8825->xtalk_bypass);
+       dev_dbg(dev, "crosstalk-enable:     %d\n",
+                       nau8825->xtalk_enable);
 }
 
 static int nau8825_read_device_properties(struct device *dev,
@@ -2506,8 +2533,8 @@ static int nau8825_read_device_properties(struct device *dev,
                &nau8825->jack_eject_debounce);
        if (ret)
                nau8825->jack_eject_debounce = 0;
-       nau8825->xtalk_bypass = device_property_read_bool(dev,
-               "nuvoton,crosstalk-bypass");
+       nau8825->xtalk_enable = device_property_read_bool(dev,
+               "nuvoton,crosstalk-enable");
 
        nau8825->mclk = devm_clk_get(dev, "mclk");
        if (PTR_ERR(nau8825->mclk) == -EPROBE_DEFER) {
@@ -2568,6 +2595,7 @@ static int nau8825_i2c_probe(struct i2c_client *i2c,
         */
        nau8825->xtalk_state = NAU8825_XTALK_DONE;
        nau8825->xtalk_protect = false;
+       nau8825->xtalk_baktab_initialized = false;
        sema_init(&nau8825->xtalk_sem, 1);
        INIT_WORK(&nau8825->xtalk_work, nau8825_xtalk_work);
 
index 8aee5c8..f7e7321 100644 (file)
@@ -476,7 +476,8 @@ struct nau8825 {
        int xtalk_event_mask;
        bool xtalk_protect;
        int imp_rms[NAU8825_XTALK_IMM];
-       int xtalk_bypass;
+       int xtalk_enable;
+       bool xtalk_baktab_initialized; /* true once the backup table is populated */
 };
 
 int nau8825_enable_jack_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/codecs/pcm186x-i2c.c b/sound/soc/codecs/pcm186x-i2c.c
new file mode 100644 (file)
index 0000000..5436212
--- /dev/null
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC - I2C
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ *     Andreas Dannenberg <dannenberg@ti.com>
+ *     Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+
+#include "pcm186x.h"
+
+static const struct of_device_id pcm186x_of_match[] = {
+       { .compatible = "ti,pcm1862", .data = (void *)PCM1862 },
+       { .compatible = "ti,pcm1863", .data = (void *)PCM1863 },
+       { .compatible = "ti,pcm1864", .data = (void *)PCM1864 },
+       { .compatible = "ti,pcm1865", .data = (void *)PCM1865 },
+       { }
+};
+MODULE_DEVICE_TABLE(of, pcm186x_of_match);
+
+static int pcm186x_i2c_probe(struct i2c_client *i2c,
+                            const struct i2c_device_id *id)
+{
+       const enum pcm186x_type type = (enum pcm186x_type)id->driver_data;
+       int irq = i2c->irq;
+       struct regmap *regmap;
+
+       regmap = devm_regmap_init_i2c(i2c, &pcm186x_regmap);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       return pcm186x_probe(&i2c->dev, type, irq, regmap);
+}
+
+static int pcm186x_i2c_remove(struct i2c_client *i2c)
+{
+       pcm186x_remove(&i2c->dev);
+
+       return 0;
+}
+
+static const struct i2c_device_id pcm186x_i2c_id[] = {
+       { "pcm1862", PCM1862 },
+       { "pcm1863", PCM1863 },
+       { "pcm1864", PCM1864 },
+       { "pcm1865", PCM1865 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, pcm186x_i2c_id);
+
+static struct i2c_driver pcm186x_i2c_driver = {
+       .probe          = pcm186x_i2c_probe,
+       .remove         = pcm186x_i2c_remove,
+       .id_table       = pcm186x_i2c_id,
+       .driver         = {
+               .name   = "pcm186x",
+               .of_match_table = pcm186x_of_match,
+       },
+};
+module_i2c_driver(pcm186x_i2c_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC I2C Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x-spi.c b/sound/soc/codecs/pcm186x-spi.c
new file mode 100644 (file)
index 0000000..2366f8e
--- /dev/null
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC - SPI
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ *     Andreas Dannenberg <dannenberg@ti.com>
+ *     Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "pcm186x.h"
+
+static const struct of_device_id pcm186x_of_match[] = {
+       { .compatible = "ti,pcm1862", .data = (void *)PCM1862 },
+       { .compatible = "ti,pcm1863", .data = (void *)PCM1863 },
+       { .compatible = "ti,pcm1864", .data = (void *)PCM1864 },
+       { .compatible = "ti,pcm1865", .data = (void *)PCM1865 },
+       { }
+};
+MODULE_DEVICE_TABLE(of, pcm186x_of_match);
+
+static int pcm186x_spi_probe(struct spi_device *spi)
+{
+       const enum pcm186x_type type =
+                        (enum pcm186x_type)spi_get_device_id(spi)->driver_data;
+       int irq = spi->irq;
+       struct regmap *regmap;
+
+       regmap = devm_regmap_init_spi(spi, &pcm186x_regmap);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       return pcm186x_probe(&spi->dev, type, irq, regmap);
+}
+
+static int pcm186x_spi_remove(struct spi_device *spi)
+{
+       pcm186x_remove(&spi->dev);
+
+       return 0;
+}
+
+static const struct spi_device_id pcm186x_spi_id[] = {
+       { "pcm1862", PCM1862 },
+       { "pcm1863", PCM1863 },
+       { "pcm1864", PCM1864 },
+       { "pcm1865", PCM1865 },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, pcm186x_spi_id);
+
+static struct spi_driver pcm186x_spi_driver = {
+       .probe          = pcm186x_spi_probe,
+       .remove         = pcm186x_spi_remove,
+       .id_table       = pcm186x_spi_id,
+       .driver         = {
+               .name   = "pcm186x",
+               .of_match_table = pcm186x_of_match,
+       },
+};
+module_spi_driver(pcm186x_spi_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC SPI Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
new file mode 100644 (file)
index 0000000..cdb5142
--- /dev/null
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ *     Andreas Dannenberg <dannenberg@ti.com>
+ *     Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "pcm186x.h"
+
+static const char * const pcm186x_supply_names[] = {
+       "avdd",         /* Analog power supply. Connect to 3.3-V supply. */
+       "dvdd",         /* Digital power supply. Connect to 3.3-V supply. */
+       "iovdd",        /* I/O power supply. Connect to 3.3-V or 1.8-V. */
+};
+#define PCM186x_NUM_SUPPLIES ARRAY_SIZE(pcm186x_supply_names)
+
+struct pcm186x_priv {
+       struct regmap *regmap;
+       struct regulator_bulk_data supplies[PCM186x_NUM_SUPPLIES];
+       unsigned int sysclk;
+       unsigned int tdm_offset;
+       bool is_tdm_mode;
+       bool is_master_mode;
+};
+
+static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 50, 0);
+
+static const struct snd_kcontrol_new pcm1863_snd_controls[] = {
+       SOC_DOUBLE_R_S_TLV("ADC Capture Volume", PCM186X_PGA_VAL_CH1_L,
+                          PCM186X_PGA_VAL_CH1_R, 0, -24, 80, 7, 0,
+                          pcm186x_pga_tlv),
+};
+
+static const struct snd_kcontrol_new pcm1865_snd_controls[] = {
+       SOC_DOUBLE_R_S_TLV("ADC1 Capture Volume", PCM186X_PGA_VAL_CH1_L,
+                          PCM186X_PGA_VAL_CH1_R, 0, -24, 80, 7, 0,
+                          pcm186x_pga_tlv),
+       SOC_DOUBLE_R_S_TLV("ADC2 Capture Volume", PCM186X_PGA_VAL_CH2_L,
+                          PCM186X_PGA_VAL_CH2_R, 0, -24, 80, 7, 0,
+                          pcm186x_pga_tlv),
+};
+
+static const unsigned int pcm186x_adc_input_channel_sel_value[] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+       0x10, 0x20, 0x30
+};
+
+static const char * const pcm186x_adcl_input_channel_sel_text[] = {
+       "No Select",
+       "VINL1[SE]",                                    /* Default for ADC1L */
+       "VINL2[SE]",                                    /* Default for ADC2L */
+       "VINL2[SE] + VINL1[SE]",
+       "VINL3[SE]",
+       "VINL3[SE] + VINL1[SE]",
+       "VINL3[SE] + VINL2[SE]",
+       "VINL3[SE] + VINL2[SE] + VINL1[SE]",
+       "VINL4[SE]",
+       "VINL4[SE] + VINL1[SE]",
+       "VINL4[SE] + VINL2[SE]",
+       "VINL4[SE] + VINL2[SE] + VINL1[SE]",
+       "VINL4[SE] + VINL3[SE]",
+       "VINL4[SE] + VINL3[SE] + VINL1[SE]",
+       "VINL4[SE] + VINL3[SE] + VINL2[SE]",
+       "VINL4[SE] + VINL3[SE] + VINL2[SE] + VINL1[SE]",
+       "{VIN1P, VIN1M}[DIFF]",
+       "{VIN4P, VIN4M}[DIFF]",
+       "{VIN1P, VIN1M}[DIFF] + {VIN4P, VIN4M}[DIFF]"
+};
+
+static const char * const pcm186x_adcr_input_channel_sel_text[] = {
+       "No Select",
+       "VINR1[SE]",                                    /* Default for ADC1R */
+       "VINR2[SE]",                                    /* Default for ADC2R */
+       "VINR2[SE] + VINR1[SE]",
+       "VINR3[SE]",
+       "VINR3[SE] + VINR1[SE]",
+       "VINR3[SE] + VINR2[SE]",
+       "VINR3[SE] + VINR2[SE] + VINR1[SE]",
+       "VINR4[SE]",
+       "VINR4[SE] + VINR1[SE]",
+       "VINR4[SE] + VINR2[SE]",
+       "VINR4[SE] + VINR2[SE] + VINR1[SE]",
+       "VINR4[SE] + VINR3[SE]",
+       "VINR4[SE] + VINR3[SE] + VINR1[SE]",
+       "VINR4[SE] + VINR3[SE] + VINR2[SE]",
+       "VINR4[SE] + VINR3[SE] + VINR2[SE] + VINR1[SE]",
+       "{VIN2P, VIN2M}[DIFF]",
+       "{VIN3P, VIN3M}[DIFF]",
+       "{VIN2P, VIN2M}[DIFF] + {VIN3P, VIN3M}[DIFF]"
+};
+
+static const struct soc_enum pcm186x_adc_input_channel_sel[] = {
+       SOC_VALUE_ENUM_SINGLE(PCM186X_ADC1_INPUT_SEL_L, 0,
+                             PCM186X_ADC_INPUT_SEL_MASK,
+                             ARRAY_SIZE(pcm186x_adcl_input_channel_sel_text),
+                             pcm186x_adcl_input_channel_sel_text,
+                             pcm186x_adc_input_channel_sel_value),
+       SOC_VALUE_ENUM_SINGLE(PCM186X_ADC1_INPUT_SEL_R, 0,
+                             PCM186X_ADC_INPUT_SEL_MASK,
+                             ARRAY_SIZE(pcm186x_adcr_input_channel_sel_text),
+                             pcm186x_adcr_input_channel_sel_text,
+                             pcm186x_adc_input_channel_sel_value),
+       SOC_VALUE_ENUM_SINGLE(PCM186X_ADC2_INPUT_SEL_L, 0,
+                             PCM186X_ADC_INPUT_SEL_MASK,
+                             ARRAY_SIZE(pcm186x_adcl_input_channel_sel_text),
+                             pcm186x_adcl_input_channel_sel_text,
+                             pcm186x_adc_input_channel_sel_value),
+       SOC_VALUE_ENUM_SINGLE(PCM186X_ADC2_INPUT_SEL_R, 0,
+                             PCM186X_ADC_INPUT_SEL_MASK,
+                             ARRAY_SIZE(pcm186x_adcr_input_channel_sel_text),
+                             pcm186x_adcr_input_channel_sel_text,
+                             pcm186x_adc_input_channel_sel_value),
+};
+
+static const struct snd_kcontrol_new pcm186x_adc_mux_controls[] = {
+       SOC_DAPM_ENUM("ADC1 Left Input", pcm186x_adc_input_channel_sel[0]),
+       SOC_DAPM_ENUM("ADC1 Right Input", pcm186x_adc_input_channel_sel[1]),
+       SOC_DAPM_ENUM("ADC2 Left Input", pcm186x_adc_input_channel_sel[2]),
+       SOC_DAPM_ENUM("ADC2 Right Input", pcm186x_adc_input_channel_sel[3]),
+};
+
+static const struct snd_soc_dapm_widget pcm1863_dapm_widgets[] = {
+       SND_SOC_DAPM_INPUT("VINL1"),
+       SND_SOC_DAPM_INPUT("VINR1"),
+       SND_SOC_DAPM_INPUT("VINL2"),
+       SND_SOC_DAPM_INPUT("VINR2"),
+       SND_SOC_DAPM_INPUT("VINL3"),
+       SND_SOC_DAPM_INPUT("VINR3"),
+       SND_SOC_DAPM_INPUT("VINL4"),
+       SND_SOC_DAPM_INPUT("VINR4"),
+
+       SND_SOC_DAPM_MUX("ADC Left Capture Source", SND_SOC_NOPM, 0, 0,
+                        &pcm186x_adc_mux_controls[0]),
+       SND_SOC_DAPM_MUX("ADC Right Capture Source", SND_SOC_NOPM, 0, 0,
+                        &pcm186x_adc_mux_controls[1]),
+
+       /*
+        * Put the codec into SLEEP mode when not in use, allowing the
+        * Energysense mechanism to operate.
+        */
+       SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1,  0),
+};
+
+static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
+       SND_SOC_DAPM_INPUT("VINL1"),
+       SND_SOC_DAPM_INPUT("VINR1"),
+       SND_SOC_DAPM_INPUT("VINL2"),
+       SND_SOC_DAPM_INPUT("VINR2"),
+       SND_SOC_DAPM_INPUT("VINL3"),
+       SND_SOC_DAPM_INPUT("VINR3"),
+       SND_SOC_DAPM_INPUT("VINL4"),
+       SND_SOC_DAPM_INPUT("VINR4"),
+
+       SND_SOC_DAPM_MUX("ADC1 Left Capture Source", SND_SOC_NOPM, 0, 0,
+                        &pcm186x_adc_mux_controls[0]),
+       SND_SOC_DAPM_MUX("ADC1 Right Capture Source", SND_SOC_NOPM, 0, 0,
+                        &pcm186x_adc_mux_controls[1]),
+       SND_SOC_DAPM_MUX("ADC2 Left Capture Source", SND_SOC_NOPM, 0, 0,
+                        &pcm186x_adc_mux_controls[2]),
+       SND_SOC_DAPM_MUX("ADC2 Right Capture Source", SND_SOC_NOPM, 0, 0,
+                        &pcm186x_adc_mux_controls[3]),
+
+       /*
+        * Put the codec into SLEEP mode when not in use, allowing the
+        * Energysense mechanism to operate.
+        */
+       SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1,  0),
+       SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1,  0),
+};
+
+static const struct snd_soc_dapm_route pcm1863_dapm_routes[] = {
+       { "ADC Left Capture Source", NULL, "VINL1" },
+       { "ADC Left Capture Source", NULL, "VINR1" },
+       { "ADC Left Capture Source", NULL, "VINL2" },
+       { "ADC Left Capture Source", NULL, "VINR2" },
+       { "ADC Left Capture Source", NULL, "VINL3" },
+       { "ADC Left Capture Source", NULL, "VINR3" },
+       { "ADC Left Capture Source", NULL, "VINL4" },
+       { "ADC Left Capture Source", NULL, "VINR4" },
+
+       { "ADC", NULL, "ADC Left Capture Source" },
+
+       { "ADC Right Capture Source", NULL, "VINL1" },
+       { "ADC Right Capture Source", NULL, "VINR1" },
+       { "ADC Right Capture Source", NULL, "VINL2" },
+       { "ADC Right Capture Source", NULL, "VINR2" },
+       { "ADC Right Capture Source", NULL, "VINL3" },
+       { "ADC Right Capture Source", NULL, "VINR3" },
+       { "ADC Right Capture Source", NULL, "VINL4" },
+       { "ADC Right Capture Source", NULL, "VINR4" },
+
+       { "ADC", NULL, "ADC Right Capture Source" },
+};
+
+static const struct snd_soc_dapm_route pcm1865_dapm_routes[] = {
+       { "ADC1 Left Capture Source", NULL, "VINL1" },
+       { "ADC1 Left Capture Source", NULL, "VINR1" },
+       { "ADC1 Left Capture Source", NULL, "VINL2" },
+       { "ADC1 Left Capture Source", NULL, "VINR2" },
+       { "ADC1 Left Capture Source", NULL, "VINL3" },
+       { "ADC1 Left Capture Source", NULL, "VINR3" },
+       { "ADC1 Left Capture Source", NULL, "VINL4" },
+       { "ADC1 Left Capture Source", NULL, "VINR4" },
+
+       { "ADC1", NULL, "ADC1 Left Capture Source" },
+
+       { "ADC1 Right Capture Source", NULL, "VINL1" },
+       { "ADC1 Right Capture Source", NULL, "VINR1" },
+       { "ADC1 Right Capture Source", NULL, "VINL2" },
+       { "ADC1 Right Capture Source", NULL, "VINR2" },
+       { "ADC1 Right Capture Source", NULL, "VINL3" },
+       { "ADC1 Right Capture Source", NULL, "VINR3" },
+       { "ADC1 Right Capture Source", NULL, "VINL4" },
+       { "ADC1 Right Capture Source", NULL, "VINR4" },
+
+       { "ADC1", NULL, "ADC1 Right Capture Source" },
+
+       { "ADC2 Left Capture Source", NULL, "VINL1" },
+       { "ADC2 Left Capture Source", NULL, "VINR1" },
+       { "ADC2 Left Capture Source", NULL, "VINL2" },
+       { "ADC2 Left Capture Source", NULL, "VINR2" },
+       { "ADC2 Left Capture Source", NULL, "VINL3" },
+       { "ADC2 Left Capture Source", NULL, "VINR3" },
+       { "ADC2 Left Capture Source", NULL, "VINL4" },
+       { "ADC2 Left Capture Source", NULL, "VINR4" },
+
+       { "ADC2", NULL, "ADC2 Left Capture Source" },
+
+       { "ADC2 Right Capture Source", NULL, "VINL1" },
+       { "ADC2 Right Capture Source", NULL, "VINR1" },
+       { "ADC2 Right Capture Source", NULL, "VINL2" },
+       { "ADC2 Right Capture Source", NULL, "VINR2" },
+       { "ADC2 Right Capture Source", NULL, "VINL3" },
+       { "ADC2 Right Capture Source", NULL, "VINR3" },
+       { "ADC2 Right Capture Source", NULL, "VINL4" },
+       { "ADC2 Right Capture Source", NULL, "VINR4" },
+
+       { "ADC2", NULL, "ADC2 Right Capture Source" },
+};
+
+static int pcm186x_hw_params(struct snd_pcm_substream *substream,
+                            struct snd_pcm_hw_params *params,
+                            struct snd_soc_dai *dai)
+{
+       struct snd_soc_codec *codec = dai->codec;
+
+       struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+       unsigned int rate = params_rate(params);
+       unsigned int format = params_format(params);
+       unsigned int width = params_width(params);
+       unsigned int channels = params_channels(params);
+       unsigned int div_lrck;
+       unsigned int div_bck;
+       u8 tdm_tx_sel = 0;
+       u8 pcm_cfg = 0;
+
+       dev_dbg(codec->dev, "%s() rate=%u format=0x%x width=%u channels=%u\n",
+               __func__, rate, format, width, channels);
+
+       switch (width) {
+       case 16:
+               pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_16 <<
+                         PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+                         PCM186X_PCM_CFG_TX_WLEN_16 <<
+                         PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+               break;
+       case 20:
+               pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_20 <<
+                         PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+                         PCM186X_PCM_CFG_TX_WLEN_20 <<
+                         PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+               break;
+       case 24:
+               pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_24 <<
+                         PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+                         PCM186X_PCM_CFG_TX_WLEN_24 <<
+                         PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+               break;
+       case 32:
+               pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_32 <<
+                         PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+                         PCM186X_PCM_CFG_TX_WLEN_32 <<
+                         PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+                           PCM186X_PCM_CFG_RX_WLEN_MASK |
+                           PCM186X_PCM_CFG_TX_WLEN_MASK,
+                           pcm_cfg);
+
+       div_lrck = width * channels;
+
+       if (priv->is_tdm_mode) {
+               /* Select TDM transmission data */
+               switch (channels) {
+               case 2:
+                       tdm_tx_sel = PCM186X_TDM_TX_SEL_2CH;
+                       break;
+               case 4:
+                       tdm_tx_sel = PCM186X_TDM_TX_SEL_4CH;
+                       break;
+               case 6:
+                       tdm_tx_sel = PCM186X_TDM_TX_SEL_6CH;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
+               snd_soc_update_bits(codec, PCM186X_TDM_TX_SEL,
+                                   PCM186X_TDM_TX_SEL_MASK, tdm_tx_sel);
+
+               /* In DSP/TDM mode, the LRCLK divider must be 256 */
+               div_lrck = 256;
+
+               /* Configure 1/256 duty cycle for LRCK */
+               snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+                                   PCM186X_PCM_CFG_TDM_LRCK_MODE,
+                                   PCM186X_PCM_CFG_TDM_LRCK_MODE);
+       }
+
+       /* Only configure clock dividers in master mode. */
+       if (priv->is_master_mode) {
+               div_bck = priv->sysclk / (div_lrck * rate);
+
+               dev_dbg(codec->dev,
+                       "%s() master_clk=%u div_bck=%u div_lrck=%u\n",
+                       __func__, priv->sysclk, div_bck, div_lrck);
+
+               snd_soc_write(codec, PCM186X_BCK_DIV, div_bck - 1);
+               snd_soc_write(codec, PCM186X_LRK_DIV, div_lrck - 1);
+       }
+
+       return 0;
+}
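
In master mode the function above derives both divider registers from the sysclk provided through set_sysclk(): the LRCK divider is the frame length in BCK cycles (sample width times channel count, or a fixed 256 in TDM mode) and the BCK divider is sysclk / (LRCK divider * rate), with each register programmed as the divider minus one. A standalone rerun of that arithmetic; the 24.576 MHz / 48 kHz / 32-bit / 2-channel figures are example inputs only, not values the driver requires:

    #include <stdio.h>

    int main(void)
    {
            unsigned int sysclk = 24576000;   /* example master clock, Hz */
            unsigned int rate = 48000;        /* sample rate, Hz */
            unsigned int width = 32;          /* bits per sample */
            unsigned int channels = 2;

            unsigned int div_lrck = width * channels;           /* 64 */
            unsigned int div_bck = sysclk / (div_lrck * rate);  /* 8 */

            /* Registers take the divider minus one, as in pcm186x_hw_params(). */
            printf("BCK_DIV=%u LRK_DIV=%u\n", div_bck - 1, div_lrck - 1);
            return 0;
    }
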
+
+static int pcm186x_set_fmt(struct snd_soc_dai *dai, unsigned int format)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+       u8 clk_ctrl = 0;
+       u8 pcm_cfg = 0;
+
+       dev_dbg(codec->dev, "%s() format=0x%x\n", __func__, format);
+
+       /* set master/slave audio interface */
+       switch (format & SND_SOC_DAIFMT_MASTER_MASK) {
+       case SND_SOC_DAIFMT_CBM_CFM:
+               if (!priv->sysclk) {
+                       dev_err(codec->dev, "operating in master mode requires sysclock to be configured\n");
+                       return -EINVAL;
+               }
+               clk_ctrl |= PCM186X_CLK_CTRL_MST_MODE;
+               priv->is_master_mode = true;
+               break;
+       case SND_SOC_DAIFMT_CBS_CFS:
+               priv->is_master_mode = false;
+               break;
+       default:
+               dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+               return -EINVAL;
+       }
+
+       /* set interface polarity */
+       switch (format & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_NF:
+               break;
+       default:
+               dev_err(codec->dev, "Inverted DAI clocks not supported\n");
+               return -EINVAL;
+       }
+
+       /* set interface format */
+       switch (format & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_I2S:
+               pcm_cfg = PCM186X_PCM_CFG_FMT_I2S;
+               break;
+       case SND_SOC_DAIFMT_LEFT_J:
+               pcm_cfg = PCM186X_PCM_CFG_FMT_LEFTJ;
+               break;
+       case SND_SOC_DAIFMT_DSP_A:
+               priv->tdm_offset += 1;
+               /* Fall through: DSP_A uses the same basic config as DSP_B,
+                * except the TDM output must be shifted by one BCK cycle.
+                */
+       case SND_SOC_DAIFMT_DSP_B:
+               priv->is_tdm_mode = true;
+               pcm_cfg = PCM186X_PCM_CFG_FMT_TDM;
+               break;
+       default:
+               dev_err(codec->dev, "Invalid DAI format\n");
+               return -EINVAL;
+       }
+
+       snd_soc_update_bits(codec, PCM186X_CLK_CTRL,
+                           PCM186X_CLK_CTRL_MST_MODE, clk_ctrl);
+
+       snd_soc_write(codec, PCM186X_TDM_TX_OFFSET, priv->tdm_offset);
+
+       snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+                           PCM186X_PCM_CFG_FMT_MASK, pcm_cfg);
+
+       return 0;
+}
+
+static int pcm186x_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+                               unsigned int rx_mask, int slots, int slot_width)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+       unsigned int first_slot, last_slot, tdm_offset;
+
+       dev_dbg(codec->dev,
+               "%s() tx_mask=0x%x rx_mask=0x%x slots=%d slot_width=%d\n",
+               __func__, tx_mask, rx_mask, slots, slot_width);
+
+       if (!tx_mask) {
+               dev_err(codec->dev, "tdm tx mask must not be 0\n");
+               return -EINVAL;
+       }
+
+       first_slot = __ffs(tx_mask);
+       last_slot = __fls(tx_mask);
+
+       if (last_slot - first_slot != hweight32(tx_mask) - 1) {
+               dev_err(codec->dev, "tdm tx mask must be contiguous\n");
+               return -EINVAL;
+       }
+
+       tdm_offset = first_slot * slot_width;
+
+       if (tdm_offset > 255) {
+               dev_err(codec->dev, "tdm tx slot selection out of bounds\n");
+               return -EINVAL;
+       }
+
+       priv->tdm_offset = tdm_offset;
+
+       return 0;
+}
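
pcm186x_set_tdm_slot() only needs the position of the first transmit slot: it verifies that tx_mask is a single contiguous run of bits and converts the first set bit into a BCK-cycle offset for the TDM_TX_OFFSET register. The same check can be reproduced in plain C with compiler builtins standing in for the kernel's __ffs()/__fls()/hweight32(); this is an illustration of the logic, not the driver code:

    #include <stdio.h>

    /* Returns the TDM offset in BCK cycles, or -1 for an invalid mask. */
    static int tdm_offset(unsigned int tx_mask, int slot_width)
    {
            int first_slot, last_slot, nbits;

            if (!tx_mask)
                    return -1;

            first_slot = __builtin_ctz(tx_mask);           /* like __ffs() */
            last_slot = 31 - __builtin_clz(tx_mask);       /* like __fls() */
            nbits = __builtin_popcount(tx_mask);           /* like hweight32() */

            /* A contiguous run spans exactly popcount(tx_mask) bit positions. */
            if (last_slot - first_slot != nbits - 1)
                    return -1;

            return first_slot * slot_width;
    }

    int main(void)
    {
            printf("%d\n", tdm_offset(0x0c, 32));  /* slots 2-3 -> offset 64 */
            printf("%d\n", tdm_offset(0x0a, 32));  /* non-contiguous -> -1 */
            return 0;
    }
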
+
+static int pcm186x_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+                                 unsigned int freq, int dir)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+       dev_dbg(codec->dev, "%s() clk_id=%d freq=%u dir=%d\n",
+               __func__, clk_id, freq, dir);
+
+       priv->sysclk = freq;
+
+       return 0;
+}
+
+static const struct snd_soc_dai_ops pcm186x_dai_ops = {
+       .set_sysclk = pcm186x_set_dai_sysclk,
+       .set_tdm_slot = pcm186x_set_tdm_slot,
+       .set_fmt = pcm186x_set_fmt,
+       .hw_params = pcm186x_hw_params,
+};
+
+static struct snd_soc_dai_driver pcm1863_dai = {
+       .name = "pcm1863-aif",
+       .capture = {
+                .stream_name = "Capture",
+                .channels_min = 1,
+                .channels_max = 2,
+                .rates = PCM186X_RATES,
+                .formats = PCM186X_FORMATS,
+        },
+       .ops = &pcm186x_dai_ops,
+};
+
+static struct snd_soc_dai_driver pcm1865_dai = {
+       .name = "pcm1865-aif",
+       .capture = {
+                .stream_name = "Capture",
+                .channels_min = 1,
+                .channels_max = 4,
+                .rates = PCM186X_RATES,
+                .formats = PCM186X_FORMATS,
+        },
+       .ops = &pcm186x_dai_ops,
+};
+
+static int pcm186x_power_on(struct snd_soc_codec *codec)
+{
+       struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+       int ret = 0;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
+                                   priv->supplies);
+       if (ret)
+               return ret;
+
+       regcache_cache_only(priv->regmap, false);
+       ret = regcache_sync(priv->regmap);
+       if (ret) {
+               dev_err(codec->dev, "Failed to restore cache\n");
+               regcache_cache_only(priv->regmap, true);
+               regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+                                      priv->supplies);
+               return ret;
+       }
+
+       snd_soc_update_bits(codec, PCM186X_POWER_CTRL,
+                           PCM186X_PWR_CTRL_PWRDN, 0);
+
+       return 0;
+}
+
+static int pcm186x_power_off(struct snd_soc_codec *codec)
+{
+       struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       snd_soc_update_bits(codec, PCM186X_POWER_CTRL,
+                           PCM186X_PWR_CTRL_PWRDN, PCM186X_PWR_CTRL_PWRDN);
+
+       regcache_cache_only(priv->regmap, true);
+
+       ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+                                    priv->supplies);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int pcm186x_set_bias_level(struct snd_soc_codec *codec,
+                                 enum snd_soc_bias_level level)
+{
+       dev_dbg(codec->dev, "## %s: %d -> %d\n", __func__,
+               snd_soc_codec_get_bias_level(codec), level);
+
+       switch (level) {
+       case SND_SOC_BIAS_ON:
+               break;
+       case SND_SOC_BIAS_PREPARE:
+               break;
+       case SND_SOC_BIAS_STANDBY:
+               if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
+                       pcm186x_power_on(codec);
+               break;
+       case SND_SOC_BIAS_OFF:
+               pcm186x_power_off(codec);
+               break;
+       }
+
+       return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_pcm1863 = {
+       .set_bias_level = pcm186x_set_bias_level,
+
+       .component_driver = {
+               .controls = pcm1863_snd_controls,
+               .num_controls = ARRAY_SIZE(pcm1863_snd_controls),
+               .dapm_widgets = pcm1863_dapm_widgets,
+               .num_dapm_widgets = ARRAY_SIZE(pcm1863_dapm_widgets),
+               .dapm_routes = pcm1863_dapm_routes,
+               .num_dapm_routes = ARRAY_SIZE(pcm1863_dapm_routes),
+       },
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_pcm1865 = {
+       .set_bias_level = pcm186x_set_bias_level,
+       .suspend_bias_off = true,
+
+       .component_driver = {
+               .controls = pcm1865_snd_controls,
+               .num_controls = ARRAY_SIZE(pcm1865_snd_controls),
+               .dapm_widgets = pcm1865_dapm_widgets,
+               .num_dapm_widgets = ARRAY_SIZE(pcm1865_dapm_widgets),
+               .dapm_routes = pcm1865_dapm_routes,
+               .num_dapm_routes = ARRAY_SIZE(pcm1865_dapm_routes),
+       },
+};
+
+static bool pcm186x_volatile(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case PCM186X_PAGE:
+       case PCM186X_DEVICE_STATUS:
+       case PCM186X_FSAMPLE_STATUS:
+       case PCM186X_DIV_STATUS:
+       case PCM186X_CLK_STATUS:
+       case PCM186X_SUPPLY_STATUS:
+       case PCM186X_MMAP_STAT_CTRL:
+       case PCM186X_MMAP_ADDRESS:
+               return true;
+       }
+
+       return false;
+}
+
+static const struct regmap_range_cfg pcm186x_range = {
+       .name = "Pages",
+       .range_max = PCM186X_MAX_REGISTER,
+       .selector_reg = PCM186X_PAGE,
+       .selector_mask = 0xff,
+       .window_len = PCM186X_PAGE_LEN,
+};
+
+const struct regmap_config pcm186x_regmap = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .volatile_reg = pcm186x_volatile,
+
+       .ranges = &pcm186x_range,
+       .num_ranges = 1,
+
+       .max_register = PCM186X_MAX_REGISTER,
+
+       .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_GPL(pcm186x_regmap);
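
The register constants in pcm186x.h below encode the page in the upper bits of the address (PCM186X_PAGE_BASE(n) is n * 0x100), and the regmap_range_cfg above lets regmap translate such a flat address into a write to the page-select register followed by the in-page access, so the driver never switches pages by hand. A small sketch of the translation itself, using the same 0x100 page length; the helper here is hypothetical, regmap performs this internally:

    #include <stdio.h>

    #define PAGE_LEN 0x100  /* matches PCM186X_PAGE_LEN */

    /* Split a flat register number into (page, in-page offset). */
    static void split_reg(unsigned int reg, unsigned int *page, unsigned int *off)
    {
            *page = reg / PAGE_LEN;
            *off = reg % PAGE_LEN;
    }

    int main(void)
    {
            unsigned int page, off;

            split_reg(0x102, &page, &off);  /* e.g. PCM186X_MMAP_ADDRESS */
            printf("page=%u offset=%u\n", page, off);  /* page=1 offset=2 */
            return 0;
    }
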
+
+int pcm186x_probe(struct device *dev, enum pcm186x_type type, int irq,
+                 struct regmap *regmap)
+{
+       struct pcm186x_priv *priv;
+       int i, ret;
+
+       priv = devm_kzalloc(dev, sizeof(struct pcm186x_priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       dev_set_drvdata(dev, priv);
+       priv->regmap = regmap;
+
+       for (i = 0; i < ARRAY_SIZE(priv->supplies); i++)
+               priv->supplies[i].supply = pcm186x_supply_names[i];
+
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies),
+                                     priv->supplies);
+       if (ret) {
+               dev_err(dev, "failed to request supplies: %d\n", ret);
+               return ret;
+       }
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
+                                   priv->supplies);
+       if (ret) {
+               dev_err(dev, "failed enable supplies: %d\n", ret);
+               return ret;
+       }
+
+       /* Reset device registers for a consistent power-on like state */
+       ret = regmap_write(regmap, PCM186X_PAGE, PCM186X_RESET);
+       if (ret) {
+               dev_err(dev, "failed to write device: %d\n", ret);
+               return ret;
+       }
+
+       ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+                                    priv->supplies);
+       if (ret) {
+               dev_err(dev, "failed disable supplies: %d\n", ret);
+               return ret;
+       }
+
+       switch (type) {
+       case PCM1865:
+       case PCM1864:
+               ret = snd_soc_register_codec(dev, &soc_codec_dev_pcm1865,
+                                            &pcm1865_dai, 1);
+               break;
+       case PCM1863:
+       case PCM1862:
+       default:
+               ret = snd_soc_register_codec(dev, &soc_codec_dev_pcm1863,
+                                            &pcm1863_dai, 1);
+       }
+       if (ret) {
+               dev_err(dev, "failed to register CODEC: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(pcm186x_probe);
+
+int pcm186x_remove(struct device *dev)
+{
+       snd_soc_unregister_codec(dev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(pcm186x_remove);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x.h b/sound/soc/codecs/pcm186x.h
new file mode 100644 (file)
index 0000000..b630111
--- /dev/null
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ *     Andreas Dannenberg <dannenberg@ti.com>
+ *     Andrew F. Davis <afd@ti.com>
+ */
+
+#ifndef _PCM186X_H_
+#define _PCM186X_H_
+
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+enum pcm186x_type {
+       PCM1862,
+       PCM1863,
+       PCM1864,
+       PCM1865,
+};
+
+#define PCM186X_RATES  SNDRV_PCM_RATE_8000_192000
+#define PCM186X_FORMATS        (SNDRV_PCM_FMTBIT_S16_LE | \
+                        SNDRV_PCM_FMTBIT_S20_3LE |\
+                        SNDRV_PCM_FMTBIT_S24_LE | \
+                        SNDRV_PCM_FMTBIT_S32_LE)
+
+#define PCM186X_PAGE_LEN               0x0100
+#define PCM186X_PAGE_BASE(n)           (PCM186X_PAGE_LEN * n)
+
+/* The page selection register address is the same on all pages */
+#define PCM186X_PAGE                   0
+
+/* Register Definitions - Page 0 */
+#define PCM186X_PGA_VAL_CH1_L          (PCM186X_PAGE_BASE(0) +   1)
+#define PCM186X_PGA_VAL_CH1_R          (PCM186X_PAGE_BASE(0) +   2)
+#define PCM186X_PGA_VAL_CH2_L          (PCM186X_PAGE_BASE(0) +   3)
+#define PCM186X_PGA_VAL_CH2_R          (PCM186X_PAGE_BASE(0) +   4)
+#define PCM186X_PGA_CTRL               (PCM186X_PAGE_BASE(0) +   5)
+#define PCM186X_ADC1_INPUT_SEL_L       (PCM186X_PAGE_BASE(0) +   6)
+#define PCM186X_ADC1_INPUT_SEL_R       (PCM186X_PAGE_BASE(0) +   7)
+#define PCM186X_ADC2_INPUT_SEL_L       (PCM186X_PAGE_BASE(0) +   8)
+#define PCM186X_ADC2_INPUT_SEL_R       (PCM186X_PAGE_BASE(0) +   9)
+#define PCM186X_AUXADC_INPUT_SEL       (PCM186X_PAGE_BASE(0) +  10)
+#define PCM186X_PCM_CFG                        (PCM186X_PAGE_BASE(0) +  11)
+#define PCM186X_TDM_TX_SEL             (PCM186X_PAGE_BASE(0) +  12)
+#define PCM186X_TDM_TX_OFFSET          (PCM186X_PAGE_BASE(0) +  13)
+#define PCM186X_TDM_RX_OFFSET          (PCM186X_PAGE_BASE(0) +  14)
+#define PCM186X_DPGA_VAL_CH1_L         (PCM186X_PAGE_BASE(0) +  15)
+#define PCM186X_GPIO1_0_CTRL           (PCM186X_PAGE_BASE(0) +  16)
+#define PCM186X_GPIO3_2_CTRL           (PCM186X_PAGE_BASE(0) +  17)
+#define PCM186X_GPIO1_0_DIR_CTRL       (PCM186X_PAGE_BASE(0) +  18)
+#define PCM186X_GPIO3_2_DIR_CTRL       (PCM186X_PAGE_BASE(0) +  19)
+#define PCM186X_GPIO_IN_OUT            (PCM186X_PAGE_BASE(0) +  20)
+#define PCM186X_GPIO_PULL_CTRL         (PCM186X_PAGE_BASE(0) +  21)
+#define PCM186X_DPGA_VAL_CH1_R         (PCM186X_PAGE_BASE(0) +  22)
+#define PCM186X_DPGA_VAL_CH2_L         (PCM186X_PAGE_BASE(0) +  23)
+#define PCM186X_DPGA_VAL_CH2_R         (PCM186X_PAGE_BASE(0) +  24)
+#define PCM186X_DPGA_GAIN_CTRL         (PCM186X_PAGE_BASE(0) +  25)
+#define PCM186X_DPGA_MIC_CTRL          (PCM186X_PAGE_BASE(0) +  26)
+#define PCM186X_DIN_RESAMP_CTRL                (PCM186X_PAGE_BASE(0) +  27)
+#define PCM186X_CLK_CTRL               (PCM186X_PAGE_BASE(0) +  32)
+#define PCM186X_DSP1_CLK_DIV           (PCM186X_PAGE_BASE(0) +  33)
+#define PCM186X_DSP2_CLK_DIV           (PCM186X_PAGE_BASE(0) +  34)
+#define PCM186X_ADC_CLK_DIV            (PCM186X_PAGE_BASE(0) +  35)
+#define PCM186X_PLL_SCK_DIV            (PCM186X_PAGE_BASE(0) +  37)
+#define PCM186X_BCK_DIV                        (PCM186X_PAGE_BASE(0) +  38)
+#define PCM186X_LRK_DIV                        (PCM186X_PAGE_BASE(0) +  39)
+#define PCM186X_PLL_CTRL               (PCM186X_PAGE_BASE(0) +  40)
+#define PCM186X_PLL_P_DIV              (PCM186X_PAGE_BASE(0) +  41)
+#define PCM186X_PLL_R_DIV              (PCM186X_PAGE_BASE(0) +  42)
+#define PCM186X_PLL_J_DIV              (PCM186X_PAGE_BASE(0) +  43)
+#define PCM186X_PLL_D_DIV_LSB          (PCM186X_PAGE_BASE(0) +  44)
+#define PCM186X_PLL_D_DIV_MSB          (PCM186X_PAGE_BASE(0) +  45)
+#define PCM186X_SIGDET_MODE            (PCM186X_PAGE_BASE(0) +  48)
+#define PCM186X_SIGDET_MASK            (PCM186X_PAGE_BASE(0) +  49)
+#define PCM186X_SIGDET_STAT            (PCM186X_PAGE_BASE(0) +  50)
+#define PCM186X_SIGDET_LOSS_TIME       (PCM186X_PAGE_BASE(0) +  52)
+#define PCM186X_SIGDET_SCAN_TIME       (PCM186X_PAGE_BASE(0) +  53)
+#define PCM186X_SIGDET_INT_INTVL       (PCM186X_PAGE_BASE(0) +  54)
+#define PCM186X_SIGDET_DC_REF_CH1_L    (PCM186X_PAGE_BASE(0) +  64)
+#define PCM186X_SIGDET_DC_DIFF_CH1_L   (PCM186X_PAGE_BASE(0) +  65)
+#define PCM186X_SIGDET_DC_LEV_CH1_L    (PCM186X_PAGE_BASE(0) +  66)
+#define PCM186X_SIGDET_DC_REF_CH1_R    (PCM186X_PAGE_BASE(0) +  67)
+#define PCM186X_SIGDET_DC_DIFF_CH1_R   (PCM186X_PAGE_BASE(0) +  68)
+#define PCM186X_SIGDET_DC_LEV_CH1_R    (PCM186X_PAGE_BASE(0) +  69)
+#define PCM186X_SIGDET_DC_REF_CH2_L    (PCM186X_PAGE_BASE(0) +  70)
+#define PCM186X_SIGDET_DC_DIFF_CH2_L   (PCM186X_PAGE_BASE(0) +  71)
+#define PCM186X_SIGDET_DC_LEV_CH2_L    (PCM186X_PAGE_BASE(0) +  72)
+#define PCM186X_SIGDET_DC_REF_CH2_R    (PCM186X_PAGE_BASE(0) +  73)
+#define PCM186X_SIGDET_DC_DIFF_CH2_R   (PCM186X_PAGE_BASE(0) +  74)
+#define PCM186X_SIGDET_DC_LEV_CH2_R    (PCM186X_PAGE_BASE(0) +  75)
+#define PCM186X_SIGDET_DC_REF_CH3_L    (PCM186X_PAGE_BASE(0) +  76)
+#define PCM186X_SIGDET_DC_DIFF_CH3_L   (PCM186X_PAGE_BASE(0) +  77)
+#define PCM186X_SIGDET_DC_LEV_CH3_L    (PCM186X_PAGE_BASE(0) +  78)
+#define PCM186X_SIGDET_DC_REF_CH3_R    (PCM186X_PAGE_BASE(0) +  79)
+#define PCM186X_SIGDET_DC_DIFF_CH3_R   (PCM186X_PAGE_BASE(0) +  80)
+#define PCM186X_SIGDET_DC_LEV_CH3_R    (PCM186X_PAGE_BASE(0) +  81)
+#define PCM186X_SIGDET_DC_REF_CH4_L    (PCM186X_PAGE_BASE(0) +  82)
+#define PCM186X_SIGDET_DC_DIFF_CH4_L   (PCM186X_PAGE_BASE(0) +  83)
+#define PCM186X_SIGDET_DC_LEV_CH4_L    (PCM186X_PAGE_BASE(0) +  84)
+#define PCM186X_SIGDET_DC_REF_CH4_R    (PCM186X_PAGE_BASE(0) +  85)
+#define PCM186X_SIGDET_DC_DIFF_CH4_R   (PCM186X_PAGE_BASE(0) +  86)
+#define PCM186X_SIGDET_DC_LEV_CH4_R    (PCM186X_PAGE_BASE(0) +  87)
+#define PCM186X_AUXADC_DATA_CTRL       (PCM186X_PAGE_BASE(0) +  88)
+#define PCM186X_AUXADC_DATA_LSB                (PCM186X_PAGE_BASE(0) +  89)
+#define PCM186X_AUXADC_DATA_MSB                (PCM186X_PAGE_BASE(0) +  90)
+#define PCM186X_INT_ENABLE             (PCM186X_PAGE_BASE(0) +  96)
+#define PCM186X_INT_FLAG               (PCM186X_PAGE_BASE(0) +  97)
+#define PCM186X_INT_POL_WIDTH          (PCM186X_PAGE_BASE(0) +  98)
+#define PCM186X_POWER_CTRL             (PCM186X_PAGE_BASE(0) + 112)
+#define PCM186X_FILTER_MUTE_CTRL       (PCM186X_PAGE_BASE(0) + 113)
+#define PCM186X_DEVICE_STATUS          (PCM186X_PAGE_BASE(0) + 114)
+#define PCM186X_FSAMPLE_STATUS         (PCM186X_PAGE_BASE(0) + 115)
+#define PCM186X_DIV_STATUS             (PCM186X_PAGE_BASE(0) + 116)
+#define PCM186X_CLK_STATUS             (PCM186X_PAGE_BASE(0) + 117)
+#define PCM186X_SUPPLY_STATUS          (PCM186X_PAGE_BASE(0) + 120)
+
+/* Register Definitions - Page 1 */
+#define PCM186X_MMAP_STAT_CTRL         (PCM186X_PAGE_BASE(1) +   1)
+#define PCM186X_MMAP_ADDRESS           (PCM186X_PAGE_BASE(1) +   2)
+#define PCM186X_MEM_WDATA0             (PCM186X_PAGE_BASE(1) +   4)
+#define PCM186X_MEM_WDATA1             (PCM186X_PAGE_BASE(1) +   5)
+#define PCM186X_MEM_WDATA2             (PCM186X_PAGE_BASE(1) +   6)
+#define PCM186X_MEM_WDATA3             (PCM186X_PAGE_BASE(1) +   7)
+#define PCM186X_MEM_RDATA0             (PCM186X_PAGE_BASE(1) +   8)
+#define PCM186X_MEM_RDATA1             (PCM186X_PAGE_BASE(1) +   9)
+#define PCM186X_MEM_RDATA2             (PCM186X_PAGE_BASE(1) +  10)
+#define PCM186X_MEM_RDATA3             (PCM186X_PAGE_BASE(1) +  11)
+
+/* Register Definitions - Page 3 */
+#define PCM186X_OSC_PWR_DOWN_CTRL      (PCM186X_PAGE_BASE(3) +  18)
+#define PCM186X_MIC_BIAS_CTRL          (PCM186X_PAGE_BASE(3) +  21)
+
+/* Register Definitions - Page 253 */
+#define PCM186X_CURR_TRIM_CTRL         (PCM186X_PAGE_BASE(253) +  20)
+
+#define PCM186X_MAX_REGISTER           PCM186X_CURR_TRIM_CTRL
+
+/* PCM186X_PAGE */
+#define PCM186X_RESET                  0xff
+
+/* PCM186X_ADCX_INPUT_SEL_X */
+#define PCM186X_ADC_INPUT_SEL_POL      BIT(7)
+#define PCM186X_ADC_INPUT_SEL_MASK     GENMASK(5, 0)
+
+/* PCM186X_PCM_CFG */
+#define PCM186X_PCM_CFG_RX_WLEN_MASK   GENMASK(7, 6)
+#define PCM186X_PCM_CFG_RX_WLEN_SHIFT  6
+#define PCM186X_PCM_CFG_RX_WLEN_32     0x00
+#define PCM186X_PCM_CFG_RX_WLEN_24     0x01
+#define PCM186X_PCM_CFG_RX_WLEN_20     0x02
+#define PCM186X_PCM_CFG_RX_WLEN_16     0x03
+#define PCM186X_PCM_CFG_TDM_LRCK_MODE  BIT(4)
+#define PCM186X_PCM_CFG_TX_WLEN_MASK   GENMASK(3, 2)
+#define PCM186X_PCM_CFG_TX_WLEN_SHIFT  2
+#define PCM186X_PCM_CFG_TX_WLEN_32     0x00
+#define PCM186X_PCM_CFG_TX_WLEN_24     0x01
+#define PCM186X_PCM_CFG_TX_WLEN_20     0x02
+#define PCM186X_PCM_CFG_TX_WLEN_16     0x03
+#define PCM186X_PCM_CFG_FMT_MASK       GENMASK(1, 0)
+#define PCM186X_PCM_CFG_FMT_SHIFT      0
+#define PCM186X_PCM_CFG_FMT_I2S                0x00
+#define PCM186X_PCM_CFG_FMT_LEFTJ      0x01
+#define PCM186X_PCM_CFG_FMT_RIGHTJ     0x02
+#define PCM186X_PCM_CFG_FMT_TDM                0x03
+
+/* PCM186X_TDM_TX_SEL */
+#define PCM186X_TDM_TX_SEL_2CH         0x00
+#define PCM186X_TDM_TX_SEL_4CH         0x01
+#define PCM186X_TDM_TX_SEL_6CH         0x02
+#define PCM186X_TDM_TX_SEL_MASK                0x03
+
+/* PCM186X_CLK_CTRL */
+#define PCM186X_CLK_CTRL_SCK_XI_SEL1   BIT(7)
+#define PCM186X_CLK_CTRL_SCK_XI_SEL0   BIT(6)
+#define PCM186X_CLK_CTRL_SCK_SRC_PLL   BIT(5)
+#define PCM186X_CLK_CTRL_MST_MODE      BIT(4)
+#define PCM186X_CLK_CTRL_ADC_SRC_PLL   BIT(3)
+#define PCM186X_CLK_CTRL_DSP2_SRC_PLL  BIT(2)
+#define PCM186X_CLK_CTRL_DSP1_SRC_PLL  BIT(1)
+#define PCM186X_CLK_CTRL_CLKDET_EN     BIT(0)
+
+/* PCM186X_PLL_CTRL */
+#define PCM186X_PLL_CTRL_LOCK          BIT(4)
+#define PCM186X_PLL_CTRL_REF_SEL       BIT(1)
+#define PCM186X_PLL_CTRL_EN            BIT(0)
+
+/* PCM186X_POWER_CTRL */
+#define PCM186X_PWR_CTRL_PWRDN         BIT(2)
+#define PCM186X_PWR_CTRL_SLEEP         BIT(1)
+#define PCM186X_PWR_CTRL_STBY          BIT(0)
+
+/* PCM186X_CLK_STATUS */
+#define PCM186X_CLK_STATUS_LRCKHLT     BIT(6)
+#define PCM186X_CLK_STATUS_BCKHLT      BIT(5)
+#define PCM186X_CLK_STATUS_SCKHLT      BIT(4)
+#define PCM186X_CLK_STATUS_LRCKERR     BIT(2)
+#define PCM186X_CLK_STATUS_BCKERR      BIT(1)
+#define PCM186X_CLK_STATUS_SCKERR      BIT(0)
+
+/* PCM186X_SUPPLY_STATUS */
+#define PCM186X_SUPPLY_STATUS_DVDD     BIT(2)
+#define PCM186X_SUPPLY_STATUS_AVDD     BIT(1)
+#define PCM186X_SUPPLY_STATUS_LDO      BIT(0)
+
+/* PCM186X_MMAP_STAT_CTRL */
+#define PCM186X_MMAP_STAT_DONE         BIT(4)
+#define PCM186X_MMAP_STAT_BUSY         BIT(2)
+#define PCM186X_MMAP_STAT_R_REQ        BIT(1)
+#define PCM186X_MMAP_STAT_W_REQ        BIT(0)
+
+extern const struct regmap_config pcm186x_regmap;
+
+int pcm186x_probe(struct device *dev, enum pcm186x_type type, int irq,
+                 struct regmap *regmap);
+int pcm186x_remove(struct device *dev);
+
+#endif /* _PCM186X_H_ */
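
The header above addresses the codec's registers page-relative: every definition is an offset added to PCM186X_PAGE_BASE(page). Below is a minimal stand-alone sketch of how such a macro flattens (page, offset) into a single regmap address; the 0x100 page stride and the macro shape are assumptions, since the real PCM186X_PAGE_BASE()/PCM186X_PAGE_LEN definitions sit earlier in pcm186x.h, outside this hunk.

/* Editor's sketch, not part of the diff: page-relative register addressing.
 * The 0x100 stride and macro shape are assumed; the actual definition of
 * PCM186X_PAGE_BASE() lives earlier in pcm186x.h.
 */
#include <stdio.h>

#define SKETCH_PAGE_LEN         0x0100
#define SKETCH_PAGE_BASE(n)     (SKETCH_PAGE_LEN * (n))

int main(void)
{
	/* e.g. PCM186X_MIC_BIAS_CTRL: page 3, offset 21 */
	printf("page   3 + 21 -> 0x%04x\n", SKETCH_PAGE_BASE(3) + 21);
	/* PCM186X_CURR_TRIM_CTRL (PCM186X_MAX_REGISTER): page 253, offset 20 */
	printf("page 253 + 20 -> 0x%04x\n", SKETCH_PAGE_BASE(253) + 20);
	return 0;
}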
index 25c6351..7cdd2dc 100644 (file)
@@ -70,3 +70,7 @@ static struct spi_driver pcm512x_spi_driver = {
 };
 
 module_spi_driver(pcm512x_spi_driver);
+
+MODULE_DESCRIPTION("ASoC PCM512x codec driver - SPI");
+MODULE_AUTHOR("Mark Brown <broonie@kernel.org>");
+MODULE_LICENSE("GPL v2");
index 974a904..7ef3b54 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/regmap.h>
 
+#include <linux/gcd.h>
 #include "rl6231.h"
 
 /**
@@ -106,6 +107,25 @@ static const struct pll_calc_map pll_preset_table[] = {
        {19200000,  24576000,  3, 30, 3, false},
 };
 
+static unsigned int find_best_div(unsigned int in,
+       unsigned int max, unsigned int div)
+{
+       unsigned int d;
+
+       if (in <= max)
+               return 1;
+
+       d = in / max;
+       if (in % max)
+               d++;
+
+       while (div % d != 0)
+               d++;
+
+
+       return d;
+}
+
 /**
  * rl6231_pll_calc - Calculate PLL M/N/K code.
  * @freq_in: external clock provided to codec.
@@ -120,9 +140,11 @@ int rl6231_pll_calc(const unsigned int freq_in,
        const unsigned int freq_out, struct rl6231_pll_code *pll_code)
 {
        int max_n = RL6231_PLL_N_MAX, max_m = RL6231_PLL_M_MAX;
-       int i, k, red, n_t, pll_out, in_t, out_t;
-       int n = 0, m = 0, m_t = 0;
-       int red_t = abs(freq_out - freq_in);
+       int i, k, n_t;
+       int k_t, min_k, max_k, n = 0, m = 0, m_t = 0;
+       unsigned int red, pll_out, in_t, out_t, div, div_t;
+       unsigned int red_t = abs(freq_out - freq_in);
+       unsigned int f_in, f_out, f_max;
        bool bypass = false;
 
        if (RL6231_PLL_INP_MAX < freq_in || RL6231_PLL_INP_MIN > freq_in)
@@ -140,39 +162,52 @@ int rl6231_pll_calc(const unsigned int freq_in,
                }
        }
 
-       k = 100000000 / freq_out - 2;
-       if (k > RL6231_PLL_K_MAX)
-               k = RL6231_PLL_K_MAX;
-       for (n_t = 0; n_t <= max_n; n_t++) {
-               in_t = freq_in / (k + 2);
-               pll_out = freq_out / (n_t + 2);
-               if (in_t < 0)
-                       continue;
-               if (in_t == pll_out) {
-                       bypass = true;
-                       n = n_t;
-                       goto code_find;
-               }
-               red = abs(in_t - pll_out);
-               if (red < red_t) {
-                       bypass = true;
-                       n = n_t;
-                       m = m_t;
-                       if (red == 0)
+       min_k = 80000000 / freq_out - 2;
+       max_k = 150000000 / freq_out - 2;
+       if (max_k > RL6231_PLL_K_MAX)
+               max_k = RL6231_PLL_K_MAX;
+       if (min_k > RL6231_PLL_K_MAX)
+               min_k = max_k = RL6231_PLL_K_MAX;
+       div_t = gcd(freq_in, freq_out);
+       f_max = 0xffffffff / RL6231_PLL_N_MAX;
+       div = find_best_div(freq_in, f_max, div_t);
+       f_in = freq_in / div;
+       f_out = freq_out / div;
+       k = min_k;
+       for (k_t = min_k; k_t <= max_k; k_t++) {
+               for (n_t = 0; n_t <= max_n; n_t++) {
+                       in_t = f_in * (n_t + 2);
+                       pll_out = f_out * (k_t + 2);
+                       if (in_t == pll_out) {
+                               bypass = true;
+                               n = n_t;
+                               k = k_t;
                                goto code_find;
-                       red_t = red;
-               }
-               for (m_t = 0; m_t <= max_m; m_t++) {
-                       out_t = in_t / (m_t + 2);
-                       red = abs(out_t - pll_out);
+                       }
+                       out_t = in_t / (k_t + 2);
+                       red = abs(f_out - out_t);
                        if (red < red_t) {
-                               bypass = false;
+                               bypass = true;
                                n = n_t;
-                               m = m_t;
+                               m = 0;
+                               k = k_t;
                                if (red == 0)
                                        goto code_find;
                                red_t = red;
                        }
+                       for (m_t = 0; m_t <= max_m; m_t++) {
+                               out_t = in_t / ((m_t + 2) * (k_t + 2));
+                               red = abs(f_out - out_t);
+                               if (red < red_t) {
+                                       bypass = false;
+                                       n = n_t;
+                                       m = m_t;
+                                       k = k_t;
+                                       if (red == 0)
+                                               goto code_find;
+                                       red_t = red;
+                               }
+                       }
                }
        }
        pr_debug("Only get approximation about PLL\n");
index 2df91db..2144edc 100644 (file)
@@ -289,6 +289,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_platform *platform)
                        dev_err(&rt5514_spi->dev,
                                "%s Failed to reguest IRQ: %d\n", __func__,
                                ret);
+               else
+                       device_init_wakeup(rt5514_dsp->dev, true);
        }
 
        return 0;
@@ -379,6 +381,7 @@ int rt5514_spi_burst_read(unsigned int addr, u8 *rxbuf, size_t len)
 
        return true;
 }
+EXPORT_SYMBOL_GPL(rt5514_spi_burst_read);
 
 /**
  * rt5514_spi_burst_write - Write data to SPI by rt5514 address.
@@ -456,8 +459,6 @@ static int rt5514_spi_probe(struct spi_device *spi)
                return ret;
        }
 
-       device_init_wakeup(&spi->dev, true);
-
        return 0;
 }
 
@@ -482,10 +483,13 @@ static int __maybe_unused rt5514_resume(struct device *dev)
        if (device_may_wakeup(dev))
                disable_irq_wake(irq);
 
-       if (rt5514_dsp->substream) {
-               rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf, sizeof(buf));
-               if (buf[0] & RT5514_IRQ_STATUS_BIT)
-                       rt5514_schedule_copy(rt5514_dsp);
+       if (rt5514_dsp) {
+               if (rt5514_dsp->substream) {
+                       rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf,
+                               sizeof(buf));
+                       if (buf[0] & RT5514_IRQ_STATUS_BIT)
+                               rt5514_schedule_copy(rt5514_dsp);
+               }
        }
 
        return 0;
index 2a5b5d7..198df01 100644 (file)
@@ -295,6 +295,33 @@ static int rt5514_dsp_voice_wake_up_get(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
+static int rt5514_calibration(struct rt5514_priv *rt5514, bool on)
+{
+       if (on) {
+               regmap_write(rt5514->regmap, RT5514_ANA_CTRL_PLL3, 0x0000000a);
+               regmap_update_bits(rt5514->regmap, RT5514_PLL_SOURCE_CTRL, 0xf,
+                       0xa);
+               regmap_update_bits(rt5514->regmap, RT5514_PWR_ANA1, 0x301,
+                       0x301);
+               regmap_write(rt5514->regmap, RT5514_PLL3_CALIB_CTRL4,
+                       0x80000000 | rt5514->pll3_cal_value);
+               regmap_write(rt5514->regmap, RT5514_PLL3_CALIB_CTRL1,
+                       0x8bb80800);
+               regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+                       0xc0000000, 0x80000000);
+               regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+                       0xc0000000, 0xc0000000);
+       } else {
+               regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+                       0xc0000000, 0x40000000);
+               regmap_update_bits(rt5514->regmap, RT5514_PWR_ANA1, 0x301, 0);
+               regmap_update_bits(rt5514->regmap, RT5514_PLL_SOURCE_CTRL, 0xf,
+                       0x4);
+       }
+
+       return 0;
+}
+
 static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
                struct snd_ctl_elem_value *ucontrol)
 {
@@ -302,6 +329,7 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
        struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component);
        struct snd_soc_codec *codec = rt5514->codec;
        const struct firmware *fw = NULL;
+       u8 buf[8];
 
        if (ucontrol->value.integer.value[0] == rt5514->dsp_enabled)
                return 0;
@@ -310,6 +338,35 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
                rt5514->dsp_enabled = ucontrol->value.integer.value[0];
 
                if (rt5514->dsp_enabled) {
+                       if (rt5514->pdata.dsp_calib_clk_name &&
+                               !IS_ERR(rt5514->dsp_calib_clk)) {
+                               if (clk_set_rate(rt5514->dsp_calib_clk,
+                                       rt5514->pdata.dsp_calib_clk_rate))
+                                       dev_err(codec->dev,
+                                               "Can't set rate for mclk");
+
+                               if (clk_prepare_enable(rt5514->dsp_calib_clk))
+                                       dev_err(codec->dev,
+                                               "Can't enable dsp_calib_clk");
+
+                               rt5514_calibration(rt5514, true);
+
+                               msleep(20);
+#if IS_ENABLED(CONFIG_SND_SOC_RT5514_SPI)
+                               rt5514_spi_burst_read(RT5514_PLL3_CALIB_CTRL6 |
+                                       RT5514_DSP_MAPPING,
+                                       (u8 *)&buf, sizeof(buf));
+#else
+                               dev_err(codec->dev, "There is no SPI driver for"
+                                       " loading the firmware\n");
+#endif
+                               rt5514->pll3_cal_value = buf[0] | buf[1] << 8 |
+                                       buf[2] << 16 | buf[3] << 24;
+
+                               rt5514_calibration(rt5514, false);
+                               clk_disable_unprepare(rt5514->dsp_calib_clk);
+                       }
+
                        rt5514_enable_dsp_prepare(rt5514);
 
                        request_firmware(&fw, RT5514_FIRMWARE1, codec->dev);
@@ -341,6 +398,20 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
                        /* DSP run */
                        regmap_write(rt5514->i2c_regmap, 0x18002f00,
                                0x00055148);
+
+                       if (rt5514->pdata.dsp_calib_clk_name &&
+                               !IS_ERR(rt5514->dsp_calib_clk)) {
+                               msleep(20);
+
+                               regmap_write(rt5514->i2c_regmap, 0x1800211c,
+                                       rt5514->pll3_cal_value);
+                               regmap_write(rt5514->i2c_regmap, 0x18002124,
+                                       0x00220012);
+                               regmap_write(rt5514->i2c_regmap, 0x18002124,
+                                       0x80220042);
+                               regmap_write(rt5514->i2c_regmap, 0x18002124,
+                                       0xe0220042);
+                       }
                } else {
                        regmap_multi_reg_write(rt5514->i2c_regmap,
                                rt5514_i2c_patch, ARRAY_SIZE(rt5514_i2c_patch));
@@ -496,7 +567,7 @@ static const struct snd_soc_dapm_widget rt5514_dapm_widgets[] = {
        SND_SOC_DAPM_PGA("DMIC1", SND_SOC_NOPM, 0, 0, NULL, 0),
        SND_SOC_DAPM_PGA("DMIC2", SND_SOC_NOPM, 0, 0, NULL, 0),
 
-       SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0,
+       SND_SOC_DAPM_SUPPLY_S("DMIC CLK", 1, SND_SOC_NOPM, 0, 0,
                rt5514_set_dmic_clk, SND_SOC_DAPM_PRE_PMU),
 
        SND_SOC_DAPM_SUPPLY("ADC CLK", RT5514_CLK_CTRL1,
@@ -1024,12 +1095,22 @@ static int rt5514_set_bias_level(struct snd_soc_codec *codec,
 static int rt5514_probe(struct snd_soc_codec *codec)
 {
        struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+       struct platform_device *pdev = container_of(codec->dev,
+                                                  struct platform_device, dev);
 
        rt5514->mclk = devm_clk_get(codec->dev, "mclk");
        if (PTR_ERR(rt5514->mclk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
+       if (rt5514->pdata.dsp_calib_clk_name) {
+               rt5514->dsp_calib_clk = devm_clk_get(&pdev->dev,
+                               rt5514->pdata.dsp_calib_clk_name);
+               if (PTR_ERR(rt5514->dsp_calib_clk) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+       }
+
        rt5514->codec = codec;
+       rt5514->pll3_cal_value = 0x0078b000;
 
        return 0;
 }
@@ -1147,6 +1228,10 @@ static int rt5514_parse_dp(struct rt5514_priv *rt5514, struct device *dev)
 {
        device_property_read_u32(dev, "realtek,dmic-init-delay-ms",
                &rt5514->pdata.dmic_init_delay);
+       device_property_read_string(dev, "realtek,dsp-calib-clk-name",
+               &rt5514->pdata.dsp_calib_clk_name);
+       device_property_read_u32(dev, "realtek,dsp-calib-clk-rate",
+               &rt5514->pdata.dsp_calib_clk_rate);
 
        return 0;
 }
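
The calibration path above returns its result as an 8-byte SPI burst, of which only the first four bytes are kept and assembled little-endian into pll3_cal_value (default 0x0078b000, later written back to the DSP at 0x1800211c). A small stand-alone sketch of that byte assembly, not part of the diff:

/* Editor's sketch, not part of the diff: the little-endian byte assembly
 * used for pll3_cal_value, written as a stand-alone helper.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t le32_from_bytes(const uint8_t *buf)
{
	/* equivalent to: buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24 */
	return (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
	       ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
}

int main(void)
{
	/* bytes that decode to the driver's 0x0078b000 default */
	uint8_t buf[8] = { 0x00, 0xb0, 0x78, 0x00, 0, 0, 0, 0 };

	printf("pll3_cal_value = 0x%08x\n", (unsigned int)le32_from_bytes(buf));
	return 0;
}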
index 2dc40e6..f0f3400 100644 (file)
@@ -34,7 +34,9 @@
 #define RT5514_CLK_CTRL1                       0x2104
 #define RT5514_CLK_CTRL2                       0x2108
 #define RT5514_PLL3_CALIB_CTRL1                        0x2110
+#define RT5514_PLL3_CALIB_CTRL4                        0x2120
 #define RT5514_PLL3_CALIB_CTRL5                        0x2124
+#define RT5514_PLL3_CALIB_CTRL6                        0x2128
 #define RT5514_DELAY_BUF_CTRL1                 0x2140
 #define RT5514_DELAY_BUF_CTRL3                 0x2148
 #define RT5514_ASRC_IN_CTRL1                   0x2180
@@ -272,7 +274,7 @@ struct rt5514_priv {
        struct rt5514_platform_data pdata;
        struct snd_soc_codec *codec;
        struct regmap *i2c_regmap, *regmap;
-       struct clk *mclk;
+       struct clk *mclk, *dsp_calib_clk;
        int sysclk;
        int sysclk_src;
        int lrck;
@@ -281,6 +283,7 @@ struct rt5514_priv {
        int pll_in;
        int pll_out;
        int dsp_enabled;
+       unsigned int pll3_cal_value;
 };
 
 #endif /* __RT5514_H__ */
index f020d2d..8f140c8 100644 (file)
@@ -1943,6 +1943,56 @@ static int rt5650_hp_event(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
+static int rt5645_set_micbias1_event(struct snd_soc_dapm_widget *w,
+               struct snd_kcontrol *k, int  event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+                       RT5645_MICBIAS1_POW_CTRL_SEL_MASK,
+                       RT5645_MICBIAS1_POW_CTRL_SEL_M);
+               break;
+
+       case SND_SOC_DAPM_POST_PMD:
+               snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+                       RT5645_MICBIAS1_POW_CTRL_SEL_MASK,
+                       RT5645_MICBIAS1_POW_CTRL_SEL_A);
+               break;
+
+       default:
+               return 0;
+       }
+
+       return 0;
+}
+
+static int rt5645_set_micbias2_event(struct snd_soc_dapm_widget *w,
+               struct snd_kcontrol *k, int  event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+                       RT5645_MICBIAS2_POW_CTRL_SEL_MASK,
+                       RT5645_MICBIAS2_POW_CTRL_SEL_M);
+               break;
+
+       case SND_SOC_DAPM_POST_PMD:
+               snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+                       RT5645_MICBIAS2_POW_CTRL_SEL_MASK,
+                       RT5645_MICBIAS2_POW_CTRL_SEL_A);
+               break;
+
+       default:
+               return 0;
+       }
+
+       return 0;
+}
+
 static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("LDO2", RT5645_PWR_MIXER,
                RT5645_PWR_LDO2_BIT, 0, NULL, 0),
@@ -1980,10 +2030,12 @@ static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
 
        /* Input Side */
        /* micbias */
-       SND_SOC_DAPM_MICBIAS("micbias1", RT5645_PWR_ANLG2,
-                       RT5645_PWR_MB1_BIT, 0),
-       SND_SOC_DAPM_MICBIAS("micbias2", RT5645_PWR_ANLG2,
-                       RT5645_PWR_MB2_BIT, 0),
+       SND_SOC_DAPM_SUPPLY("micbias1", RT5645_PWR_ANLG2,
+                       RT5645_PWR_MB1_BIT, 0, rt5645_set_micbias1_event,
+                       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+       SND_SOC_DAPM_SUPPLY("micbias2", RT5645_PWR_ANLG2,
+                       RT5645_PWR_MB2_BIT, 0, rt5645_set_micbias2_event,
+                       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
        /* Input Lines */
        SND_SOC_DAPM_INPUT("DMIC L1"),
        SND_SOC_DAPM_INPUT("DMIC R1"),
@@ -3394,6 +3446,9 @@ static int rt5645_probe(struct snd_soc_codec *codec)
                snd_soc_dapm_sync(dapm);
        }
 
+       if (rt5645->pdata.long_name)
+               codec->component.card->long_name = rt5645->pdata.long_name;
+
        rt5645->eq_param = devm_kzalloc(codec->dev,
                RT5645_HWEQ_NUM * sizeof(struct rt5645_eq_param_s), GFP_KERNEL);
 
@@ -3570,63 +3625,74 @@ static const struct acpi_device_id rt5645_acpi_match[] = {
 MODULE_DEVICE_TABLE(acpi, rt5645_acpi_match);
 #endif
 
-static const struct rt5645_platform_data general_platform_data = {
+static const struct rt5645_platform_data intel_braswell_platform_data = {
        .dmic1_data_pin = RT5645_DMIC1_DISABLE,
        .dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
        .jd_mode = 3,
 };
 
-static const struct dmi_system_id dmi_platform_intel_braswell[] = {
+static const struct rt5645_platform_data buddy_platform_data = {
+       .dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
+       .dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
+       .jd_mode = 3,
+       .level_trigger_irq = true,
+};
+
+static const struct rt5645_platform_data gpd_win_platform_data = {
+       .jd_mode = 3,
+       .inv_jd1_1 = true,
+       .long_name = "gpd-win-pocket-rt5645",
+       /* The GPD pocket has a diff. mic, for the win this does not matter. */
+       .in2_diff = true,
+};
+
+static const struct rt5645_platform_data asus_t100ha_platform_data = {
+       .dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
+       .dmic2_data_pin = RT5645_DMIC2_DISABLE,
+       .jd_mode = 3,
+       .inv_jd1_1 = true,
+};
+
+static const struct rt5645_platform_data jd_mode3_platform_data = {
+       .jd_mode = 3,
+};
+
+static const struct dmi_system_id dmi_platform_data[] = {
+       {
+               .ident = "Chrome Buddy",
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Buddy"),
+               },
+               .driver_data = (void *)&buddy_platform_data,
+       },
        {
                .ident = "Intel Strago",
                .matches = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Strago"),
                },
+               .driver_data = (void *)&intel_braswell_platform_data,
        },
        {
                .ident = "Google Chrome",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                },
+               .driver_data = (void *)&intel_braswell_platform_data,
        },
        {
                .ident = "Google Setzer",
                .matches = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
                },
+               .driver_data = (void *)&intel_braswell_platform_data,
        },
        {
                .ident = "Microsoft Surface 3",
                .matches = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
                },
+               .driver_data = (void *)&intel_braswell_platform_data,
        },
-       { }
-};
-
-static const struct rt5645_platform_data buddy_platform_data = {
-       .dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
-       .dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
-       .jd_mode = 3,
-       .level_trigger_irq = true,
-};
-
-static const struct dmi_system_id dmi_platform_intel_broadwell[] = {
-       {
-               .ident = "Chrome Buddy",
-               .matches = {
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Buddy"),
-               },
-       },
-       { }
-};
-
-static const struct rt5645_platform_data gpd_win_platform_data = {
-       .jd_mode = 3,
-       .inv_jd1_1 = true,
-};
-
-static const struct dmi_system_id dmi_platform_gpd_win[] = {
        {
                /*
                 * Match for the GPDwin which unfortunately uses somewhat
@@ -3637,46 +3703,38 @@ static const struct dmi_system_id dmi_platform_gpd_win[] = {
                 * the same default product_name. Also the GPDwin is the
                 * only device to have both board_ and product_name not set.
                 */
-               .ident = "GPD Win",
+               .ident = "GPD Win / Pocket",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
                        DMI_MATCH(DMI_BOARD_NAME, "Default string"),
                        DMI_MATCH(DMI_BOARD_SERIAL, "Default string"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
                },
+               .driver_data = (void *)&gpd_win_platform_data,
        },
-       {}
-};
-
-static const struct rt5645_platform_data general_platform_data2 = {
-       .dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
-       .dmic2_data_pin = RT5645_DMIC2_DISABLE,
-       .jd_mode = 3,
-       .inv_jd1_1 = true,
-};
-
-static const struct dmi_system_id dmi_platform_asus_t100ha[] = {
        {
                .ident = "ASUS T100HAN",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
                },
+               .driver_data = (void *)&asus_t100ha_platform_data,
        },
-       { }
-};
-
-static const struct rt5645_platform_data minix_z83_4_platform_data = {
-       .jd_mode = 3,
-};
-
-static const struct dmi_system_id dmi_platform_minix_z83_4[] = {
        {
                .ident = "MINIX Z83-4",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MINIX"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
                },
+               .driver_data = (void *)&jd_mode3_platform_data,
+       },
+       {
+               .ident = "Teclast X80 Pro",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X80 Pro"),
+               },
+               .driver_data = (void *)&jd_mode3_platform_data,
        },
        { }
 };
@@ -3684,9 +3742,9 @@ static const struct dmi_system_id dmi_platform_minix_z83_4[] = {
 static bool rt5645_check_dp(struct device *dev)
 {
        if (device_property_present(dev, "realtek,in2-differential") ||
-               device_property_present(dev, "realtek,dmic1-data-pin") ||
-               device_property_present(dev, "realtek,dmic2-data-pin") ||
-               device_property_present(dev, "realtek,jd-mode"))
+           device_property_present(dev, "realtek,dmic1-data-pin") ||
+           device_property_present(dev, "realtek,dmic2-data-pin") ||
+           device_property_present(dev, "realtek,jd-mode"))
                return true;
 
        return false;
@@ -3710,6 +3768,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
                    const struct i2c_device_id *id)
 {
        struct rt5645_platform_data *pdata = dev_get_platdata(&i2c->dev);
+       const struct dmi_system_id *dmi_data;
        struct rt5645_priv *rt5645;
        int ret, i;
        unsigned int val;
@@ -3723,20 +3782,18 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
        rt5645->i2c = i2c;
        i2c_set_clientdata(i2c, rt5645);
 
+       dmi_data = dmi_first_match(dmi_platform_data);
+       if (dmi_data) {
+               dev_info(&i2c->dev, "Detected %s platform\n", dmi_data->ident);
+               pdata = dmi_data->driver_data;
+       }
+
        if (pdata)
                rt5645->pdata = *pdata;
-       else if (dmi_check_system(dmi_platform_intel_broadwell))
-               rt5645->pdata = buddy_platform_data;
        else if (rt5645_check_dp(&i2c->dev))
                rt5645_parse_dt(rt5645, &i2c->dev);
-       else if (dmi_check_system(dmi_platform_intel_braswell))
-               rt5645->pdata = general_platform_data;
-       else if (dmi_check_system(dmi_platform_gpd_win))
-               rt5645->pdata = gpd_win_platform_data;
-       else if (dmi_check_system(dmi_platform_asus_t100ha))
-               rt5645->pdata = general_platform_data2;
-       else if (dmi_check_system(dmi_platform_minix_z83_4))
-               rt5645->pdata = minix_z83_4_platform_data;
+       else
+               rt5645->pdata = jd_mode3_platform_data;
 
        if (quirk != -1) {
                rt5645->pdata.in2_diff = QUIRK_IN2_DIFF(quirk);
@@ -3823,6 +3880,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
        regmap_read(regmap, RT5645_VENDOR_ID, &val);
        rt5645->v_id = val & 0xff;
 
+       regmap_write(rt5645->regmap, RT5645_AD_DA_MIXER, 0x8080);
+
        ret = regmap_register_patch(rt5645->regmap, init_list,
                                    ARRAY_SIZE(init_list));
        if (ret != 0)
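
The rt5645 rework above folds the separate per-board dmi_system_id tables into a single dmi_platform_data[] table whose entries carry their platform data in .driver_data, so probe needs one dmi_first_match() instead of a chain of dmi_check_system() calls; boards without platform data or DT properties now default to jd_mode3_platform_data. Below is a minimal kernel-style sketch of that selection pattern, not part of the diff; the ex_* names are hypothetical.

/* Editor's sketch, not part of the diff: per-board platform data selected
 * through a single DMI table.  All ex_* names are hypothetical.
 */
#include <linux/dmi.h>

struct ex_pdata {
	int jd_mode;
};

static const struct ex_pdata ex_board_a_pdata = { .jd_mode = 3 };

static const struct dmi_system_id ex_dmi_table[] = {
	{
		.ident = "Board A",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "Board A"),
		},
		.driver_data = (void *)&ex_board_a_pdata,
	},
	{ }
};

static const struct ex_pdata *ex_get_pdata(void)
{
	const struct dmi_system_id *id = dmi_first_match(ex_dmi_table);

	/* fall back to DT properties or a default when nothing matches */
	return id ? id->driver_data : NULL;
}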
index cfc5f97..940325b 100644 (file)
@@ -2117,6 +2117,12 @@ enum {
 #define RT5645_RXDC_SRC_STO                    (0x0 << 7)
 #define RT5645_RXDC_SRC_MONO                   (0x1 << 7)
 #define RT5645_RXDC_SRC_SFT                    (7)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_MASK      (0x1 << 5)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_A         (0x0 << 5)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_M         (0x1 << 5)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_MASK      (0x1 << 4)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_A         (0x0 << 4)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_M         (0x1 << 4)
 #define RT5645_RXDP2_SEL_MASK                  (0x1 << 3)
 #define RT5645_RXDP2_SEL_IF2                   (0x0 << 3)
 #define RT5645_RXDP2_SEL_ADC                   (0x1 << 3)
index b036c9d..d329bf7 100644 (file)
@@ -1560,6 +1560,10 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
                        RT5663_IRQ_POW_SAV_MASK, RT5663_IRQ_POW_SAV_EN);
                snd_soc_update_bits(codec, RT5663_IRQ_1,
                        RT5663_EN_IRQ_JD1_MASK, RT5663_EN_IRQ_JD1_EN);
+               snd_soc_update_bits(codec, RT5663_EM_JACK_TYPE_1,
+                       RT5663_EM_JD_MASK, RT5663_EM_JD_RST);
+               snd_soc_update_bits(codec, RT5663_EM_JACK_TYPE_1,
+                       RT5663_EM_JD_MASK, RT5663_EM_JD_NOR);
 
                while (true) {
                        regmap_read(rt5663->regmap, RT5663_INT_ST_2, &val);
index c5a9b69..03adc80 100644 (file)
 #define RT5663_POL_EXT_JD_SHIFT                        10
 #define RT5663_POL_EXT_JD_EN                   (0x1 << 10)
 #define RT5663_POL_EXT_JD_DIS                  (0x0 << 10)
+#define RT5663_EM_JD_MASK                      (0x1 << 7)
+#define RT5663_EM_JD_SHIFT                     7
+#define RT5663_EM_JD_NOR                       (0x1 << 7)
+#define RT5663_EM_JD_RST                       (0x0 << 7)
 
 /* DACREF LDO Control (0x0112)*/
 #define RT5663_PWR_LDO_DACREFL_MASK            (0x1 << 9)
index f2bb4fe..633cdcf 100644 (file)
@@ -1332,10 +1332,13 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
        sgtl5000->mclk = devm_clk_get(&client->dev, NULL);
        if (IS_ERR(sgtl5000->mclk)) {
                ret = PTR_ERR(sgtl5000->mclk);
-               dev_err(&client->dev, "Failed to get mclock: %d\n", ret);
                /* Defer the probe to see if the clk will be provided later */
                if (ret == -ENOENT)
                        ret = -EPROBE_DEFER;
+
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&client->dev, "Failed to get mclock: %d\n",
+                               ret);
                goto disable_regs;
        }
 
index 354dc0d..7b91ee2 100644 (file)
@@ -231,14 +231,17 @@ static struct snd_soc_dai_driver si476x_dai = {
        .ops            = &si476x_dai_ops,
 };
 
-static struct regmap *si476x_get_regmap(struct device *dev)
+static int si476x_probe(struct snd_soc_component *component)
 {
-       return dev_get_regmap(dev->parent, NULL);
+       snd_soc_component_init_regmap(component,
+                               dev_get_regmap(component->dev->parent, NULL));
+
+       return 0;
 }
 
 static const struct snd_soc_codec_driver soc_codec_dev_si476x = {
-       .get_regmap = si476x_get_regmap,
        .component_driver = {
+               .probe                  = si476x_probe,
                .dapm_widgets           = si476x_dapm_widgets,
                .num_dapm_widgets       = ARRAY_SIZE(si476x_dapm_widgets),
                .dapm_routes            = si476x_dapm_routes,
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
deleted file mode 100644 (file)
index 887923e..0000000
+++ /dev/null
@@ -1,936 +0,0 @@
-/*
- *  sn95031.c -  TI sn95031 Codec driver
- *
- *  Copyright (C) 2010 Intel Corp
- *  Author: Vinod Koul <vinod.koul@intel.com>
- *  Author: Harsha Priya <priya.harsha@intel.com>
- *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include <asm/intel_scu_ipc.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/initval.h>
-#include <sound/tlv.h>
-#include <sound/jack.h>
-#include "sn95031.h"
-
-#define SN95031_RATES (SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_44100)
-#define SN95031_FORMATS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
-
-/* adc helper functions */
-
-/* enables mic bias voltage */
-static void sn95031_enable_mic_bias(struct snd_soc_codec *codec)
-{
-       snd_soc_write(codec, SN95031_VAUD, BIT(2)|BIT(1)|BIT(0));
-       snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(2), BIT(2));
-}
-
-/* Enable/Disable the ADC depending on the argument */
-static void configure_adc(struct snd_soc_codec *sn95031_codec, int val)
-{
-       int value = snd_soc_read(sn95031_codec, SN95031_ADC1CNTL1);
-
-       if (val) {
-               /* Enable and start the ADC */
-               value |= (SN95031_ADC_ENBL | SN95031_ADC_START);
-               value &= (~SN95031_ADC_NO_LOOP);
-       } else {
-               /* Just stop the ADC */
-               value &= (~SN95031_ADC_START);
-       }
-       snd_soc_write(sn95031_codec, SN95031_ADC1CNTL1, value);
-}
-
-/*
- * finds an empty channel for conversion
- * If the ADC is not enabled then start using 0th channel
- * itself. Otherwise find an empty channel by looking for a
- * channel in which the stopbit is set to 1. returns the index
- * of the first free channel if succeeds or an error code.
- *
- * Context: can sleep
- *
- */
-static int find_free_channel(struct snd_soc_codec *sn95031_codec)
-{
-       int i, value;
-
-       /* check whether ADC is enabled */
-       value = snd_soc_read(sn95031_codec, SN95031_ADC1CNTL1);
-
-       if ((value & SN95031_ADC_ENBL) == 0)
-               return 0;
-
-       /* ADC is already enabled; Looking for an empty channel */
-       for (i = 0; i < SN95031_ADC_CHANLS_MAX; i++) {
-               value = snd_soc_read(sn95031_codec,
-                               SN95031_ADC_CHNL_START_ADDR + i);
-               if (value & SN95031_STOPBIT_MASK)
-                       break;
-       }
-       return (i == SN95031_ADC_CHANLS_MAX) ? (-EINVAL) : i;
-}
-
-/* Initialize the ADC for reading micbias values. Can sleep. */
-static int sn95031_initialize_adc(struct snd_soc_codec *sn95031_codec)
-{
-       int base_addr, chnl_addr;
-       int value;
-       int channel_index;
-
-       /* Index of the first channel in which the stop bit is set */
-       channel_index = find_free_channel(sn95031_codec);
-       if (channel_index < 0) {
-               pr_err("No free ADC channels");
-               return channel_index;
-       }
-
-       base_addr = SN95031_ADC_CHNL_START_ADDR + channel_index;
-
-       if (!(channel_index == 0 || channel_index ==  SN95031_ADC_LOOP_MAX)) {
-               /* Reset stop bit for channels other than 0 and 12 */
-               value = snd_soc_read(sn95031_codec, base_addr);
-               /* Set the stop bit to zero */
-               snd_soc_write(sn95031_codec, base_addr, value & 0xEF);
-               /* Index of the first free channel */
-               base_addr++;
-               channel_index++;
-       }
-
-       /* Since this is the last channel, set the stop bit
-          to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
-       snd_soc_write(sn95031_codec, base_addr,
-                               SN95031_AUDIO_DETECT_CODE | 0x10);
-
-       chnl_addr = SN95031_ADC_DATA_START_ADDR + 2 * channel_index;
-       pr_debug("mid_initialize : %x", chnl_addr);
-       configure_adc(sn95031_codec, 1);
-       return chnl_addr;
-}
-
-
-/* reads the ADC registers and gets the mic bias value in mV. */
-static unsigned int sn95031_get_mic_bias(struct snd_soc_codec *codec)
-{
-       u16 adc_adr = sn95031_initialize_adc(codec);
-       u16 adc_val1, adc_val2;
-       unsigned int mic_bias;
-
-       sn95031_enable_mic_bias(codec);
-
-       /* Enable the sound card for conversion before reading */
-       snd_soc_write(codec, SN95031_ADC1CNTL3, 0x05);
-       /* Re-toggle the RRDATARD bit */
-       snd_soc_write(codec, SN95031_ADC1CNTL3, 0x04);
-
-       /* Read the higher bits of data */
-       msleep(1000);
-       adc_val1 = snd_soc_read(codec, adc_adr);
-       adc_adr++;
-       adc_val2 = snd_soc_read(codec, adc_adr);
-
-       /* Adding lower two bits to the higher bits */
-       mic_bias = (adc_val1 << 2) + (adc_val2 & 3);
-       mic_bias = (mic_bias * SN95031_ADC_ONE_LSB_MULTIPLIER) / 1000;
-       pr_debug("mic bias = %dmV\n", mic_bias);
-       return mic_bias;
-}
-/*end - adc helper functions */
-
-static int sn95031_read(void *ctx, unsigned int reg, unsigned int *val)
-{
-       u8 value = 0;
-       int ret;
-
-       ret = intel_scu_ipc_ioread8(reg, &value);
-       if (ret == 0)
-               *val = value;
-
-       return ret;
-}
-
-static int sn95031_write(void *ctx, unsigned int reg, unsigned int value)
-{
-       return intel_scu_ipc_iowrite8(reg, value);
-}
-
-static const struct regmap_config sn95031_regmap = {
-       .reg_read = sn95031_read,
-       .reg_write = sn95031_write,
-};
-
-static int sn95031_set_vaud_bias(struct snd_soc_codec *codec,
-               enum snd_soc_bias_level level)
-{
-       switch (level) {
-       case SND_SOC_BIAS_ON:
-               break;
-
-       case SND_SOC_BIAS_PREPARE:
-               if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_STANDBY) {
-                       pr_debug("vaud_bias powering up pll\n");
-                       /* power up the pll */
-                       snd_soc_write(codec, SN95031_AUDPLLCTRL, BIT(5));
-                       /* enable pcm 2 */
-                       snd_soc_update_bits(codec, SN95031_PCM2C2,
-                                       BIT(0), BIT(0));
-               }
-               break;
-
-       case SND_SOC_BIAS_STANDBY:
-               switch (snd_soc_codec_get_bias_level(codec)) {
-               case SND_SOC_BIAS_OFF:
-                       pr_debug("vaud_bias power up rail\n");
-                       /* power up the rail */
-                       snd_soc_write(codec, SN95031_VAUD,
-                                       BIT(2)|BIT(1)|BIT(0));
-                       msleep(1);
-                       break;
-               case SND_SOC_BIAS_PREPARE:
-                       /* turn off pcm */
-                       pr_debug("vaud_bias power dn pcm\n");
-                       snd_soc_update_bits(codec, SN95031_PCM2C2, BIT(0), 0);
-                       snd_soc_write(codec, SN95031_AUDPLLCTRL, 0);
-                       break;
-               default:
-                       break;
-               }
-               break;
-
-
-       case SND_SOC_BIAS_OFF:
-               pr_debug("vaud_bias _OFF doing rail shutdown\n");
-               snd_soc_write(codec, SN95031_VAUD, BIT(3));
-               break;
-       }
-
-       return 0;
-}
-
-static int sn95031_vhs_event(struct snd_soc_dapm_widget *w,
-                   struct snd_kcontrol *kcontrol, int event)
-{
-       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
-       if (SND_SOC_DAPM_EVENT_ON(event)) {
-               pr_debug("VHS SND_SOC_DAPM_EVENT_ON doing rail startup now\n");
-               /* power up the rail */
-               snd_soc_write(codec, SN95031_VHSP, 0x3D);
-               snd_soc_write(codec, SN95031_VHSN, 0x3F);
-               msleep(1);
-       } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
-               pr_debug("VHS SND_SOC_DAPM_EVENT_OFF doing rail shutdown\n");
-               snd_soc_write(codec, SN95031_VHSP, 0xC4);
-               snd_soc_write(codec, SN95031_VHSN, 0x04);
-       }
-       return 0;
-}
-
-static int sn95031_vihf_event(struct snd_soc_dapm_widget *w,
-                   struct snd_kcontrol *kcontrol, int event)
-{
-       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
-       if (SND_SOC_DAPM_EVENT_ON(event)) {
-               pr_debug("VIHF SND_SOC_DAPM_EVENT_ON doing rail startup now\n");
-               /* power up the rail */
-               snd_soc_write(codec, SN95031_VIHF, 0x27);
-               msleep(1);
-       } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
-               pr_debug("VIHF SND_SOC_DAPM_EVENT_OFF doing rail shutdown\n");
-               snd_soc_write(codec, SN95031_VIHF, 0x24);
-       }
-       return 0;
-}
-
-static int sn95031_dmic12_event(struct snd_soc_dapm_widget *w,
-                       struct snd_kcontrol *k, int event)
-{
-       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-       unsigned int ldo = 0, clk_dir = 0, data_dir = 0;
-
-       if (SND_SOC_DAPM_EVENT_ON(event)) {
-               ldo = BIT(5)|BIT(4);
-               clk_dir = BIT(0);
-               data_dir = BIT(7);
-       }
-       /* program DMIC LDO, clock and set clock */
-       snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
-       snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(0), clk_dir);
-       snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(7), data_dir);
-       return 0;
-}
-
-static int sn95031_dmic34_event(struct snd_soc_dapm_widget *w,
-                       struct snd_kcontrol *k, int event)
-{
-       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-       unsigned int ldo = 0, clk_dir = 0, data_dir = 0;
-
-       if (SND_SOC_DAPM_EVENT_ON(event)) {
-               ldo = BIT(5)|BIT(4);
-               clk_dir = BIT(2);
-               data_dir = BIT(1);
-       }
-       /* program DMIC LDO, clock and set clock */
-       snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
-       snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(2), clk_dir);
-       snd_soc_update_bits(codec, SN95031_DMICBUF45, BIT(1), data_dir);
-       return 0;
-}
-
-static int sn95031_dmic56_event(struct snd_soc_dapm_widget *w,
-                       struct snd_kcontrol *k, int event)
-{
-       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-       unsigned int ldo = 0;
-
-       if (SND_SOC_DAPM_EVENT_ON(event))
-               ldo = BIT(7)|BIT(6);
-
-       /* program DMIC LDO */
-       snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(7)|BIT(6), ldo);
-       return 0;
-}
-
-/* mux controls */
-static const char *sn95031_mic_texts[] = { "AMIC", "LineIn" };
-
-static SOC_ENUM_SINGLE_DECL(sn95031_micl_enum,
-                           SN95031_ADCCONFIG, 1, sn95031_mic_texts);
-
-static const struct snd_kcontrol_new sn95031_micl_mux_control =
-       SOC_DAPM_ENUM("Route", sn95031_micl_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_micr_enum,
-                           SN95031_ADCCONFIG, 3, sn95031_mic_texts);
-
-static const struct snd_kcontrol_new sn95031_micr_mux_control =
-       SOC_DAPM_ENUM("Route", sn95031_micr_enum);
-
-static const char *sn95031_input_texts[] = {   "DMIC1", "DMIC2", "DMIC3",
-                                               "DMIC4", "DMIC5", "DMIC6",
-                                               "ADC Left", "ADC Right" };
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input1_enum,
-                           SN95031_AUDIOMUX12, 0, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input1_mux_control =
-       SOC_DAPM_ENUM("Route", sn95031_input1_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input2_enum,
-                           SN95031_AUDIOMUX12, 4, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input2_mux_control =
-       SOC_DAPM_ENUM("Route", sn95031_input2_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input3_enum,
-                           SN95031_AUDIOMUX34, 0, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input3_mux_control =
-       SOC_DAPM_ENUM("Route", sn95031_input3_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input4_enum,
-                           SN95031_AUDIOMUX34, 4, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input4_mux_control =
-       SOC_DAPM_ENUM("Route", sn95031_input4_enum);
-
-/* capture path controls */
-
-static const char *sn95031_micmode_text[] = {"Single Ended", "Differential"};
-
-/* 0dB to 30dB in 10dB steps */
-static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 10, 0);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_micmode1_enum,
-                           SN95031_MICAMP1, 1, sn95031_micmode_text);
-static SOC_ENUM_SINGLE_DECL(sn95031_micmode2_enum,
-                           SN95031_MICAMP2, 1, sn95031_micmode_text);
-
-static const char *sn95031_dmic_cfg_text[] = {"GPO", "DMIC"};
-
-static SOC_ENUM_SINGLE_DECL(sn95031_dmic12_cfg_enum,
-                           SN95031_DMICMUX, 0, sn95031_dmic_cfg_text);
-static SOC_ENUM_SINGLE_DECL(sn95031_dmic34_cfg_enum,
-                           SN95031_DMICMUX, 1, sn95031_dmic_cfg_text);
-static SOC_ENUM_SINGLE_DECL(sn95031_dmic56_cfg_enum,
-                           SN95031_DMICMUX, 2, sn95031_dmic_cfg_text);
-
-static const struct snd_kcontrol_new sn95031_snd_controls[] = {
-       SOC_ENUM("Mic1Mode Capture Route", sn95031_micmode1_enum),
-       SOC_ENUM("Mic2Mode Capture Route", sn95031_micmode2_enum),
-       SOC_ENUM("DMIC12 Capture Route", sn95031_dmic12_cfg_enum),
-       SOC_ENUM("DMIC34 Capture Route", sn95031_dmic34_cfg_enum),
-       SOC_ENUM("DMIC56 Capture Route", sn95031_dmic56_cfg_enum),
-       SOC_SINGLE_TLV("Mic1 Capture Volume", SN95031_MICAMP1,
-                       2, 4, 0, mic_tlv),
-       SOC_SINGLE_TLV("Mic2 Capture Volume", SN95031_MICAMP2,
-                       2, 4, 0, mic_tlv),
-};
-
-/* DAPM widgets */
-static const struct snd_soc_dapm_widget sn95031_dapm_widgets[] = {
-
-       /* all end points mic, hs etc */
-       SND_SOC_DAPM_OUTPUT("HPOUTL"),
-       SND_SOC_DAPM_OUTPUT("HPOUTR"),
-       SND_SOC_DAPM_OUTPUT("EPOUT"),
-       SND_SOC_DAPM_OUTPUT("IHFOUTL"),
-       SND_SOC_DAPM_OUTPUT("IHFOUTR"),
-       SND_SOC_DAPM_OUTPUT("LINEOUTL"),
-       SND_SOC_DAPM_OUTPUT("LINEOUTR"),
-       SND_SOC_DAPM_OUTPUT("VIB1OUT"),
-       SND_SOC_DAPM_OUTPUT("VIB2OUT"),
-
-       SND_SOC_DAPM_INPUT("AMIC1"), /* headset mic */
-       SND_SOC_DAPM_INPUT("AMIC2"),
-       SND_SOC_DAPM_INPUT("DMIC1"),
-       SND_SOC_DAPM_INPUT("DMIC2"),
-       SND_SOC_DAPM_INPUT("DMIC3"),
-       SND_SOC_DAPM_INPUT("DMIC4"),
-       SND_SOC_DAPM_INPUT("DMIC5"),
-       SND_SOC_DAPM_INPUT("DMIC6"),
-       SND_SOC_DAPM_INPUT("LINEINL"),
-       SND_SOC_DAPM_INPUT("LINEINR"),
-
-       SND_SOC_DAPM_MICBIAS("AMIC1Bias", SN95031_MICBIAS, 2, 0),
-       SND_SOC_DAPM_MICBIAS("AMIC2Bias", SN95031_MICBIAS, 3, 0),
-       SND_SOC_DAPM_MICBIAS("DMIC12Bias", SN95031_DMICMUX, 3, 0),
-       SND_SOC_DAPM_MICBIAS("DMIC34Bias", SN95031_DMICMUX, 4, 0),
-       SND_SOC_DAPM_MICBIAS("DMIC56Bias", SN95031_DMICMUX, 5, 0),
-
-       SND_SOC_DAPM_SUPPLY("DMIC12supply", SN95031_DMICLK, 0, 0,
-                               sn95031_dmic12_event,
-                               SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-       SND_SOC_DAPM_SUPPLY("DMIC34supply", SN95031_DMICLK, 1, 0,
-                               sn95031_dmic34_event,
-                               SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-       SND_SOC_DAPM_SUPPLY("DMIC56supply", SN95031_DMICLK, 2, 0,
-                               sn95031_dmic56_event,
-                               SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
-       SND_SOC_DAPM_AIF_OUT("PCM_Out", "Capture", 0,
-                       SND_SOC_NOPM, 0, 0),
-
-       SND_SOC_DAPM_SUPPLY("Headset Rail", SND_SOC_NOPM, 0, 0,
-                       sn95031_vhs_event,
-                       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-       SND_SOC_DAPM_SUPPLY("Speaker Rail", SND_SOC_NOPM, 0, 0,
-                       sn95031_vihf_event,
-                       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
-       /* playback path driver enables */
-       SND_SOC_DAPM_PGA("Headset Left Playback",
-                       SN95031_DRIVEREN, 0, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("Headset Right Playback",
-                       SN95031_DRIVEREN, 1, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("Speaker Left Playback",
-                       SN95031_DRIVEREN, 2, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("Speaker Right Playback",
-                       SN95031_DRIVEREN, 3, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("Vibra1 Playback",
-                       SN95031_DRIVEREN, 4, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("Vibra2 Playback",
-                       SN95031_DRIVEREN, 5, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("Earpiece Playback",
-                       SN95031_DRIVEREN, 6, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("Lineout Left Playback",
-                       SN95031_LOCTL, 0, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("Lineout Right Playback",
-                       SN95031_LOCTL, 4, 0, NULL, 0),
-
-       /* playback path filter enable */
-       SND_SOC_DAPM_PGA("Headset Left Filter",
-                       SN95031_HSEPRXCTRL, 4, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("Headset Right Filter",
-                       SN95031_HSEPRXCTRL, 5, 0,  NULL, 0),
-       SND_SOC_DAPM_PGA("Speaker Left Filter",
-                       SN95031_IHFRXCTRL, 0, 0,  NULL, 0),
-       SND_SOC_DAPM_PGA("Speaker Right Filter",
-                       SN95031_IHFRXCTRL, 1, 0,  NULL, 0),
-
-       /* DACs */
-       SND_SOC_DAPM_DAC("HSDAC Left", "Headset",
-                       SN95031_DACCONFIG, 0, 0),
-       SND_SOC_DAPM_DAC("HSDAC Right", "Headset",
-                       SN95031_DACCONFIG, 1, 0),
-       SND_SOC_DAPM_DAC("IHFDAC Left", "Speaker",
-                       SN95031_DACCONFIG, 2, 0),
-       SND_SOC_DAPM_DAC("IHFDAC Right", "Speaker",
-                       SN95031_DACCONFIG, 3, 0),
-       SND_SOC_DAPM_DAC("Vibra1 DAC", "Vibra1",
-                       SN95031_VIB1C5, 1, 0),
-       SND_SOC_DAPM_DAC("Vibra2 DAC", "Vibra2",
-                       SN95031_VIB2C5, 1, 0),
-
-       /* capture widgets */
-       SND_SOC_DAPM_PGA("LineIn Enable Left", SN95031_MICAMP1,
-                               7, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("LineIn Enable Right", SN95031_MICAMP2,
-                               7, 0, NULL, 0),
-
-       SND_SOC_DAPM_PGA("MIC1 Enable", SN95031_MICAMP1, 0, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("MIC2 Enable", SN95031_MICAMP2, 0, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("TX1 Enable", SN95031_AUDIOTXEN, 2, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("TX2 Enable", SN95031_AUDIOTXEN, 3, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("TX3 Enable", SN95031_AUDIOTXEN, 4, 0, NULL, 0),
-       SND_SOC_DAPM_PGA("TX4 Enable", SN95031_AUDIOTXEN, 5, 0, NULL, 0),
-
-       /* ADC have null stream as they will be turned ON by TX path */
-       SND_SOC_DAPM_ADC("ADC Left", NULL,
-                       SN95031_ADCCONFIG, 0, 0),
-       SND_SOC_DAPM_ADC("ADC Right", NULL,
-                       SN95031_ADCCONFIG, 2, 0),
-
-       SND_SOC_DAPM_MUX("Mic_InputL Capture Route",
-                       SND_SOC_NOPM, 0, 0, &sn95031_micl_mux_control),
-       SND_SOC_DAPM_MUX("Mic_InputR Capture Route",
-                       SND_SOC_NOPM, 0, 0, &sn95031_micr_mux_control),
-
-       SND_SOC_DAPM_MUX("Txpath1 Capture Route",
-                       SND_SOC_NOPM, 0, 0, &sn95031_input1_mux_control),
-       SND_SOC_DAPM_MUX("Txpath2 Capture Route",
-                       SND_SOC_NOPM, 0, 0, &sn95031_input2_mux_control),
-       SND_SOC_DAPM_MUX("Txpath3 Capture Route",
-                       SND_SOC_NOPM, 0, 0, &sn95031_input3_mux_control),
-       SND_SOC_DAPM_MUX("Txpath4 Capture Route",
-                       SND_SOC_NOPM, 0, 0, &sn95031_input4_mux_control),
-
-};
-
-static const struct snd_soc_dapm_route sn95031_audio_map[] = {
-       /* headset and earpiece map */
-       { "HPOUTL", NULL, "Headset Rail"},
-       { "HPOUTR", NULL, "Headset Rail"},
-       { "HPOUTL", NULL, "Headset Left Playback" },
-       { "HPOUTR", NULL, "Headset Right Playback" },
-       { "EPOUT", NULL, "Earpiece Playback" },
-       { "Headset Left Playback", NULL, "Headset Left Filter"},
-       { "Headset Right Playback", NULL, "Headset Right Filter"},
-       { "Earpiece Playback", NULL, "Headset Left Filter"},
-       { "Headset Left Filter", NULL, "HSDAC Left"},
-       { "Headset Right Filter", NULL, "HSDAC Right"},
-
-       /* speaker map */
-       { "IHFOUTL", NULL, "Speaker Rail"},
-       { "IHFOUTR", NULL, "Speaker Rail"},
-       { "IHFOUTL", NULL, "Speaker Left Playback"},
-       { "IHFOUTR", NULL, "Speaker Right Playback"},
-       { "Speaker Left Playback", NULL, "Speaker Left Filter"},
-       { "Speaker Right Playback", NULL, "Speaker Right Filter"},
-       { "Speaker Left Filter", NULL, "IHFDAC Left"},
-       { "Speaker Right Filter", NULL, "IHFDAC Right"},
-
-       /* vibra map */
-       { "VIB1OUT", NULL, "Vibra1 Playback"},
-       { "Vibra1 Playback", NULL, "Vibra1 DAC"},
-
-       { "VIB2OUT", NULL, "Vibra2 Playback"},
-       { "Vibra2 Playback", NULL, "Vibra2 DAC"},
-
-       /* lineout */
-       { "LINEOUTL", NULL, "Lineout Left Playback"},
-       { "LINEOUTR", NULL, "Lineout Right Playback"},
-       { "Lineout Left Playback", NULL, "Headset Left Filter"},
-       { "Lineout Left Playback", NULL, "Speaker Left Filter"},
-       { "Lineout Left Playback", NULL, "Vibra1 DAC"},
-       { "Lineout Right Playback", NULL, "Headset Right Filter"},
-       { "Lineout Right Playback", NULL, "Speaker Right Filter"},
-       { "Lineout Right Playback", NULL, "Vibra2 DAC"},
-
-       /* Headset (AMIC1) mic */
-       { "AMIC1Bias", NULL, "AMIC1"},
-       { "MIC1 Enable", NULL, "AMIC1Bias"},
-       { "Mic_InputL Capture Route", "AMIC", "MIC1 Enable"},
-
-       /* AMIC2 */
-       { "AMIC2Bias", NULL, "AMIC2"},
-       { "MIC2 Enable", NULL, "AMIC2Bias"},
-       { "Mic_InputR Capture Route", "AMIC", "MIC2 Enable"},
-
-
-       /* Linein */
-       { "LineIn Enable Left", NULL, "LINEINL"},
-       { "LineIn Enable Right", NULL, "LINEINR"},
-       { "Mic_InputL Capture Route", "LineIn", "LineIn Enable Left"},
-       { "Mic_InputR Capture Route", "LineIn", "LineIn Enable Right"},
-
-       /* ADC connection */
-       { "ADC Left", NULL, "Mic_InputL Capture Route"},
-       { "ADC Right", NULL, "Mic_InputR Capture Route"},
-
-       /*DMIC connections */
-       { "DMIC1", NULL, "DMIC12supply"},
-       { "DMIC2", NULL, "DMIC12supply"},
-       { "DMIC3", NULL, "DMIC34supply"},
-       { "DMIC4", NULL, "DMIC34supply"},
-       { "DMIC5", NULL, "DMIC56supply"},
-       { "DMIC6", NULL, "DMIC56supply"},
-
-       { "DMIC12Bias", NULL, "DMIC1"},
-       { "DMIC12Bias", NULL, "DMIC2"},
-       { "DMIC34Bias", NULL, "DMIC3"},
-       { "DMIC34Bias", NULL, "DMIC4"},
-       { "DMIC56Bias", NULL, "DMIC5"},
-       { "DMIC56Bias", NULL, "DMIC6"},
-
-       /*TX path inputs*/
-       { "Txpath1 Capture Route", "ADC Left", "ADC Left"},
-       { "Txpath2 Capture Route", "ADC Left", "ADC Left"},
-       { "Txpath3 Capture Route", "ADC Left", "ADC Left"},
-       { "Txpath4 Capture Route", "ADC Left", "ADC Left"},
-       { "Txpath1 Capture Route", "ADC Right", "ADC Right"},
-       { "Txpath2 Capture Route", "ADC Right", "ADC Right"},
-       { "Txpath3 Capture Route", "ADC Right", "ADC Right"},
-       { "Txpath4 Capture Route", "ADC Right", "ADC Right"},
-       { "Txpath1 Capture Route", "DMIC1", "DMIC1"},
-       { "Txpath2 Capture Route", "DMIC1", "DMIC1"},
-       { "Txpath3 Capture Route", "DMIC1", "DMIC1"},
-       { "Txpath4 Capture Route", "DMIC1", "DMIC1"},
-       { "Txpath1 Capture Route", "DMIC2", "DMIC2"},
-       { "Txpath2 Capture Route", "DMIC2", "DMIC2"},
-       { "Txpath3 Capture Route", "DMIC2", "DMIC2"},
-       { "Txpath4 Capture Route", "DMIC2", "DMIC2"},
-       { "Txpath1 Capture Route", "DMIC3", "DMIC3"},
-       { "Txpath2 Capture Route", "DMIC3", "DMIC3"},
-       { "Txpath3 Capture Route", "DMIC3", "DMIC3"},
-       { "Txpath4 Capture Route", "DMIC3", "DMIC3"},
-       { "Txpath1 Capture Route", "DMIC4", "DMIC4"},
-       { "Txpath2 Capture Route", "DMIC4", "DMIC4"},
-       { "Txpath3 Capture Route", "DMIC4", "DMIC4"},
-       { "Txpath4 Capture Route", "DMIC4", "DMIC4"},
-       { "Txpath1 Capture Route", "DMIC5", "DMIC5"},
-       { "Txpath2 Capture Route", "DMIC5", "DMIC5"},
-       { "Txpath3 Capture Route", "DMIC5", "DMIC5"},
-       { "Txpath4 Capture Route", "DMIC5", "DMIC5"},
-       { "Txpath1 Capture Route", "DMIC6", "DMIC6"},
-       { "Txpath2 Capture Route", "DMIC6", "DMIC6"},
-       { "Txpath3 Capture Route", "DMIC6", "DMIC6"},
-       { "Txpath4 Capture Route", "DMIC6", "DMIC6"},
-
-       /* tx path */
-       { "TX1 Enable", NULL, "Txpath1 Capture Route"},
-       { "TX2 Enable", NULL, "Txpath2 Capture Route"},
-       { "TX3 Enable", NULL, "Txpath3 Capture Route"},
-       { "TX4 Enable", NULL, "Txpath4 Capture Route"},
-       { "PCM_Out", NULL, "TX1 Enable"},
-       { "PCM_Out", NULL, "TX2 Enable"},
-       { "PCM_Out", NULL, "TX3 Enable"},
-       { "PCM_Out", NULL, "TX4 Enable"},
-
-};
-
-/* speaker and headset mutes, for audio pops and clicks */
-static int sn95031_pcm_hs_mute(struct snd_soc_dai *dai, int mute)
-{
-       snd_soc_update_bits(dai->codec,
-                       SN95031_HSLVOLCTRL, BIT(7), (!mute << 7));
-       snd_soc_update_bits(dai->codec,
-                       SN95031_HSRVOLCTRL, BIT(7), (!mute << 7));
-       return 0;
-}
-
-static int sn95031_pcm_spkr_mute(struct snd_soc_dai *dai, int mute)
-{
-       snd_soc_update_bits(dai->codec,
-                       SN95031_IHFLVOLCTRL, BIT(7), (!mute << 7));
-       snd_soc_update_bits(dai->codec,
-                       SN95031_IHFRVOLCTRL, BIT(7), (!mute << 7));
-       return 0;
-}
-
-static int sn95031_pcm_hw_params(struct snd_pcm_substream *substream,
-               struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
-{
-       unsigned int format, rate;
-
-       switch (params_width(params)) {
-       case 16:
-               format = BIT(4)|BIT(5);
-               break;
-
-       case 24:
-               format = 0;
-               break;
-       default:
-               return -EINVAL;
-       }
-       snd_soc_update_bits(dai->codec, SN95031_PCM2C2,
-                       BIT(4)|BIT(5), format);
-
-       switch (params_rate(params)) {
-       case 48000:
-               pr_debug("RATE_48000\n");
-               rate = 0;
-               break;
-
-       case 44100:
-               pr_debug("RATE_44100\n");
-               rate = BIT(7);
-               break;
-
-       default:
-               pr_err("ERR rate %d\n", params_rate(params));
-               return -EINVAL;
-       }
-       snd_soc_update_bits(dai->codec, SN95031_PCM1C1, BIT(7), rate);
-
-       return 0;
-}
-
-/* Codec DAI section */
-static const struct snd_soc_dai_ops sn95031_headset_dai_ops = {
-       .digital_mute   = sn95031_pcm_hs_mute,
-       .hw_params      = sn95031_pcm_hw_params,
-};
-
-static const struct snd_soc_dai_ops sn95031_speaker_dai_ops = {
-       .digital_mute   = sn95031_pcm_spkr_mute,
-       .hw_params      = sn95031_pcm_hw_params,
-};
-
-static const struct snd_soc_dai_ops sn95031_vib1_dai_ops = {
-       .hw_params      = sn95031_pcm_hw_params,
-};
-
-static const struct snd_soc_dai_ops sn95031_vib2_dai_ops = {
-       .hw_params      = sn95031_pcm_hw_params,
-};
-
-static struct snd_soc_dai_driver sn95031_dais[] = {
-{
-       .name = "SN95031 Headset",
-       .playback = {
-               .stream_name = "Headset",
-               .channels_min = 2,
-               .channels_max = 2,
-               .rates = SN95031_RATES,
-               .formats = SN95031_FORMATS,
-       },
-       .capture = {
-               .stream_name = "Capture",
-               .channels_min = 1,
-               .channels_max = 5,
-               .rates = SN95031_RATES,
-               .formats = SN95031_FORMATS,
-       },
-       .ops = &sn95031_headset_dai_ops,
-},
-{      .name = "SN95031 Speaker",
-       .playback = {
-               .stream_name = "Speaker",
-               .channels_min = 2,
-               .channels_max = 2,
-               .rates = SN95031_RATES,
-               .formats = SN95031_FORMATS,
-       },
-       .ops = &sn95031_speaker_dai_ops,
-},
-{      .name = "SN95031 Vibra1",
-       .playback = {
-               .stream_name = "Vibra1",
-               .channels_min = 1,
-               .channels_max = 1,
-               .rates = SN95031_RATES,
-               .formats = SN95031_FORMATS,
-       },
-       .ops = &sn95031_vib1_dai_ops,
-},
-{      .name = "SN95031 Vibra2",
-       .playback = {
-               .stream_name = "Vibra2",
-               .channels_min = 1,
-               .channels_max = 1,
-               .rates = SN95031_RATES,
-               .formats = SN95031_FORMATS,
-       },
-       .ops = &sn95031_vib2_dai_ops,
-},
-};
-
-static inline void sn95031_disable_jack_btn(struct snd_soc_codec *codec)
-{
-       snd_soc_write(codec, SN95031_BTNCTRL2, 0x00);
-}
-
-static inline void sn95031_enable_jack_btn(struct snd_soc_codec *codec)
-{
-       snd_soc_write(codec, SN95031_BTNCTRL1, 0x77);
-       snd_soc_write(codec, SN95031_BTNCTRL2, 0x01);
-}
-
-static int sn95031_get_headset_state(struct snd_soc_codec *codec,
-       struct snd_soc_jack *mfld_jack)
-{
-       int micbias = sn95031_get_mic_bias(codec);
-
-       int jack_type = snd_soc_jack_get_type(mfld_jack, micbias);
-
-       pr_debug("jack type detected = %d\n", jack_type);
-       if (jack_type == SND_JACK_HEADSET)
-               sn95031_enable_jack_btn(codec);
-       return jack_type;
-}
-
-void sn95031_jack_detection(struct snd_soc_codec *codec,
-       struct mfld_jack_data *jack_data)
-{
-       unsigned int status;
-       unsigned int mask = SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_HEADSET;
-
-       pr_debug("interrupt id read in sram = 0x%x\n", jack_data->intr_id);
-       if (jack_data->intr_id & 0x1) {
-               pr_debug("short_push detected\n");
-               status = SND_JACK_HEADSET | SND_JACK_BTN_0;
-       } else if (jack_data->intr_id & 0x2) {
-               pr_debug("long_push detected\n");
-               status = SND_JACK_HEADSET | SND_JACK_BTN_1;
-       } else if (jack_data->intr_id & 0x4) {
-               pr_debug("headset or headphones inserted\n");
-               status = sn95031_get_headset_state(codec, jack_data->mfld_jack);
-       } else if (jack_data->intr_id & 0x8) {
-               pr_debug("headset or headphones removed\n");
-               status = 0;
-               sn95031_disable_jack_btn(codec);
-       } else {
-               pr_err("unidentified interrupt\n");
-               return;
-       }
-
-       snd_soc_jack_report(jack_data->mfld_jack, status, mask);
-       /*button pressed and released so we send explicit button release */
-       if ((status & SND_JACK_BTN_0) | (status & SND_JACK_BTN_1))
-               snd_soc_jack_report(jack_data->mfld_jack,
-                               SND_JACK_HEADSET, mask);
-}
-EXPORT_SYMBOL_GPL(sn95031_jack_detection);
-
-/* codec registration */
-static int sn95031_codec_probe(struct snd_soc_codec *codec)
-{
-       pr_debug("codec_probe called\n");
-
-       /* PCM interface config
-        * This sets the pcm rx slot conguration to max 6 slots
-        * for max 4 dais (2 stereo and 2 mono)
-        */
-       snd_soc_write(codec, SN95031_PCM2RXSLOT01, 0x10);
-       snd_soc_write(codec, SN95031_PCM2RXSLOT23, 0x32);
-       snd_soc_write(codec, SN95031_PCM2RXSLOT45, 0x54);
-       snd_soc_write(codec, SN95031_PCM2TXSLOT01, 0x10);
-       snd_soc_write(codec, SN95031_PCM2TXSLOT23, 0x32);
-       /* pcm port setting
-        * This sets the pcm port to slave and clock at 19.2Mhz which
-        * can support 6slots, sampling rate set per stream in hw-params
-        */
-       snd_soc_write(codec, SN95031_PCM1C1, 0x00);
-       snd_soc_write(codec, SN95031_PCM2C1, 0x01);
-       snd_soc_write(codec, SN95031_PCM2C2, 0x0A);
-       snd_soc_write(codec, SN95031_HSMIXER, BIT(0)|BIT(4));
-       /* vendor vibra workround, the vibras are muted by
-        * custom register so unmute them
-        */
-       snd_soc_write(codec, SN95031_SSR5, 0x80);
-       snd_soc_write(codec, SN95031_SSR6, 0x80);
-       snd_soc_write(codec, SN95031_VIB1C5, 0x00);
-       snd_soc_write(codec, SN95031_VIB2C5, 0x00);
-       /* configure vibras for pcm port */
-       snd_soc_write(codec, SN95031_VIB1C3, 0x00);
-       snd_soc_write(codec, SN95031_VIB2C3, 0x00);
-
-       /* soft mute ramp time */
-       snd_soc_write(codec, SN95031_SOFTMUTE, 0x3);
-       /* fix the initial volume at 1dB,
-        * default in +9dB,
-        * 1dB give optimal swing on DAC, amps
-        */
-       snd_soc_write(codec, SN95031_HSLVOLCTRL, 0x08);
-       snd_soc_write(codec, SN95031_HSRVOLCTRL, 0x08);
-       snd_soc_write(codec, SN95031_IHFLVOLCTRL, 0x08);
-       snd_soc_write(codec, SN95031_IHFRVOLCTRL, 0x08);
-       /* dac mode and lineout workaround */
-       snd_soc_write(codec, SN95031_SSR2, 0x10);
-       snd_soc_write(codec, SN95031_SSR3, 0x40);
-
-       return 0;
-}
-
-static const struct snd_soc_codec_driver sn95031_codec = {
-       .probe          = sn95031_codec_probe,
-       .set_bias_level = sn95031_set_vaud_bias,
-       .idle_bias_off  = true,
-
-       .component_driver = {
-               .controls               = sn95031_snd_controls,
-               .num_controls           = ARRAY_SIZE(sn95031_snd_controls),
-               .dapm_widgets           = sn95031_dapm_widgets,
-               .num_dapm_widgets       = ARRAY_SIZE(sn95031_dapm_widgets),
-               .dapm_routes            = sn95031_audio_map,
-               .num_dapm_routes        = ARRAY_SIZE(sn95031_audio_map),
-       },
-};
-
-static int sn95031_device_probe(struct platform_device *pdev)
-{
-       struct regmap *regmap;
-
-       pr_debug("codec device probe called for %s\n", dev_name(&pdev->dev));
-
-       regmap = devm_regmap_init(&pdev->dev, NULL, NULL, &sn95031_regmap);
-       if (IS_ERR(regmap))
-               return PTR_ERR(regmap);
-
-       return snd_soc_register_codec(&pdev->dev, &sn95031_codec,
-                       sn95031_dais, ARRAY_SIZE(sn95031_dais));
-}
-
-static int sn95031_device_remove(struct platform_device *pdev)
-{
-       pr_debug("codec device remove called\n");
-       snd_soc_unregister_codec(&pdev->dev);
-       return 0;
-}
-
-static struct platform_driver sn95031_codec_driver = {
-       .driver         = {
-               .name           = "sn95031",
-       },
-       .probe          = sn95031_device_probe,
-       .remove         = sn95031_device_remove,
-};
-
-module_platform_driver(sn95031_codec_driver);
-
-MODULE_DESCRIPTION("ASoC TI SN95031 codec driver");
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sn95031");
diff --git a/sound/soc/codecs/sn95031.h b/sound/soc/codecs/sn95031.h
deleted file mode 100644 (file)
index 7651fe4..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- *  sn95031.h - TI sn95031 Codec driver
- *
- *  Copyright (C) 2010 Intel Corp
- *  Author: Vinod Koul <vinod.koul@intel.com>
- *  Author: Harsha Priya <priya.harsha@intel.com>
- *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-#ifndef _SN95031_H
-#define _SN95031_H
-
-/*register map*/
-#define SN95031_VAUD                   0xDB
-#define SN95031_VHSP                   0xDC
-#define SN95031_VHSN                   0xDD
-#define SN95031_VIHF                   0xC9
-
-#define SN95031_AUDPLLCTRL             0x240
-#define SN95031_DMICBUF0123            0x241
-#define SN95031_DMICBUF45              0x242
-#define SN95031_DMICGPO                        0x244
-#define SN95031_DMICMUX                        0x245
-#define SN95031_DMICLK                 0x246
-#define SN95031_MICBIAS                        0x247
-#define SN95031_ADCCONFIG              0x248
-#define SN95031_MICAMP1                        0x249
-#define SN95031_MICAMP2                        0x24A
-#define SN95031_NOISEMUX               0x24B
-#define SN95031_AUDIOMUX12             0x24C
-#define SN95031_AUDIOMUX34             0x24D
-#define SN95031_AUDIOSINC              0x24E
-#define SN95031_AUDIOTXEN              0x24F
-#define SN95031_HSEPRXCTRL             0x250
-#define SN95031_IHFRXCTRL              0x251
-#define SN95031_HSMIXER                        0x256
-#define SN95031_DACCONFIG              0x257
-#define SN95031_SOFTMUTE               0x258
-#define SN95031_HSLVOLCTRL             0x259
-#define SN95031_HSRVOLCTRL             0x25A
-#define SN95031_IHFLVOLCTRL            0x25B
-#define SN95031_IHFRVOLCTRL            0x25C
-#define SN95031_DRIVEREN               0x25D
-#define SN95031_LOCTL                  0x25E
-#define SN95031_VIB1C1                 0x25F
-#define SN95031_VIB1C2                 0x260
-#define SN95031_VIB1C3                 0x261
-#define SN95031_VIB1SPIPCM1            0x262
-#define SN95031_VIB1SPIPCM2            0x263
-#define SN95031_VIB1C5                 0x264
-#define SN95031_VIB2C1                 0x265
-#define SN95031_VIB2C2                 0x266
-#define SN95031_VIB2C3                 0x267
-#define SN95031_VIB2SPIPCM1            0x268
-#define SN95031_VIB2SPIPCM2            0x269
-#define SN95031_VIB2C5                 0x26A
-#define SN95031_BTNCTRL1               0x26B
-#define SN95031_BTNCTRL2               0x26C
-#define SN95031_PCM1TXSLOT01           0x26D
-#define SN95031_PCM1TXSLOT23           0x26E
-#define SN95031_PCM1TXSLOT45           0x26F
-#define SN95031_PCM1RXSLOT0_3          0x270
-#define SN95031_PCM1RXSLOT45           0x271
-#define SN95031_PCM2TXSLOT01           0x272
-#define SN95031_PCM2TXSLOT23           0x273
-#define SN95031_PCM2TXSLOT45           0x274
-#define SN95031_PCM2RXSLOT01           0x275
-#define SN95031_PCM2RXSLOT23           0x276
-#define SN95031_PCM2RXSLOT45           0x277
-#define SN95031_PCM1C1                 0x278
-#define SN95031_PCM1C2                 0x279
-#define SN95031_PCM1C3                 0x27A
-#define SN95031_PCM2C1                 0x27B
-#define SN95031_PCM2C2                 0x27C
-/*end codec register defn*/
-
-/*vendor defn these are not part of avp*/
-#define SN95031_SSR2                   0x381
-#define SN95031_SSR3                   0x382
-#define SN95031_SSR5                   0x384
-#define SN95031_SSR6                   0x385
-
-/* ADC registers */
-
-#define SN95031_ADC1CNTL1 0x1C0
-#define SN95031_ADC_ENBL 0x10
-#define SN95031_ADC_START 0x08
-#define SN95031_ADC1CNTL3 0x1C2
-#define SN95031_ADCTHERM_ENBL 0x04
-#define SN95031_ADCRRDATA_ENBL 0x05
-#define SN95031_STOPBIT_MASK 16
-#define SN95031_ADCTHERM_MASK 4
-#define SN95031_ADC_CHANLS_MAX 15 /* Number of ADC channels */
-#define SN95031_ADC_LOOP_MAX (SN95031_ADC_CHANLS_MAX - 1)
-#define SN95031_ADC_NO_LOOP 0x07
-#define SN95031_AUDIO_GPIO_CTRL 0x070
-
-/* ADC channel code values */
-#define SN95031_AUDIO_DETECT_CODE 0x06
-
-/* ADC base addresses */
-#define SN95031_ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
-#define SN95031_ADC_DATA_START_ADDR 0x1D4  /* increments by 2 */
-/* multipier to convert to mV */
-#define SN95031_ADC_ONE_LSB_MULTIPLIER 2346
-
-
-struct mfld_jack_data {
-       int intr_id;
-       int micbias_vol;
-       struct snd_soc_jack *mfld_jack;
-};
-
-extern void sn95031_jack_detection(struct snd_soc_codec *codec,
-       struct mfld_jack_data *jack_data);
-
-#endif
index 7acd051..c8fd636 100644 (file)
@@ -34,10 +34,11 @@ static const struct snd_soc_dapm_route dir_routes[] = {
 #define STUB_RATES     SNDRV_PCM_RATE_8000_192000
 #define STUB_FORMATS   (SNDRV_PCM_FMTBIT_S16_LE | \
                        SNDRV_PCM_FMTBIT_S20_3LE | \
-                       SNDRV_PCM_FMTBIT_S24_LE | \
+                       SNDRV_PCM_FMTBIT_S24_LE  | \
+                       SNDRV_PCM_FMTBIT_S32_LE | \
                        SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
 
-static const struct snd_soc_codec_driver soc_codec_spdif_dir = {
+static struct snd_soc_codec_driver soc_codec_spdif_dir = {
        .component_driver = {
                .dapm_widgets           = dir_widgets,
                .num_dapm_widgets       = ARRAY_SIZE(dir_widgets),
index 063a64f..037aa1d 100644 (file)
@@ -27,7 +27,8 @@
 #define STUB_RATES     SNDRV_PCM_RATE_8000_192000
 #define STUB_FORMATS   (SNDRV_PCM_FMTBIT_S16_LE | \
                        SNDRV_PCM_FMTBIT_S20_3LE | \
-                       SNDRV_PCM_FMTBIT_S24_LE)
+                       SNDRV_PCM_FMTBIT_S24_LE  | \
+                       SNDRV_PCM_FMTBIT_S32_LE)
 
 static const struct snd_soc_dapm_widget dit_widgets[] = {
        SND_SOC_DAPM_OUTPUT("spdif-out"),
@@ -37,7 +38,7 @@ static const struct snd_soc_dapm_route dit_routes[] = {
        { "spdif-out", NULL, "Playback" },
 };
 
-static const struct snd_soc_codec_driver soc_codec_spdif_dit = {
+static struct snd_soc_codec_driver soc_codec_spdif_dit = {
        .component_driver = {
                .dapm_widgets           = dit_widgets,
                .num_dapm_widgets       = ARRAY_SIZE(dit_widgets),
index a736a2a..f3006f3 100644 (file)
 /* Define how often to check (and clear) the fault status register (in ms) */
 #define TAS5720_FAULT_CHECK_INTERVAL           200
 
+enum tas572x_type {
+       TAS5720,
+       TAS5722,
+};
+
 static const char * const tas5720_supply_names[] = {
        "dvdd",         /* Digital power supply. Connect to 3.3-V supply. */
        "pvdd",         /* Class-D amp and analog power supply (connected). */
@@ -47,6 +52,7 @@ struct tas5720_data {
        struct snd_soc_codec *codec;
        struct regmap *regmap;
        struct i2c_client *tas5720_client;
+       enum tas572x_type devtype;
        struct regulator_bulk_data supplies[TAS5720_NUM_SUPPLIES];
        struct delayed_work fault_check_work;
        unsigned int last_fault;
@@ -264,7 +270,7 @@ out:
 static int tas5720_codec_probe(struct snd_soc_codec *codec)
 {
        struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
-       unsigned int device_id;
+       unsigned int device_id, expected_device_id;
        int ret;
 
        tas5720->codec = codec;
@@ -276,6 +282,11 @@ static int tas5720_codec_probe(struct snd_soc_codec *codec)
                return ret;
        }
 
+       /*
+        * Take a liberal approach to checking the device ID to allow the
+        * driver to be used even if the device ID does not match; however,
+        * issue a warning if there is a mismatch.
+        */
        ret = regmap_read(tas5720->regmap, TAS5720_DEVICE_ID_REG, &device_id);
        if (ret < 0) {
                dev_err(codec->dev, "failed to read device ID register: %d\n",
@@ -283,13 +294,22 @@ static int tas5720_codec_probe(struct snd_soc_codec *codec)
                goto probe_fail;
        }
 
-       if (device_id != TAS5720_DEVICE_ID) {
-               dev_err(codec->dev, "wrong device ID. expected: %u read: %u\n",
-                       TAS5720_DEVICE_ID, device_id);
-               ret = -ENODEV;
-               goto probe_fail;
+       switch (tas5720->devtype) {
+       case TAS5720:
+               expected_device_id = TAS5720_DEVICE_ID;
+               break;
+       case TAS5722:
+               expected_device_id = TAS5722_DEVICE_ID;
+               break;
+       default:
+               dev_err(codec->dev, "unexpected private driver data\n");
+               return -EINVAL;
        }
 
+       if (device_id != expected_device_id)
+               dev_warn(codec->dev, "wrong device ID. expected: %u read: %u\n",
+                        expected_device_id, device_id);
+
        /* Set device to mute */
        ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
                                  TAS5720_MUTE, TAS5720_MUTE);
@@ -446,6 +466,15 @@ static const struct regmap_config tas5720_regmap_config = {
        .volatile_reg = tas5720_is_volatile_reg,
 };
 
+static const struct regmap_config tas5722_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = TAS5722_MAX_REG,
+       .cache_type = REGCACHE_RBTREE,
+       .volatile_reg = tas5720_is_volatile_reg,
+};
+
 /*
  * DAC analog gain. There are four discrete values to select from, ranging
  * from 19.2 dB to 26.3dB.
@@ -544,6 +573,7 @@ static int tas5720_probe(struct i2c_client *client,
 {
        struct device *dev = &client->dev;
        struct tas5720_data *data;
+       const struct regmap_config *regmap_config;
        int ret;
        int i;
 
@@ -552,7 +582,20 @@ static int tas5720_probe(struct i2c_client *client,
                return -ENOMEM;
 
        data->tas5720_client = client;
-       data->regmap = devm_regmap_init_i2c(client, &tas5720_regmap_config);
+       data->devtype = id->driver_data;
+
+       switch (id->driver_data) {
+       case TAS5720:
+               regmap_config = &tas5720_regmap_config;
+               break;
+       case TAS5722:
+               regmap_config = &tas5722_regmap_config;
+               break;
+       default:
+               dev_err(dev, "unexpected private driver data\n");
+               return -EINVAL;
+       }
+       data->regmap = devm_regmap_init_i2c(client, regmap_config);
        if (IS_ERR(data->regmap)) {
                ret = PTR_ERR(data->regmap);
                dev_err(dev, "failed to allocate register map: %d\n", ret);
@@ -592,7 +635,8 @@ static int tas5720_remove(struct i2c_client *client)
 }
 
 static const struct i2c_device_id tas5720_id[] = {
-       { "tas5720", 0 },
+       { "tas5720", TAS5720 },
+       { "tas5722", TAS5722 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, tas5720_id);
@@ -600,6 +644,7 @@ MODULE_DEVICE_TABLE(i2c, tas5720_id);
 #if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id tas5720_of_match[] = {
        { .compatible = "ti,tas5720", },
+       { .compatible = "ti,tas5722", },
        { },
 };
 MODULE_DEVICE_TABLE(of, tas5720_of_match);
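
The probe change above keys the regmap configuration off the i2c_device_id driver_data field. The stand-alone C sketch below illustrates that dispatch pattern with stand-in types; the struct and function names here are invented for illustration and are not the kernel's i2c core API. The per-chip register limits mirror TAS5720_MAX_REG/TAS5722_MAX_REG defined in tas5720.h just below.

#include <stdio.h>

enum chip_type { CHIP_TAS5720, CHIP_TAS5722 };

struct id_entry {
	const char *name;
	enum chip_type data;	/* plays the role of i2c_device_id.driver_data */
};

static const struct id_entry id_table[] = {
	{ "tas5720", CHIP_TAS5720 },
	{ "tas5722", CHIP_TAS5722 },
};

/* mirrors the switch added to tas5720_probe(): pick per-chip regmap limits */
static const char *pick_regmap(enum chip_type type)
{
	switch (type) {
	case CHIP_TAS5720:
		return "tas5720_regmap_config (max_register 0x11)";
	case CHIP_TAS5722:
		return "tas5722_regmap_config (max_register 0x14)";
	default:
		return "unknown";
	}
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(id_table) / sizeof(id_table[0]); i++)
		printf("%s -> %s\n", id_table[i].name, pick_regmap(id_table[i].data));
	return 0;
}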
index 3d077c7..1dda309 100644 (file)
 #define TAS5720_DIGITAL_CLIP1_REG      0x11
 #define TAS5720_MAX_REG                        TAS5720_DIGITAL_CLIP1_REG
 
+/* Additional TAS5722-specific Registers */
+#define TAS5722_DIGITAL_CTRL2_REG      0x13
+#define TAS5722_ANALOG_CTRL2_REG       0x14
+#define TAS5722_MAX_REG                        TAS5722_ANALOG_CTRL2_REG
+
 /* TAS5720_DEVICE_ID_REG */
 #define TAS5720_DEVICE_ID              0x01
+#define TAS5722_DEVICE_ID              0x12
 
 /* TAS5720_POWER_CTRL_REG */
 #define TAS5720_DIG_CLIP_MASK          GENMASK(7, 2)
@@ -51,6 +57,7 @@
 #define TAS5720_SAIF_FORMAT_MASK       GENMASK(2, 0)
 
 /* TAS5720_DIGITAL_CTRL2_REG */
+#define TAS5722_VOL_RAMP_RATE          BIT(6)
 #define TAS5720_MUTE                   BIT(4)
 #define TAS5720_TDM_SLOT_SEL_MASK      GENMASK(2, 0)
 
 #define TAS5720_CLIP1_MASK             GENMASK(7, 2)
 #define TAS5720_CLIP1_SHIFT            (0x2)
 
+/* TAS5722_DIGITAL_CTRL2_REG */
+#define TAS5722_HPF_3_7HZ              (0x0 << 5)
+#define TAS5722_HPF_7_4HZ              (0x1 << 5)
+#define TAS5722_HPF_14_9HZ             (0x2 << 5)
+#define TAS5722_HPF_29_7HZ             (0x3 << 5)
+#define TAS5722_HPF_59_4HZ             (0x4 << 5)
+#define TAS5722_HPF_118_4HZ            (0x5 << 5)
+#define TAS5722_HPF_235_0HZ            (0x6 << 5)
+#define TAS5722_HPF_463_2HZ            (0x7 << 5)
+#define TAS5722_HPF_MASK               GENMASK(7, 5)
+#define TAS5722_AUTO_SLEEP_OFF         (0x0 << 3)
+#define TAS5722_AUTO_SLEEP_1024LR      (0x1 << 3)
+#define TAS5722_AUTO_SLEEP_65536LR     (0x2 << 3)
+#define TAS5722_AUTO_SLEEP_262144LR    (0x3 << 3)
+#define TAS5722_AUTO_SLEEP_MASK                GENMASK(4, 3)
+#define TAS5722_TDM_SLOT_16B           BIT(2)
+#define TAS5722_MCLK_PIN_CFG           BIT(1)
+#define TAS5722_VOL_CONTROL_LSB                BIT(0)
+
+/* TAS5722_ANALOG_CTRL2_REG */
+#define TAS5722_FAULTZ_PU              BIT(3)
+#define TAS5722_VREG_LVL               BIT(2)
+#define TAS5722_PWR_TUNE               BIT(0)
+
 #endif /* __TAS5720_H__ */
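
As a quick illustration of how the new TAS5722 bit fields pack, the stand-alone C sketch below composes a value for TAS5722_DIGITAL_CTRL2_REG selecting the 29.7 Hz high-pass corner, auto-sleep off and 16-bit TDM slots. The macros are re-declared locally so the snippet compiles on its own; it is an example of the encoding, not code from this series.

#include <stdio.h>

#define BIT(n)			(1u << (n))
/* re-declared from the tas5720.h additions above */
#define TAS5722_HPF_29_7HZ	(0x3 << 5)
#define TAS5722_AUTO_SLEEP_OFF	(0x0 << 3)
#define TAS5722_TDM_SLOT_16B	BIT(2)

int main(void)
{
	unsigned int ctrl2 = TAS5722_HPF_29_7HZ |
			     TAS5722_AUTO_SLEEP_OFF |
			     TAS5722_TDM_SLOT_16B;

	printf("TAS5722_DIGITAL_CTRL2 = 0x%02x\n", ctrl2);	/* prints 0x64 */
	return 0;
}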
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
new file mode 100644 (file)
index 0000000..49b87f6
--- /dev/null
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ALSA SoC Texas Instruments TAS6424 Quad-Channel Audio Amplifier
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *     Author: Andreas Dannenberg <dannenberg@ti.com>
+ *     Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tas6424.h"
+
+/* Define how often to check (and clear) the fault status register (in ms) */
+#define TAS6424_FAULT_CHECK_INTERVAL 200
+
+static const char * const tas6424_supply_names[] = {
+       "dvdd", /* Digital power supply. Connect to 3.3-V supply. */
+       "vbat", /* Supply used for higher voltage analog circuits. */
+       "pvdd", /* Class-D amp output FETs supply. */
+};
+#define TAS6424_NUM_SUPPLIES ARRAY_SIZE(tas6424_supply_names)
+
+struct tas6424_data {
+       struct device *dev;
+       struct regmap *regmap;
+       struct regulator_bulk_data supplies[TAS6424_NUM_SUPPLIES];
+       struct delayed_work fault_check_work;
+       unsigned int last_fault1;
+       unsigned int last_fault2;
+       unsigned int last_warn;
+};
+
+/*
+ * DAC digital volumes. From -103.5 to 24 dB in 0.5 dB steps. Note that
+ * setting the gain below -100 dB (register value <0x7) is effectively a MUTE
+ * as per device datasheet.
+ */
+static DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0);
+
+static const struct snd_kcontrol_new tas6424_snd_controls[] = {
+       SOC_SINGLE_TLV("Speaker Driver CH1 Playback Volume",
+                      TAS6424_CH1_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+       SOC_SINGLE_TLV("Speaker Driver CH2 Playback Volume",
+                      TAS6424_CH2_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+       SOC_SINGLE_TLV("Speaker Driver CH3 Playback Volume",
+                      TAS6424_CH3_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+       SOC_SINGLE_TLV("Speaker Driver CH4 Playback Volume",
+                      TAS6424_CH4_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+};
+
+static int tas6424_dac_event(struct snd_soc_dapm_widget *w,
+                            struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+       struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+
+       dev_dbg(codec->dev, "%s() event=0x%0x\n", __func__, event);
+
+       if (event & SND_SOC_DAPM_POST_PMU) {
+               /* Observe codec shutdown-to-active time */
+               msleep(12);
+
+               /* Turn on TAS6424 periodic fault checking/handling */
+               tas6424->last_fault1 = 0;
+               tas6424->last_fault2 = 0;
+               tas6424->last_warn = 0;
+               schedule_delayed_work(&tas6424->fault_check_work,
+                                     msecs_to_jiffies(TAS6424_FAULT_CHECK_INTERVAL));
+       } else if (event & SND_SOC_DAPM_PRE_PMD) {
+               /* Disable TAS6424 periodic fault checking/handling */
+               cancel_delayed_work_sync(&tas6424->fault_check_work);
+       }
+
+       return 0;
+}
+
+static const struct snd_soc_dapm_widget tas6424_dapm_widgets[] = {
+       SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas6424_dac_event,
+                          SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+       SND_SOC_DAPM_OUTPUT("OUT")
+};
+
+static const struct snd_soc_dapm_route tas6424_audio_map[] = {
+       { "DAC", NULL, "DAC IN" },
+       { "OUT", NULL, "DAC" },
+};
+
+static int tas6424_hw_params(struct snd_pcm_substream *substream,
+                            struct snd_pcm_hw_params *params,
+                            struct snd_soc_dai *dai)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       unsigned int rate = params_rate(params);
+       unsigned int width = params_width(params);
+       u8 sap_ctrl = 0;
+
+       dev_dbg(codec->dev, "%s() rate=%u width=%u\n", __func__, rate, width);
+
+       switch (rate) {
+       case 44100:
+               sap_ctrl |= TAS6424_SAP_RATE_44100;
+               break;
+       case 48000:
+               sap_ctrl |= TAS6424_SAP_RATE_48000;
+               break;
+       case 96000:
+               sap_ctrl |= TAS6424_SAP_RATE_96000;
+               break;
+       default:
+               dev_err(codec->dev, "unsupported sample rate: %u\n", rate);
+               return -EINVAL;
+       }
+
+       switch (width) {
+       case 16:
+               sap_ctrl |= TAS6424_SAP_TDM_SLOT_SZ_16;
+               break;
+       case 24:
+               break;
+       default:
+               dev_err(codec->dev, "unsupported sample width: %u\n", width);
+               return -EINVAL;
+       }
+
+       snd_soc_update_bits(codec, TAS6424_SAP_CTRL,
+                           TAS6424_SAP_RATE_MASK |
+                           TAS6424_SAP_TDM_SLOT_SZ_16,
+                           sap_ctrl);
+
+       return 0;
+}
+
+static int tas6424_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       u8 serial_format = 0;
+
+       dev_dbg(codec->dev, "%s() fmt=0x%0x\n", __func__, fmt);
+
+       /* clock masters */
+       switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+       case SND_SOC_DAIFMT_CBS_CFS:
+               break;
+       default:
+               dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+               return -EINVAL;
+       }
+
+       /* signal polarity */
+       switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_NF:
+               break;
+       default:
+               dev_err(codec->dev, "Invalid DAI clock signal polarity\n");
+               return -EINVAL;
+       }
+
+       /* interface format */
+       switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_I2S:
+               serial_format |= TAS6424_SAP_I2S;
+               break;
+       case SND_SOC_DAIFMT_DSP_A:
+               serial_format |= TAS6424_SAP_DSP;
+               break;
+       case SND_SOC_DAIFMT_DSP_B:
+               /*
+                * We can use the fact that the TAS6424 does not care about the
+                * LRCLK duty cycle during TDM to receive DSP_B formatted data
+                * in LEFTJ mode (no delaying of the 1st data bit).
+                */
+               serial_format |= TAS6424_SAP_LEFTJ;
+               break;
+       case SND_SOC_DAIFMT_LEFT_J:
+               serial_format |= TAS6424_SAP_LEFTJ;
+               break;
+       default:
+               dev_err(codec->dev, "Invalid DAI interface format\n");
+               return -EINVAL;
+       }
+
+       snd_soc_update_bits(codec, TAS6424_SAP_CTRL,
+                           TAS6424_SAP_FMT_MASK, serial_format);
+
+       return 0;
+}
+
+static int tas6424_set_dai_tdm_slot(struct snd_soc_dai *dai,
+                                   unsigned int tx_mask, unsigned int rx_mask,
+                                   int slots, int slot_width)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       unsigned int first_slot, last_slot;
+       bool sap_tdm_slot_last;
+
+       dev_dbg(codec->dev, "%s() tx_mask=%d rx_mask=%d\n", __func__,
+               tx_mask, rx_mask);
+
+       if (!tx_mask || !rx_mask)
+               return 0; /* nothing needed to disable TDM mode */
+
+       /*
+        * Determine the first slot and last slot that is being requested so
+        * we'll be able to more easily enforce certain constraints as the
+        * TAS6424's TDM interface is not fully configurable.
+        */
+       first_slot = __ffs(tx_mask);
+       last_slot = __fls(rx_mask);
+
+       if (last_slot - first_slot != 4) {
+               dev_err(codec->dev, "tdm mask must cover 4 contiguous slots\n");
+               return -EINVAL;
+       }
+
+       switch (first_slot) {
+       case 0:
+               sap_tdm_slot_last = false;
+               break;
+       case 4:
+               sap_tdm_slot_last = true;
+               break;
+       default:
+               dev_err(codec->dev, "tdm mask must start at slot 0 or 4\n");
+               return -EINVAL;
+       }
+
+       snd_soc_update_bits(codec, TAS6424_SAP_CTRL, TAS6424_SAP_TDM_SLOT_LAST,
+                           sap_tdm_slot_last ? TAS6424_SAP_TDM_SLOT_LAST : 0);
+
+       return 0;
+}
+
+static int tas6424_mute(struct snd_soc_dai *dai, int mute)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       unsigned int val;
+
+       dev_dbg(codec->dev, "%s() mute=%d\n", __func__, mute);
+
+       if (mute)
+               val = TAS6424_ALL_STATE_MUTE;
+       else
+               val = TAS6424_ALL_STATE_PLAY;
+
+       snd_soc_write(codec, TAS6424_CH_STATE_CTRL, val);
+
+       return 0;
+}
+
+static int tas6424_power_off(struct snd_soc_codec *codec)
+{
+       struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       snd_soc_write(codec, TAS6424_CH_STATE_CTRL, TAS6424_ALL_STATE_HIZ);
+
+       regcache_cache_only(tas6424->regmap, true);
+       regcache_mark_dirty(tas6424->regmap);
+
+       ret = regulator_bulk_disable(ARRAY_SIZE(tas6424->supplies),
+                                    tas6424->supplies);
+       if (ret < 0) {
+               dev_err(codec->dev, "failed to disable supplies: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int tas6424_power_on(struct snd_soc_codec *codec)
+{
+       struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(tas6424->supplies),
+                                   tas6424->supplies);
+       if (ret < 0) {
+               dev_err(codec->dev, "failed to enable supplies: %d\n", ret);
+               return ret;
+       }
+
+       regcache_cache_only(tas6424->regmap, false);
+
+       ret = regcache_sync(tas6424->regmap);
+       if (ret < 0) {
+               dev_err(codec->dev, "failed to sync regcache: %d\n", ret);
+               return ret;
+       }
+
+       snd_soc_write(codec, TAS6424_CH_STATE_CTRL, TAS6424_ALL_STATE_MUTE);
+
+       /* any time we come out of HIZ, the output channels automatically run DC
+        * load diagnostics, wait here until this completes
+        */
+       msleep(230);
+
+       return 0;
+}
+
+static int tas6424_set_bias_level(struct snd_soc_codec *codec,
+                                 enum snd_soc_bias_level level)
+{
+       dev_dbg(codec->dev, "%s() level=%d\n", __func__, level);
+
+       switch (level) {
+       case SND_SOC_BIAS_ON:
+       case SND_SOC_BIAS_PREPARE:
+               break;
+       case SND_SOC_BIAS_STANDBY:
+               if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
+                       tas6424_power_on(codec);
+               break;
+       case SND_SOC_BIAS_OFF:
+               tas6424_power_off(codec);
+               break;
+       }
+
+       return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_tas6424 = {
+       .set_bias_level = tas6424_set_bias_level,
+       .idle_bias_off = true,
+
+       .component_driver = {
+               .controls = tas6424_snd_controls,
+               .num_controls = ARRAY_SIZE(tas6424_snd_controls),
+               .dapm_widgets = tas6424_dapm_widgets,
+               .num_dapm_widgets = ARRAY_SIZE(tas6424_dapm_widgets),
+               .dapm_routes = tas6424_audio_map,
+               .num_dapm_routes = ARRAY_SIZE(tas6424_audio_map),
+       },
+};
+
+static struct snd_soc_dai_ops tas6424_speaker_dai_ops = {
+       .hw_params      = tas6424_hw_params,
+       .set_fmt        = tas6424_set_dai_fmt,
+       .set_tdm_slot   = tas6424_set_dai_tdm_slot,
+       .digital_mute   = tas6424_mute,
+};
+
+static struct snd_soc_dai_driver tas6424_dai[] = {
+       {
+               .name = "tas6424-amplifier",
+               .playback = {
+                       .stream_name = "Playback",
+                       .channels_min = 1,
+                       .channels_max = 4,
+                       .rates = TAS6424_RATES,
+                       .formats = TAS6424_FORMATS,
+               },
+               .ops = &tas6424_speaker_dai_ops,
+       },
+};
+
+static void tas6424_fault_check_work(struct work_struct *work)
+{
+       struct tas6424_data *tas6424 = container_of(work, struct tas6424_data,
+                                                   fault_check_work.work);
+       struct device *dev = tas6424->dev;
+       unsigned int reg;
+       int ret;
+
+       ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT1, &reg);
+       if (ret < 0) {
+               dev_err(dev, "failed to read FAULT1 register: %d\n", ret);
+               goto out;
+       }
+
+       /*
+        * Ignore any clock faults as there is no clean way to check for them.
+        * We would need to start checking for those faults *after* the SAIF
+        * stream has been setup, and stop checking *before* the stream is
+        * stopped to avoid any false-positives. However there are no
+        * appropriate hooks to monitor these events.
+        */
+       reg &= TAS6424_FAULT_PVDD_OV |
+              TAS6424_FAULT_VBAT_OV |
+              TAS6424_FAULT_PVDD_UV |
+              TAS6424_FAULT_VBAT_UV;
+
+       if (reg)
+               goto check_global_fault2_reg;
+
+       /*
+        * Only flag errors once for a given occurrence. This is needed as
+        * the TAS6424 will take time clearing the fault condition internally
+        * during which we don't want to bombard the system with the same
+        * error message over and over.
+        */
+       if ((reg & TAS6424_FAULT_PVDD_OV) && !(tas6424->last_fault1 & TAS6424_FAULT_PVDD_OV))
+               dev_crit(dev, "experienced a PVDD overvoltage fault\n");
+
+       if ((reg & TAS6424_FAULT_VBAT_OV) && !(tas6424->last_fault1 & TAS6424_FAULT_VBAT_OV))
+               dev_crit(dev, "experienced a VBAT overvoltage fault\n");
+
+       if ((reg & TAS6424_FAULT_PVDD_UV) && !(tas6424->last_fault1 & TAS6424_FAULT_PVDD_UV))
+               dev_crit(dev, "experienced a PVDD undervoltage fault\n");
+
+       if ((reg & TAS6424_FAULT_VBAT_UV) && !(tas6424->last_fault1 & TAS6424_FAULT_VBAT_UV))
+               dev_crit(dev, "experienced a VBAT undervoltage fault\n");
+
+       /* Store current fault1 value so we can detect any changes next time */
+       tas6424->last_fault1 = reg;
+
+check_global_fault2_reg:
+       ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT2, &reg);
+       if (ret < 0) {
+               dev_err(dev, "failed to read FAULT2 register: %d\n", ret);
+               goto out;
+       }
+
+       reg &= TAS6424_FAULT_OTSD |
+              TAS6424_FAULT_OTSD_CH1 |
+              TAS6424_FAULT_OTSD_CH2 |
+              TAS6424_FAULT_OTSD_CH3 |
+              TAS6424_FAULT_OTSD_CH4;
+
+       if (!reg)
+               goto check_warn_reg;
+
+       if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
+               dev_crit(dev, "experienced a global overtemp shutdown\n");
+
+       if ((reg & TAS6424_FAULT_OTSD_CH1) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH1))
+               dev_crit(dev, "experienced an overtemp shutdown on CH1\n");
+
+       if ((reg & TAS6424_FAULT_OTSD_CH2) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH2))
+               dev_crit(dev, "experienced an overtemp shutdown on CH2\n");
+
+       if ((reg & TAS6424_FAULT_OTSD_CH3) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH3))
+               dev_crit(dev, "experienced an overtemp shutdown on CH3\n");
+
+       if ((reg & TAS6424_FAULT_OTSD_CH4) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH4))
+               dev_crit(dev, "experienced an overtemp shutdown on CH4\n");
+
+       /* Store current fault2 value so we can detect any changes next time */
+       tas6424->last_fault2 = reg;
+
+check_warn_reg:
+       ret = regmap_read(tas6424->regmap, TAS6424_WARN, &reg);
+       if (ret < 0) {
+               dev_err(dev, "failed to read WARN register: %d\n", ret);
+               goto out;
+       }
+
+       reg &= TAS6424_WARN_VDD_UV |
+              TAS6424_WARN_VDD_POR |
+              TAS6424_WARN_VDD_OTW |
+              TAS6424_WARN_VDD_OTW_CH1 |
+              TAS6424_WARN_VDD_OTW_CH2 |
+              TAS6424_WARN_VDD_OTW_CH3 |
+              TAS6424_WARN_VDD_OTW_CH4;
+
+       if (!reg)
+               goto out;
+
+       if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
+               dev_warn(dev, "experienced a VDD under voltage condition\n");
+
+       if ((reg & TAS6424_WARN_VDD_POR) && !(tas6424->last_warn & TAS6424_WARN_VDD_POR))
+               dev_warn(dev, "experienced a VDD POR condition\n");
+
+       if ((reg & TAS6424_WARN_VDD_OTW) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW))
+               dev_warn(dev, "experienced a global overtemp warning\n");
+
+       if ((reg & TAS6424_WARN_VDD_OTW_CH1) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH1))
+               dev_warn(dev, "experienced an overtemp warning on CH1\n");
+
+       if ((reg & TAS6424_WARN_VDD_OTW_CH2) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH2))
+               dev_warn(dev, "experienced an overtemp warning on CH2\n");
+
+       if ((reg & TAS6424_WARN_VDD_OTW_CH3) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH3))
+               dev_warn(dev, "experienced an overtemp warning on CH3\n");
+
+       if ((reg & TAS6424_WARN_VDD_OTW_CH4) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH4))
+               dev_warn(dev, "experienced an overtemp warning on CH4\n");
+
+       /* Store current warn value so we can detect any changes next time */
+       tas6424->last_warn = reg;
+
+       /* Clear any faults by toggling the CLEAR_FAULT control bit */
+       ret = regmap_write_bits(tas6424->regmap, TAS6424_MISC_CTRL3,
+                               TAS6424_CLEAR_FAULT, TAS6424_CLEAR_FAULT);
+       if (ret < 0)
+               dev_err(dev, "failed to write MISC_CTRL3 register: %d\n", ret);
+
+       ret = regmap_write_bits(tas6424->regmap, TAS6424_MISC_CTRL3,
+                               TAS6424_CLEAR_FAULT, 0);
+       if (ret < 0)
+               dev_err(dev, "failed to write MISC_CTRL3 register: %d\n", ret);
+
+out:
+       /* Schedule the next fault check at the specified interval */
+       schedule_delayed_work(&tas6424->fault_check_work,
+                             msecs_to_jiffies(TAS6424_FAULT_CHECK_INTERVAL));
+}
+
+static const struct reg_default tas6424_reg_defaults[] = {
+       { TAS6424_MODE_CTRL,            0x00 },
+       { TAS6424_MISC_CTRL1,           0x32 },
+       { TAS6424_MISC_CTRL2,           0x62 },
+       { TAS6424_SAP_CTRL,             0x04 },
+       { TAS6424_CH_STATE_CTRL,        0x55 },
+       { TAS6424_CH1_VOL_CTRL,         0xcf },
+       { TAS6424_CH2_VOL_CTRL,         0xcf },
+       { TAS6424_CH3_VOL_CTRL,         0xcf },
+       { TAS6424_CH4_VOL_CTRL,         0xcf },
+       { TAS6424_DC_DIAG_CTRL1,        0x00 },
+       { TAS6424_DC_DIAG_CTRL2,        0x11 },
+       { TAS6424_DC_DIAG_CTRL3,        0x11 },
+       { TAS6424_PIN_CTRL,             0xff },
+       { TAS6424_AC_DIAG_CTRL1,        0x00 },
+       { TAS6424_MISC_CTRL3,           0x00 },
+       { TAS6424_CLIP_CTRL,            0x01 },
+       { TAS6424_CLIP_WINDOW,          0x14 },
+       { TAS6424_CLIP_WARN,            0x00 },
+       { TAS6424_CBC_STAT,             0x00 },
+       { TAS6424_MISC_CTRL4,           0x40 },
+};
+
+static bool tas6424_is_writable_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case TAS6424_MODE_CTRL:
+       case TAS6424_MISC_CTRL1:
+       case TAS6424_MISC_CTRL2:
+       case TAS6424_SAP_CTRL:
+       case TAS6424_CH_STATE_CTRL:
+       case TAS6424_CH1_VOL_CTRL:
+       case TAS6424_CH2_VOL_CTRL:
+       case TAS6424_CH3_VOL_CTRL:
+       case TAS6424_CH4_VOL_CTRL:
+       case TAS6424_DC_DIAG_CTRL1:
+       case TAS6424_DC_DIAG_CTRL2:
+       case TAS6424_DC_DIAG_CTRL3:
+       case TAS6424_PIN_CTRL:
+       case TAS6424_AC_DIAG_CTRL1:
+       case TAS6424_MISC_CTRL3:
+       case TAS6424_CLIP_CTRL:
+       case TAS6424_CLIP_WINDOW:
+       case TAS6424_CLIP_WARN:
+       case TAS6424_CBC_STAT:
+       case TAS6424_MISC_CTRL4:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool tas6424_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case TAS6424_DC_LOAD_DIAG_REP12:
+       case TAS6424_DC_LOAD_DIAG_REP34:
+       case TAS6424_DC_LOAD_DIAG_REPLO:
+       case TAS6424_CHANNEL_STATE:
+       case TAS6424_CHANNEL_FAULT:
+       case TAS6424_GLOB_FAULT1:
+       case TAS6424_GLOB_FAULT2:
+       case TAS6424_WARN:
+       case TAS6424_AC_LOAD_DIAG_REP1:
+       case TAS6424_AC_LOAD_DIAG_REP2:
+       case TAS6424_AC_LOAD_DIAG_REP3:
+       case TAS6424_AC_LOAD_DIAG_REP4:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static const struct regmap_config tas6424_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .writeable_reg = tas6424_is_writable_reg,
+       .volatile_reg = tas6424_is_volatile_reg,
+
+       .max_register = TAS6424_MAX,
+       .reg_defaults = tas6424_reg_defaults,
+       .num_reg_defaults = ARRAY_SIZE(tas6424_reg_defaults),
+       .cache_type = REGCACHE_RBTREE,
+};
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tas6424_of_ids[] = {
+       { .compatible = "ti,tas6424", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, tas6424_of_ids);
+#endif
+
+static int tas6424_i2c_probe(struct i2c_client *client,
+                            const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct tas6424_data *tas6424;
+       int ret;
+       int i;
+
+       tas6424 = devm_kzalloc(dev, sizeof(*tas6424), GFP_KERNEL);
+       if (!tas6424)
+               return -ENOMEM;
+       dev_set_drvdata(dev, tas6424);
+
+       tas6424->dev = dev;
+
+       tas6424->regmap = devm_regmap_init_i2c(client, &tas6424_regmap_config);
+       if (IS_ERR(tas6424->regmap)) {
+               ret = PTR_ERR(tas6424->regmap);
+               dev_err(dev, "unable to allocate register map: %d\n", ret);
+               return ret;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(tas6424->supplies); i++)
+               tas6424->supplies[i].supply = tas6424_supply_names[i];
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(tas6424->supplies),
+                                     tas6424->supplies);
+       if (ret) {
+               dev_err(dev, "unable to request supplies: %d\n", ret);
+               return ret;
+       }
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(tas6424->supplies),
+                                   tas6424->supplies);
+       if (ret) {
+               dev_err(dev, "unable to enable supplies: %d\n", ret);
+               return ret;
+       }
+
+       /* Reset device to establish well-defined startup state */
+       ret = regmap_update_bits(tas6424->regmap, TAS6424_MODE_CTRL,
+                                TAS6424_RESET, TAS6424_RESET);
+       if (ret) {
+               dev_err(dev, "unable to reset device: %d\n", ret);
+               return ret;
+       }
+
+       INIT_DELAYED_WORK(&tas6424->fault_check_work, tas6424_fault_check_work);
+
+       ret = snd_soc_register_codec(dev, &soc_codec_dev_tas6424,
+                                    tas6424_dai, ARRAY_SIZE(tas6424_dai));
+       if (ret < 0) {
+               dev_err(dev, "unable to register codec: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int tas6424_i2c_remove(struct i2c_client *client)
+{
+       struct device *dev = &client->dev;
+       struct tas6424_data *tas6424 = dev_get_drvdata(dev);
+       int ret;
+
+       snd_soc_unregister_codec(dev);
+
+       cancel_delayed_work_sync(&tas6424->fault_check_work);
+
+       ret = regulator_bulk_disable(ARRAY_SIZE(tas6424->supplies),
+                                    tas6424->supplies);
+       if (ret < 0) {
+               dev_err(dev, "unable to disable supplies: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct i2c_device_id tas6424_i2c_ids[] = {
+       { "tas6424", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, tas6424_i2c_ids);
+
+static struct i2c_driver tas6424_i2c_driver = {
+       .driver = {
+               .name = "tas6424",
+               .of_match_table = of_match_ptr(tas6424_of_ids),
+       },
+       .probe = tas6424_i2c_probe,
+       .remove = tas6424_i2c_remove,
+       .id_table = tas6424_i2c_ids,
+};
+module_i2c_driver(tas6424_i2c_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TAS6424 Audio amplifier driver");
+MODULE_LICENSE("GPL v2");
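
The volume controls above use DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0), i.e. -103.5 dB at register value 0, rising in 0.5 dB steps to +24 dB at 0xff. The stand-alone sketch below (user-space C, hypothetical helper name) shows that mapping; note how 0 dB lands on 0xcf, the CHx_VOL_CTRL reset default listed in tas6424_reg_defaults.

#include <stdio.h>

/*
 * Convert a gain in 1/100 dB (the unit used by the TLV declaration) into a
 * TAS6424 CHx_VOL_CTRL register value. Illustrative helper only.
 */
static unsigned int tas6424_db_to_reg(int gain)
{
	if (gain < -10350)
		gain = -10350;
	if (gain > 2400)
		gain = 2400;
	return (unsigned int)((gain + 10350) / 50);
}

int main(void)
{
	printf("-103.5 dB -> 0x%02x\n", tas6424_db_to_reg(-10350));	/* 0x00 */
	printf("   0.0 dB -> 0x%02x\n", tas6424_db_to_reg(0));		/* 0xcf */
	printf(" +24.0 dB -> 0x%02x\n", tas6424_db_to_reg(2400));	/* 0xff */
	return 0;
}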
diff --git a/sound/soc/codecs/tas6424.h b/sound/soc/codecs/tas6424.h
new file mode 100644 (file)
index 0000000..4305883
--- /dev/null
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ALSA SoC Texas Instruments TAS6424 Quad-Channel Audio Amplifier
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *     Author: Andreas Dannenberg <dannenberg@ti.com>
+ *     Andrew F. Davis <afd@ti.com>
+ */
+
+#ifndef __TAS6424_H__
+#define __TAS6424_H__
+
+#define TAS6424_RATES (SNDRV_PCM_RATE_44100 | \
+                      SNDRV_PCM_RATE_48000 | \
+                      SNDRV_PCM_RATE_96000)
+
+#define TAS6424_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+                        SNDRV_PCM_FMTBIT_S24_LE)
+
+/* Register Address Map */
+#define TAS6424_MODE_CTRL              0x00
+#define TAS6424_MISC_CTRL1             0x01
+#define TAS6424_MISC_CTRL2             0x02
+#define TAS6424_SAP_CTRL               0x03
+#define TAS6424_CH_STATE_CTRL          0x04
+#define TAS6424_CH1_VOL_CTRL           0x05
+#define TAS6424_CH2_VOL_CTRL           0x06
+#define TAS6424_CH3_VOL_CTRL           0x07
+#define TAS6424_CH4_VOL_CTRL           0x08
+#define TAS6424_DC_DIAG_CTRL1          0x09
+#define TAS6424_DC_DIAG_CTRL2          0x0a
+#define TAS6424_DC_DIAG_CTRL3          0x0b
+#define TAS6424_DC_LOAD_DIAG_REP12     0x0c
+#define TAS6424_DC_LOAD_DIAG_REP34     0x0d
+#define TAS6424_DC_LOAD_DIAG_REPLO     0x0e
+#define TAS6424_CHANNEL_STATE          0x0f
+#define TAS6424_CHANNEL_FAULT          0x10
+#define TAS6424_GLOB_FAULT1            0x11
+#define TAS6424_GLOB_FAULT2            0x12
+#define TAS6424_WARN                   0x13
+#define TAS6424_PIN_CTRL               0x14
+#define TAS6424_AC_DIAG_CTRL1          0x15
+#define TAS6424_AC_DIAG_CTRL2          0x16
+#define TAS6424_AC_LOAD_DIAG_REP1      0x17
+#define TAS6424_AC_LOAD_DIAG_REP2      0x18
+#define TAS6424_AC_LOAD_DIAG_REP3      0x19
+#define TAS6424_AC_LOAD_DIAG_REP4      0x1a
+#define TAS6424_MISC_CTRL3             0x21
+#define TAS6424_CLIP_CTRL              0x22
+#define TAS6424_CLIP_WINDOW            0x23
+#define TAS6424_CLIP_WARN              0x24
+#define TAS6424_CBC_STAT               0x25
+#define TAS6424_MISC_CTRL4             0x26
+#define TAS6424_MAX                    TAS6424_MISC_CTRL4
+
+/* TAS6424_MODE_CTRL_REG */
+#define TAS6424_RESET                  BIT(7)
+
+/* TAS6424_SAP_CTRL_REG */
+#define TAS6424_SAP_RATE_MASK          GENMASK(7, 6)
+#define TAS6424_SAP_RATE_44100         (0x00 << 6)
+#define TAS6424_SAP_RATE_48000         (0x01 << 6)
+#define TAS6424_SAP_RATE_96000         (0x02 << 6)
+#define TAS6424_SAP_TDM_SLOT_LAST      BIT(5)
+#define TAS6424_SAP_TDM_SLOT_SZ_16     BIT(4)
+#define TAS6424_SAP_TDM_SLOT_SWAP      BIT(3)
+#define TAS6424_SAP_FMT_MASK           GENMASK(2, 0)
+#define TAS6424_SAP_RIGHTJ_24          (0x00 << 0)
+#define TAS6424_SAP_RIGHTJ_20          (0x01 << 0)
+#define TAS6424_SAP_RIGHTJ_18          (0x02 << 0)
+#define TAS6424_SAP_RIGHTJ_16          (0x03 << 0)
+#define TAS6424_SAP_I2S                        (0x04 << 0)
+#define TAS6424_SAP_LEFTJ              (0x05 << 0)
+#define TAS6424_SAP_DSP                        (0x06 << 0)
+
+/* TAS6424_CH_STATE_CTRL_REG */
+#define TAS6424_CH1_STATE_MASK         GENMASK(7, 6)
+#define TAS6424_CH1_STATE_PLAY         (0x00 << 6)
+#define TAS6424_CH1_STATE_HIZ          (0x01 << 6)
+#define TAS6424_CH1_STATE_MUTE         (0x02 << 6)
+#define TAS6424_CH1_STATE_DIAG         (0x03 << 6)
+#define TAS6424_CH2_STATE_MASK         GENMASK(5, 4)
+#define TAS6424_CH2_STATE_PLAY         (0x00 << 4)
+#define TAS6424_CH2_STATE_HIZ          (0x01 << 4)
+#define TAS6424_CH2_STATE_MUTE         (0x02 << 4)
+#define TAS6424_CH2_STATE_DIAG         (0x03 << 4)
+#define TAS6424_CH3_STATE_MASK         GENMASK(3, 2)
+#define TAS6424_CH3_STATE_PLAY         (0x00 << 2)
+#define TAS6424_CH3_STATE_HIZ          (0x01 << 2)
+#define TAS6424_CH3_STATE_MUTE         (0x02 << 2)
+#define TAS6424_CH3_STATE_DIAG         (0x03 << 2)
+#define TAS6424_CH4_STATE_MASK         GENMASK(1, 0)
+#define TAS6424_CH4_STATE_PLAY         (0x00 << 0)
+#define TAS6424_CH4_STATE_HIZ          (0x01 << 0)
+#define TAS6424_CH4_STATE_MUTE         (0x02 << 0)
+#define TAS6424_CH4_STATE_DIAG         (0x03 << 0)
+#define TAS6424_ALL_STATE_PLAY         (TAS6424_CH1_STATE_PLAY | \
+                                        TAS6424_CH2_STATE_PLAY | \
+                                        TAS6424_CH3_STATE_PLAY | \
+                                        TAS6424_CH4_STATE_PLAY)
+#define TAS6424_ALL_STATE_HIZ          (TAS6424_CH1_STATE_HIZ | \
+                                        TAS6424_CH2_STATE_HIZ | \
+                                        TAS6424_CH3_STATE_HIZ | \
+                                        TAS6424_CH4_STATE_HIZ)
+#define TAS6424_ALL_STATE_MUTE         (TAS6424_CH1_STATE_MUTE | \
+                                        TAS6424_CH2_STATE_MUTE | \
+                                        TAS6424_CH3_STATE_MUTE | \
+                                        TAS6424_CH4_STATE_MUTE)
+#define TAS6424_ALL_STATE_DIAG         (TAS6424_CH1_STATE_DIAG | \
+                                        TAS6424_CH2_STATE_DIAG | \
+                                        TAS6424_CH3_STATE_DIAG | \
+                                        TAS6424_CH4_STATE_DIAG)
+
+/* TAS6424_GLOB_FAULT1_REG */
+#define TAS6424_FAULT_CLOCK            BIT(4)
+#define TAS6424_FAULT_PVDD_OV          BIT(3)
+#define TAS6424_FAULT_VBAT_OV          BIT(2)
+#define TAS6424_FAULT_PVDD_UV          BIT(1)
+#define TAS6424_FAULT_VBAT_UV          BIT(0)
+
+/* TAS6424_GLOB_FAULT2_REG */
+#define TAS6424_FAULT_OTSD             BIT(4)
+#define TAS6424_FAULT_OTSD_CH1         BIT(3)
+#define TAS6424_FAULT_OTSD_CH2         BIT(2)
+#define TAS6424_FAULT_OTSD_CH3         BIT(1)
+#define TAS6424_FAULT_OTSD_CH4         BIT(0)
+
+/* TAS6424_WARN_REG */
+#define TAS6424_WARN_VDD_UV            BIT(6)
+#define TAS6424_WARN_VDD_POR           BIT(5)
+#define TAS6424_WARN_VDD_OTW           BIT(4)
+#define TAS6424_WARN_VDD_OTW_CH1       BIT(3)
+#define TAS6424_WARN_VDD_OTW_CH2       BIT(2)
+#define TAS6424_WARN_VDD_OTW_CH3       BIT(1)
+#define TAS6424_WARN_VDD_OTW_CH4       BIT(0)
+
+/* TAS6424_MISC_CTRL3_REG */
+#define TAS6424_CLEAR_FAULT            BIT(7)
+#define TAS6424_PBTL_CH_SEL            BIT(6)
+#define TAS6424_MASK_CBC_WARN          BIT(5)
+#define TAS6424_MASK_VDD_UV            BIT(4)
+#define TAS6424_OTSD_AUTO_RECOVERY     BIT(3)
+
+#endif /* __TAS6424_H__ */
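
To show how the serial audio port fields above combine, the stand-alone sketch below builds the SAP_CTRL value that tas6424_hw_params(), tas6424_set_dai_fmt() and tas6424_set_dai_tdm_slot() would collectively program for 48 kHz, 16-bit TDM slots, I2S framing and slots 0-3 (so TAS6424_SAP_TDM_SLOT_LAST stays clear). Macros are re-declared locally so it compiles on its own; this is an illustration of the register encoding, not driver code.

#include <stdio.h>

#define BIT(n)				(1u << (n))
/* re-declared from the tas6424.h definitions above */
#define TAS6424_SAP_RATE_48000		(0x01 << 6)
#define TAS6424_SAP_TDM_SLOT_SZ_16	BIT(4)
#define TAS6424_SAP_I2S			(0x04 << 0)

int main(void)
{
	unsigned int sap_ctrl = TAS6424_SAP_RATE_48000 |
				TAS6424_SAP_TDM_SLOT_SZ_16 |
				TAS6424_SAP_I2S;

	printf("SAP_CTRL = 0x%02x\n", sap_ctrl);	/* prints 0x54 */
	return 0;
}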
index f8dd67c..e7ca764 100644 (file)
@@ -316,6 +316,7 @@ static const struct of_device_id tfa9879_of_match[] = {
        { .compatible = "nxp,tfa9879", },
        { }
 };
+MODULE_DEVICE_TABLE(of, tfa9879_of_match);
 
 static struct i2c_driver tfa9879_i2c_driver = {
        .driver = {
index e286237..858cb8b 100644 (file)
@@ -1,22 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * ALSA SoC TLV320AIC31XX codec driver
+ * ALSA SoC TLV320AIC31xx CODEC Driver
  *
- * Copyright (C) 2014 Texas Instruments, Inc.
- *
- * Author: Jyri Sarha <jsarha@ti.com>
+ * Copyright (C) 2014-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *     Jyri Sarha <jsarha@ti.com>
  *
  * Based on ground work by: Ajit Kulkarni <x0175765@ti.com>
  *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED AS IS AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- *
- * The TLV320AIC31xx series of audio codec is a low-power, highly integrated
- * high performance codec which provides a stereo DAC, a mono ADC,
+ * The TLV320AIC31xx series of audio codecs are low-power, highly integrated
+ * high-performance codecs which provide a stereo DAC, a mono ADC,
  * and mono/stereo Class-D speaker driver.
  */
 
@@ -26,7 +18,7 @@
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/regulator/consumer.h>
 #include <linux/acpi.h>
 #include <linux/of.h>
@@ -144,8 +136,7 @@ static const struct regmap_config aic31xx_i2c_regmap = {
        .max_register = 12 * 128,
 };
 
-#define AIC31XX_NUM_SUPPLIES   6
-static const char * const aic31xx_supply_names[AIC31XX_NUM_SUPPLIES] = {
+static const char * const aic31xx_supply_names[] = {
        "HPVDD",
        "SPRVDD",
        "SPLVDD",
@@ -154,6 +145,8 @@ static const char * const aic31xx_supply_names[AIC31XX_NUM_SUPPLIES] = {
        "DVDD",
 };
 
+#define AIC31XX_NUM_SUPPLIES ARRAY_SIZE(aic31xx_supply_names)
+
 struct aic31xx_disable_nb {
        struct notifier_block nb;
        struct aic31xx_priv *aic31xx;
@@ -164,6 +157,9 @@ struct aic31xx_priv {
        u8 i2c_regs_status;
        struct device *dev;
        struct regmap *regmap;
+       enum aic31xx_type codec_type;
+       struct gpio_desc *gpio_reset;
+       int micbias_vg;
        struct aic31xx_pdata pdata;
        struct regulator_bulk_data supplies[AIC31XX_NUM_SUPPLIES];
        struct aic31xx_disable_nb disable_nb[AIC31XX_NUM_SUPPLIES];
@@ -185,7 +181,7 @@ struct aic31xx_rate_divs {
        u8 madc;
 };
 
-/* ADC dividers can be disabled by cofiguring them to 0 */
+/* ADC dividers can be disabled by configuring them to 0 */
 static const struct aic31xx_rate_divs aic31xx_divs[] = {
        /* mclk/p    rate  pll: j     d        dosr ndac mdac  aors nadc madc */
        /* 8k rate */
@@ -456,7 +452,7 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
                /* change mic bias voltage to user defined */
                snd_soc_update_bits(codec, AIC31XX_MICBIAS,
                                    AIC31XX_MICBIAS_MASK,
-                                   aic31xx->pdata.micbias_vg <<
+                                   aic31xx->micbias_vg <<
                                    AIC31XX_MICBIAS_SHIFT);
                dev_dbg(codec->dev, "%s: turned on\n", __func__);
                break;
@@ -679,14 +675,14 @@ static int aic31xx_add_controls(struct snd_soc_codec *codec)
        int ret = 0;
        struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
 
-       if (!(aic31xx->pdata.codec_type & DAC31XX_BIT))
+       if (!(aic31xx->codec_type & DAC31XX_BIT))
                ret = snd_soc_add_codec_controls(
                        codec, aic31xx_snd_controls,
                        ARRAY_SIZE(aic31xx_snd_controls));
        if (ret)
                return ret;
 
-       if (aic31xx->pdata.codec_type & AIC31XX_STEREO_CLASS_D_BIT)
+       if (aic31xx->codec_type & AIC31XX_STEREO_CLASS_D_BIT)
                ret = snd_soc_add_codec_controls(
                        codec, aic311x_snd_controls,
                        ARRAY_SIZE(aic311x_snd_controls));
@@ -704,7 +700,7 @@ static int aic31xx_add_widgets(struct snd_soc_codec *codec)
        struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
        int ret = 0;
 
-       if (aic31xx->pdata.codec_type & DAC31XX_BIT) {
+       if (aic31xx->codec_type & DAC31XX_BIT) {
                ret = snd_soc_dapm_new_controls(
                        dapm, dac31xx_dapm_widgets,
                        ARRAY_SIZE(dac31xx_dapm_widgets));
@@ -728,7 +724,7 @@ static int aic31xx_add_widgets(struct snd_soc_codec *codec)
                        return ret;
        }
 
-       if (aic31xx->pdata.codec_type & AIC31XX_STEREO_CLASS_D_BIT) {
+       if (aic31xx->codec_type & AIC31XX_STEREO_CLASS_D_BIT) {
                ret = snd_soc_dapm_new_controls(
                        dapm, aic311x_dapm_widgets,
                        ARRAY_SIZE(aic311x_dapm_widgets));
@@ -760,11 +756,17 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
 {
        struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
        int bclk_score = snd_soc_params_to_frame_size(params);
-       int mclk_p = aic31xx->sysclk / aic31xx->p_div;
+       int mclk_p;
        int bclk_n = 0;
        int match = -1;
        int i;
 
+       if (!aic31xx->sysclk || !aic31xx->p_div) {
+               dev_err(codec->dev, "Master clock not supplied\n");
+               return -EINVAL;
+       }
+       mclk_p = aic31xx->sysclk / aic31xx->p_div;
+
        /* Use PLL as CODEC_CLKIN and DAC_CLK as BDIV_CLKIN */
        snd_soc_update_bits(codec, AIC31XX_CLKMUX,
                            AIC31XX_CODEC_CLKIN_MASK, AIC31XX_CODEC_CLKIN_PLL);
@@ -840,11 +842,17 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
 
        dev_dbg(codec->dev,
                "pll %d.%04d/%d dosr %d n %d m %d aosr %d n %d m %d bclk_n %d\n",
-               aic31xx_divs[i].pll_j, aic31xx_divs[i].pll_d,
-               aic31xx->p_div, aic31xx_divs[i].dosr,
-               aic31xx_divs[i].ndac, aic31xx_divs[i].mdac,
-               aic31xx_divs[i].aosr, aic31xx_divs[i].nadc,
-               aic31xx_divs[i].madc, bclk_n);
+               aic31xx_divs[i].pll_j,
+               aic31xx_divs[i].pll_d,
+               aic31xx->p_div,
+               aic31xx_divs[i].dosr,
+               aic31xx_divs[i].ndac,
+               aic31xx_divs[i].mdac,
+               aic31xx_divs[i].aosr,
+               aic31xx_divs[i].nadc,
+               aic31xx_divs[i].madc,
+               bclk_n
+       );
 
        return 0;
 }
@@ -919,8 +927,28 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
        case SND_SOC_DAIFMT_CBM_CFM:
                iface_reg1 |= AIC31XX_BCLK_MASTER | AIC31XX_WCLK_MASTER;
                break;
+       case SND_SOC_DAIFMT_CBS_CFM:
+               iface_reg1 |= AIC31XX_WCLK_MASTER;
+               break;
+       case SND_SOC_DAIFMT_CBM_CFS:
+               iface_reg1 |= AIC31XX_BCLK_MASTER;
+               break;
+       case SND_SOC_DAIFMT_CBS_CFS:
+               break;
+       default:
+               dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+               return -EINVAL;
+       }
+
+       /* signal polarity */
+       switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_NF:
+               break;
+       case SND_SOC_DAIFMT_IB_NF:
+               iface_reg2 |= AIC31XX_BCLKINV_MASK;
+               break;
        default:
-               dev_alert(codec->dev, "Invalid DAI master/slave interface\n");
+               dev_err(codec->dev, "Invalid DAI clock signal polarity\n");
                return -EINVAL;
        }
 
@@ -931,16 +959,12 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
        case SND_SOC_DAIFMT_DSP_A:
                dsp_a_val = 0x1; /* fall through */
        case SND_SOC_DAIFMT_DSP_B:
-               /* NOTE: BCLKINV bit value 1 equas NB and 0 equals IB */
-               switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
-               case SND_SOC_DAIFMT_NB_NF:
-                       iface_reg2 |= AIC31XX_BCLKINV_MASK;
-                       break;
-               case SND_SOC_DAIFMT_IB_NF:
-                       break;
-               default:
-                       return -EINVAL;
-               }
+               /*
+                * NOTE: This CODEC samples on the falling edge of BCLK in
+                * DSP mode, this is inverted compared to what most DAIs
+                * expect, so we invert for this mode
+                */
+               iface_reg2 ^= AIC31XX_BCLKINV_MASK;
                iface_reg1 |= (AIC31XX_DSP_MODE <<
                               AIC31XX_IFACE1_DATATYPE_SHIFT);
                break;
@@ -981,8 +1005,9 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
        dev_dbg(codec->dev, "## %s: clk_id = %d, freq = %d, dir = %d\n",
                __func__, clk_id, freq, dir);
 
-       for (i = 1; freq/i > 20000000 && i < 8; i++)
-               ;
+       for (i = 1; i < 8; i++)
+               if (freq / i <= 20000000)
+                       break;
        if (freq/i > 20000000) {
                dev_err(aic31xx->dev, "%s: Too high mclk frequency %u\n",
                        __func__, freq);
@@ -990,9 +1015,9 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
        }
        aic31xx->p_div = i;
 
-       for (i = 0; i < ARRAY_SIZE(aic31xx_divs) &&
-                    aic31xx_divs[i].mclk_p != freq/aic31xx->p_div; i++)
-               ;
+       for (i = 0; i < ARRAY_SIZE(aic31xx_divs); i++)
+               if (aic31xx_divs[i].mclk_p == freq / aic31xx->p_div)
+                       break;
        if (i == ARRAY_SIZE(aic31xx_divs)) {
                dev_err(aic31xx->dev, "%s: Unsupported frequency %d\n",
                        __func__, freq);
@@ -1004,6 +1029,7 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
                            clk_id << AIC31XX_PLL_CLKIN_SHIFT);
 
        aic31xx->sysclk = freq;
+
        return 0;
 }
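For context, the sysclk and format callbacks reworked above are driven from a machine driver. A minimal sketch of a hypothetical board-level hw_params handler feeding this CODEC a 12 MHz MCLK (the function and board names are assumptions; it relies on <sound/soc.h> and this driver's tlv320aic31xx.h):

static int board_hw_params(struct snd_pcm_substream *substream,
			   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	int ret;

	/* 12 MHz MCLK: p_div stays 1 and the divider table has entries */
	ret = snd_soc_dai_set_sysclk(codec_dai, AIC31XX_PLL_CLKIN_MCLK,
				     12000000, SND_SOC_CLOCK_IN);
	if (ret)
		return ret;

	/* CODEC as slave, normal bit/frame clock polarity */
	return snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
				   SND_SOC_DAIFMT_NB_NF |
				   SND_SOC_DAIFMT_CBS_CFS);
}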
 
@@ -1019,8 +1045,8 @@ static int aic31xx_regulator_event(struct notifier_block *nb,
                 * Put codec to reset and as at least one of the
                 * supplies was disabled.
                 */
-               if (gpio_is_valid(aic31xx->pdata.gpio_reset))
-                       gpio_set_value(aic31xx->pdata.gpio_reset, 0);
+               if (aic31xx->gpio_reset)
+                       gpiod_set_value(aic31xx->gpio_reset, 1);
 
                regcache_mark_dirty(aic31xx->regmap);
                dev_dbg(aic31xx->dev, "## %s: DISABLE received\n", __func__);
@@ -1029,6 +1055,22 @@ static int aic31xx_regulator_event(struct notifier_block *nb,
        return 0;
 }
 
+static int aic31xx_reset(struct aic31xx_priv *aic31xx)
+{
+       int ret = 0;
+
+       if (aic31xx->gpio_reset) {
+               gpiod_set_value(aic31xx->gpio_reset, 1);
+               ndelay(10); /* At least 10ns */
+               gpiod_set_value(aic31xx->gpio_reset, 0);
+       } else {
+               ret = regmap_write(aic31xx->regmap, AIC31XX_RESET, 1);
+       }
+       mdelay(1); /* At least 1ms */
+
+       return ret;
+}
+
 static void aic31xx_clk_on(struct snd_soc_codec *codec)
 {
        struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
@@ -1065,20 +1107,22 @@ static void aic31xx_clk_off(struct snd_soc_codec *codec)
 static int aic31xx_power_on(struct snd_soc_codec *codec)
 {
        struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
-       int ret = 0;
+       int ret;
 
        ret = regulator_bulk_enable(ARRAY_SIZE(aic31xx->supplies),
                                    aic31xx->supplies);
        if (ret)
                return ret;
 
-       if (gpio_is_valid(aic31xx->pdata.gpio_reset)) {
-               gpio_set_value(aic31xx->pdata.gpio_reset, 1);
-               udelay(100);
-       }
        regcache_cache_only(aic31xx->regmap, false);
+
+       /* Reset device registers for a consistent power-on like state */
+       ret = aic31xx_reset(aic31xx);
+       if (ret < 0)
+               dev_err(aic31xx->dev, "Could not reset device: %d\n", ret);
+
        ret = regcache_sync(aic31xx->regmap);
-       if (ret != 0) {
+       if (ret) {
                dev_err(codec->dev,
                        "Failed to restore cache: %d\n", ret);
                regcache_cache_only(aic31xx->regmap, true);
@@ -1086,19 +1130,17 @@ static int aic31xx_power_on(struct snd_soc_codec *codec)
                                       aic31xx->supplies);
                return ret;
        }
+
        return 0;
 }
 
-static int aic31xx_power_off(struct snd_soc_codec *codec)
+static void aic31xx_power_off(struct snd_soc_codec *codec)
 {
        struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
-       int ret = 0;
 
        regcache_cache_only(aic31xx->regmap, true);
-       ret = regulator_bulk_disable(ARRAY_SIZE(aic31xx->supplies),
-                                    aic31xx->supplies);
-
-       return ret;
+       regulator_bulk_disable(ARRAY_SIZE(aic31xx->supplies),
+                              aic31xx->supplies);
 }
 
 static int aic31xx_set_bias_level(struct snd_soc_codec *codec,
@@ -1137,14 +1179,11 @@ static int aic31xx_set_bias_level(struct snd_soc_codec *codec,
 
 static int aic31xx_codec_probe(struct snd_soc_codec *codec)
 {
-       int ret = 0;
        struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
-       int i;
+       int i, ret;
 
        dev_dbg(aic31xx->dev, "## %s\n", __func__);
 
-       aic31xx = snd_soc_codec_get_drvdata(codec);
-
        aic31xx->codec = codec;
 
        for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++) {
@@ -1169,8 +1208,10 @@ static int aic31xx_codec_probe(struct snd_soc_codec *codec)
                return ret;
 
        ret = aic31xx_add_widgets(codec);
+       if (ret)
+               return ret;
 
-       return ret;
+       return 0;
 }
 
 static int aic31xx_codec_remove(struct snd_soc_codec *codec)
@@ -1258,89 +1299,31 @@ static const struct of_device_id tlv320aic31xx_of_match[] = {
        {},
 };
 MODULE_DEVICE_TABLE(of, tlv320aic31xx_of_match);
-
-static void aic31xx_pdata_from_of(struct aic31xx_priv *aic31xx)
-{
-       struct device_node *np = aic31xx->dev->of_node;
-       unsigned int value = MICBIAS_2_0V;
-       int ret;
-
-       of_property_read_u32(np, "ai31xx-micbias-vg", &value);
-       switch (value) {
-       case MICBIAS_2_0V:
-       case MICBIAS_2_5V:
-       case MICBIAS_AVDDV:
-               aic31xx->pdata.micbias_vg = value;
-               break;
-       default:
-               dev_err(aic31xx->dev,
-                       "Bad ai31xx-micbias-vg value %d DT\n",
-                       value);
-               aic31xx->pdata.micbias_vg = MICBIAS_2_0V;
-       }
-
-       ret = of_get_named_gpio(np, "gpio-reset", 0);
-       if (ret > 0)
-               aic31xx->pdata.gpio_reset = ret;
-}
-#else /* CONFIG_OF */
-static void aic31xx_pdata_from_of(struct aic31xx_priv *aic31xx)
-{
-}
 #endif /* CONFIG_OF */
 
-static int aic31xx_device_init(struct aic31xx_priv *aic31xx)
-{
-       int ret, i;
-
-       dev_set_drvdata(aic31xx->dev, aic31xx);
-
-       if (dev_get_platdata(aic31xx->dev))
-               memcpy(&aic31xx->pdata, dev_get_platdata(aic31xx->dev),
-                      sizeof(aic31xx->pdata));
-       else if (aic31xx->dev->of_node)
-               aic31xx_pdata_from_of(aic31xx);
-
-       if (aic31xx->pdata.gpio_reset) {
-               ret = devm_gpio_request_one(aic31xx->dev,
-                                           aic31xx->pdata.gpio_reset,
-                                           GPIOF_OUT_INIT_HIGH,
-                                           "aic31xx-reset-pin");
-               if (ret < 0) {
-                       dev_err(aic31xx->dev, "not able to acquire gpio\n");
-                       return ret;
-               }
-       }
-
-       for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++)
-               aic31xx->supplies[i].supply = aic31xx_supply_names[i];
-
-       ret = devm_regulator_bulk_get(aic31xx->dev,
-                                     ARRAY_SIZE(aic31xx->supplies),
-                                     aic31xx->supplies);
-       if (ret != 0)
-               dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
-
-       return ret;
-}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id aic31xx_acpi_match[] = {
+       { "10TI3100", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
+#endif
 
 static int aic31xx_i2c_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *id)
 {
        struct aic31xx_priv *aic31xx;
-       int ret;
-       const struct regmap_config *regmap_config;
+       unsigned int micbias_value = MICBIAS_2_0V;
+       int i, ret;
 
        dev_dbg(&i2c->dev, "## %s: %s codec_type = %d\n", __func__,
-               id->name, (int) id->driver_data);
-
-       regmap_config = &aic31xx_i2c_regmap;
+               id->name, (int)id->driver_data);
 
        aic31xx = devm_kzalloc(&i2c->dev, sizeof(*aic31xx), GFP_KERNEL);
-       if (aic31xx == NULL)
+       if (!aic31xx)
                return -ENOMEM;
 
-       aic31xx->regmap = devm_regmap_init_i2c(i2c, regmap_config);
+       aic31xx->regmap = devm_regmap_init_i2c(i2c, &aic31xx_i2c_regmap);
        if (IS_ERR(aic31xx->regmap)) {
                ret = PTR_ERR(aic31xx->regmap);
                dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
@@ -1349,13 +1332,49 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
        }
        aic31xx->dev = &i2c->dev;
 
-       aic31xx->pdata.codec_type = id->driver_data;
+       aic31xx->codec_type = id->driver_data;
 
-       ret = aic31xx_device_init(aic31xx);
-       if (ret)
+       dev_set_drvdata(aic31xx->dev, aic31xx);
+
+       fwnode_property_read_u32(aic31xx->dev->fwnode, "ai31xx-micbias-vg",
+                                &micbias_value);
+       switch (micbias_value) {
+       case MICBIAS_2_0V:
+       case MICBIAS_2_5V:
+       case MICBIAS_AVDDV:
+               aic31xx->micbias_vg = micbias_value;
+               break;
+       default:
+               dev_err(aic31xx->dev, "Bad ai31xx-micbias-vg value %d\n",
+                       micbias_value);
+               aic31xx->micbias_vg = MICBIAS_2_0V;
+       }
+
+       if (dev_get_platdata(aic31xx->dev)) {
+               memcpy(&aic31xx->pdata, dev_get_platdata(aic31xx->dev), sizeof(aic31xx->pdata));
+               aic31xx->codec_type = aic31xx->pdata.codec_type;
+               aic31xx->micbias_vg = aic31xx->pdata.micbias_vg;
+       }
+
+       aic31xx->gpio_reset = devm_gpiod_get_optional(aic31xx->dev, "reset",
+                                                     GPIOD_OUT_LOW);
+       if (IS_ERR(aic31xx->gpio_reset)) {
+               dev_err(aic31xx->dev, "not able to acquire gpio\n");
+               return PTR_ERR(aic31xx->gpio_reset);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++)
+               aic31xx->supplies[i].supply = aic31xx_supply_names[i];
+
+       ret = devm_regulator_bulk_get(aic31xx->dev,
+                                     ARRAY_SIZE(aic31xx->supplies),
+                                     aic31xx->supplies);
+       if (ret) {
+               dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
                return ret;
+       }
 
-       if (aic31xx->pdata.codec_type & DAC31XX_BIT)
+       if (aic31xx->codec_type & DAC31XX_BIT)
                return snd_soc_register_codec(&i2c->dev,
                                &soc_codec_driver_aic31xx,
                                dac31xx_dai_driver,
@@ -1386,14 +1405,6 @@ static const struct i2c_device_id aic31xx_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, aic31xx_i2c_id);
 
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id aic31xx_acpi_match[] = {
-       { "10TI3100", 0 },
-       { }
-};
-MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
-#endif
-
 static struct i2c_driver aic31xx_i2c_driver = {
        .driver = {
                .name   = "tlv320aic31xx-codec",
@@ -1404,9 +1415,8 @@ static struct i2c_driver aic31xx_i2c_driver = {
        .remove         = aic31xx_i2c_remove,
        .id_table       = aic31xx_i2c_id,
 };
-
 module_i2c_driver(aic31xx_i2c_driver);
 
-MODULE_DESCRIPTION("ASoC TLV320AIC3111 codec driver");
-MODULE_AUTHOR("Jyri Sarha");
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("ASoC TLV320AIC31xx CODEC Driver");
+MODULE_LICENSE("GPL v2");
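The probe rework above switches the reset line to devm_gpiod_get_optional(), whose return-value convention is what allows plain NULL checks and logical (active-state) levels elsewhere in the driver. A minimal sketch of that convention in isolation, with illustrative variable names and assuming <linux/gpio/consumer.h>:

	struct gpio_desc *reset;

	/* NULL when no "reset" GPIO is described, ERR_PTR() on real errors,
	 * so callers only need a NULL check before use. */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	if (reset)
		gpiod_set_value(reset, 1);	/* assert reset (logical level) */

Because gpiod_set_value() works in logical levels, any active-low wiring is expressed in the firmware description rather than in the driver, which is why the code above asserts reset with 1.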
index 730fb20..15ac7cb 100644
@@ -1,36 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * ALSA SoC TLV320AIC31XX codec driver
- *
- * Copyright (C) 2013 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ * ALSA SoC TLV320AIC31xx CODEC Driver Definitions
  *
+ * Copyright (C) 2014-2017 Texas Instruments Incorporated - http://www.ti.com/
  */
+
 #ifndef _TLV320AIC31XX_H
 #define _TLV320AIC31XX_H
 
 #define AIC31XX_RATES  SNDRV_PCM_RATE_8000_192000
 
-#define AIC31XX_FORMATS        (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
-                        | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE \
-                        | SNDRV_PCM_FMTBIT_S32_LE)
-
+#define AIC31XX_FORMATS        (SNDRV_PCM_FMTBIT_S16_LE | \
+                        SNDRV_PCM_FMTBIT_S20_3LE | \
+                        SNDRV_PCM_FMTBIT_S24_3LE | \
+                        SNDRV_PCM_FMTBIT_S24_LE | \
+                        SNDRV_PCM_FMTBIT_S32_LE)
 
-#define AIC31XX_STEREO_CLASS_D_BIT     0x1
-#define AIC31XX_MINIDSP_BIT            0x2
-#define DAC31XX_BIT                    0x4
+#define AIC31XX_STEREO_CLASS_D_BIT     BIT(1)
+#define AIC31XX_MINIDSP_BIT            BIT(2)
+#define DAC31XX_BIT                    BIT(3)
 
 enum aic31xx_type {
        AIC3100 = 0,
        AIC3110 = AIC31XX_STEREO_CLASS_D_BIT,
        AIC3120 = AIC31XX_MINIDSP_BIT,
-       AIC3111 = (AIC31XX_STEREO_CLASS_D_BIT | AIC31XX_MINIDSP_BIT),
+       AIC3111 = AIC31XX_STEREO_CLASS_D_BIT | AIC31XX_MINIDSP_BIT,
        DAC3100 = DAC31XX_BIT,
        DAC3101 = DAC31XX_BIT | AIC31XX_STEREO_CLASS_D_BIT,
 };
@@ -43,222 +37,167 @@ struct aic31xx_pdata {
 
 #define AIC31XX_REG(page, reg) ((page * 128) + reg)
 
-/* Page Control Register */
-#define AIC31XX_PAGECTL                AIC31XX_REG(0, 0)
+#define AIC31XX_PAGECTL                AIC31XX_REG(0, 0) /* Page Control Register */
 
 /* Page 0 Registers */
-/* Software reset register */
-#define AIC31XX_RESET          AIC31XX_REG(0, 1)
-/* OT FLAG register */
-#define AIC31XX_OT_FLAG                AIC31XX_REG(0, 3)
-/* Clock clock Gen muxing, Multiplexers*/
-#define AIC31XX_CLKMUX         AIC31XX_REG(0, 4)
-/* PLL P and R-VAL register */
-#define AIC31XX_PLLPR          AIC31XX_REG(0, 5)
-/* PLL J-VAL register */
-#define AIC31XX_PLLJ           AIC31XX_REG(0, 6)
-/* PLL D-VAL MSB register */
-#define AIC31XX_PLLDMSB                AIC31XX_REG(0, 7)
-/* PLL D-VAL LSB register */
-#define AIC31XX_PLLDLSB                AIC31XX_REG(0, 8)
-/* DAC NDAC_VAL register*/
-#define AIC31XX_NDAC           AIC31XX_REG(0, 11)
-/* DAC MDAC_VAL register */
-#define AIC31XX_MDAC           AIC31XX_REG(0, 12)
-/* DAC OSR setting register 1, MSB value */
-#define AIC31XX_DOSRMSB                AIC31XX_REG(0, 13)
-/* DAC OSR setting register 2, LSB value */
-#define AIC31XX_DOSRLSB                AIC31XX_REG(0, 14)
+#define AIC31XX_RESET          AIC31XX_REG(0, 1) /* Software reset register */
+#define AIC31XX_OT_FLAG                AIC31XX_REG(0, 3) /* OT FLAG register */
+#define AIC31XX_CLKMUX         AIC31XX_REG(0, 4) /* Clock Gen muxing, Multiplexers */
+#define AIC31XX_PLLPR          AIC31XX_REG(0, 5) /* PLL P and R-VAL register */
+#define AIC31XX_PLLJ           AIC31XX_REG(0, 6) /* PLL J-VAL register */
+#define AIC31XX_PLLDMSB                AIC31XX_REG(0, 7) /* PLL D-VAL MSB register */
+#define AIC31XX_PLLDLSB                AIC31XX_REG(0, 8) /* PLL D-VAL LSB register */
+#define AIC31XX_NDAC           AIC31XX_REG(0, 11) /* DAC NDAC_VAL register*/
+#define AIC31XX_MDAC           AIC31XX_REG(0, 12) /* DAC MDAC_VAL register */
+#define AIC31XX_DOSRMSB                AIC31XX_REG(0, 13) /* DAC OSR setting register 1, MSB value */
+#define AIC31XX_DOSRLSB                AIC31XX_REG(0, 14) /* DAC OSR setting register 2, LSB value */
 #define AIC31XX_MINI_DSP_INPOL AIC31XX_REG(0, 16)
-/* Clock setting register 8, PLL */
-#define AIC31XX_NADC           AIC31XX_REG(0, 18)
-/* Clock setting register 9, PLL */
-#define AIC31XX_MADC           AIC31XX_REG(0, 19)
-/* ADC Oversampling (AOSR) Register */
-#define AIC31XX_AOSR           AIC31XX_REG(0, 20)
-/* Clock setting register 9, Multiplexers */
-#define AIC31XX_CLKOUTMUX      AIC31XX_REG(0, 25)
-/* Clock setting register 10, CLOCKOUT M divider value */
-#define AIC31XX_CLKOUTMVAL     AIC31XX_REG(0, 26)
-/* Audio Interface Setting Register 1 */
-#define AIC31XX_IFACE1         AIC31XX_REG(0, 27)
-/* Audio Data Slot Offset Programming */
-#define AIC31XX_DATA_OFFSET    AIC31XX_REG(0, 28)
-/* Audio Interface Setting Register 2 */
-#define AIC31XX_IFACE2         AIC31XX_REG(0, 29)
-/* Clock setting register 11, BCLK N Divider */
-#define AIC31XX_BCLKN          AIC31XX_REG(0, 30)
-/* Audio Interface Setting Register 3, Secondary Audio Interface */
-#define AIC31XX_IFACESEC1      AIC31XX_REG(0, 31)
-/* Audio Interface Setting Register 4 */
-#define AIC31XX_IFACESEC2      AIC31XX_REG(0, 32)
-/* Audio Interface Setting Register 5 */
-#define AIC31XX_IFACESEC3      AIC31XX_REG(0, 33)
-/* I2C Bus Condition */
-#define AIC31XX_I2C            AIC31XX_REG(0, 34)
-/* ADC FLAG */
-#define AIC31XX_ADCFLAG                AIC31XX_REG(0, 36)
-/* DAC Flag Registers */
-#define AIC31XX_DACFLAG1       AIC31XX_REG(0, 37)
+#define AIC31XX_NADC           AIC31XX_REG(0, 18) /* Clock setting register 8, PLL */
+#define AIC31XX_MADC           AIC31XX_REG(0, 19) /* Clock setting register 9, PLL */
+#define AIC31XX_AOSR           AIC31XX_REG(0, 20) /* ADC Oversampling (AOSR) Register */
+#define AIC31XX_CLKOUTMUX      AIC31XX_REG(0, 25) /* Clock setting register 9, Multiplexers */
+#define AIC31XX_CLKOUTMVAL     AIC31XX_REG(0, 26) /* Clock setting register 10, CLOCKOUT M divider value */
+#define AIC31XX_IFACE1         AIC31XX_REG(0, 27) /* Audio Interface Setting Register 1 */
+#define AIC31XX_DATA_OFFSET    AIC31XX_REG(0, 28) /* Audio Data Slot Offset Programming */
+#define AIC31XX_IFACE2         AIC31XX_REG(0, 29) /* Audio Interface Setting Register 2 */
+#define AIC31XX_BCLKN          AIC31XX_REG(0, 30) /* Clock setting register 11, BCLK N Divider */
+#define AIC31XX_IFACESEC1      AIC31XX_REG(0, 31) /* Audio Interface Setting Register 3, Secondary Audio Interface */
+#define AIC31XX_IFACESEC2      AIC31XX_REG(0, 32) /* Audio Interface Setting Register 4 */
+#define AIC31XX_IFACESEC3      AIC31XX_REG(0, 33) /* Audio Interface Setting Register 5 */
+#define AIC31XX_I2C            AIC31XX_REG(0, 34) /* I2C Bus Condition */
+#define AIC31XX_ADCFLAG                AIC31XX_REG(0, 36) /* ADC FLAG */
+#define AIC31XX_DACFLAG1       AIC31XX_REG(0, 37) /* DAC Flag Registers */
 #define AIC31XX_DACFLAG2       AIC31XX_REG(0, 38)
-/* Sticky Interrupt flag (overflow) */
-#define AIC31XX_OFFLAG         AIC31XX_REG(0, 39)
-/* Sticy DAC Interrupt flags */
-#define AIC31XX_INTRDACFLAG    AIC31XX_REG(0, 44)
-/* Sticy ADC Interrupt flags */
-#define AIC31XX_INTRADCFLAG    AIC31XX_REG(0, 45)
-/* DAC Interrupt flags 2 */
-#define AIC31XX_INTRDACFLAG2   AIC31XX_REG(0, 46)
-/* ADC Interrupt flags 2 */
-#define AIC31XX_INTRADCFLAG2   AIC31XX_REG(0, 47)
-/* INT1 interrupt control */
-#define AIC31XX_INT1CTRL       AIC31XX_REG(0, 48)
-/* INT2 interrupt control */
-#define AIC31XX_INT2CTRL       AIC31XX_REG(0, 49)
-/* GPIO1 control */
-#define AIC31XX_GPIO1          AIC31XX_REG(0, 50)
-
+#define AIC31XX_OFFLAG         AIC31XX_REG(0, 39) /* Sticky Interrupt flag (overflow) */
+#define AIC31XX_INTRDACFLAG    AIC31XX_REG(0, 44) /* Sticky DAC Interrupt flags */
+#define AIC31XX_INTRADCFLAG    AIC31XX_REG(0, 45) /* Sticky ADC Interrupt flags */
+#define AIC31XX_INTRDACFLAG2   AIC31XX_REG(0, 46) /* DAC Interrupt flags 2 */
+#define AIC31XX_INTRADCFLAG2   AIC31XX_REG(0, 47) /* ADC Interrupt flags 2 */
+#define AIC31XX_INT1CTRL       AIC31XX_REG(0, 48) /* INT1 interrupt control */
+#define AIC31XX_INT2CTRL       AIC31XX_REG(0, 49) /* INT2 interrupt control */
+#define AIC31XX_GPIO1          AIC31XX_REG(0, 51) /* GPIO1 control */
 #define AIC31XX_DACPRB         AIC31XX_REG(0, 60)
-/* ADC Instruction Set Register */
-#define AIC31XX_ADCPRB         AIC31XX_REG(0, 61)
-/* DAC channel setup register */
-#define AIC31XX_DACSETUP       AIC31XX_REG(0, 63)
-/* DAC Mute and volume control register */
-#define AIC31XX_DACMUTE                AIC31XX_REG(0, 64)
-/* Left DAC channel digital volume control */
-#define AIC31XX_LDACVOL                AIC31XX_REG(0, 65)
-/* Right DAC channel digital volume control */
-#define AIC31XX_RDACVOL                AIC31XX_REG(0, 66)
-/* Headset detection */
-#define AIC31XX_HSDETECT       AIC31XX_REG(0, 67)
-/* ADC Digital Mic */
-#define AIC31XX_ADCSETUP       AIC31XX_REG(0, 81)
-/* ADC Digital Volume Control Fine Adjust */
-#define AIC31XX_ADCFGA         AIC31XX_REG(0, 82)
-/* ADC Digital Volume Control Coarse Adjust */
-#define AIC31XX_ADCVOL         AIC31XX_REG(0, 83)
-
+#define AIC31XX_ADCPRB         AIC31XX_REG(0, 61) /* ADC Instruction Set Register */
+#define AIC31XX_DACSETUP       AIC31XX_REG(0, 63) /* DAC channel setup register */
+#define AIC31XX_DACMUTE                AIC31XX_REG(0, 64) /* DAC Mute and volume control register */
+#define AIC31XX_LDACVOL                AIC31XX_REG(0, 65) /* Left DAC channel digital volume control */
+#define AIC31XX_RDACVOL                AIC31XX_REG(0, 66) /* Right DAC channel digital volume control */
+#define AIC31XX_HSDETECT       AIC31XX_REG(0, 67) /* Headset detection */
+#define AIC31XX_ADCSETUP       AIC31XX_REG(0, 81) /* ADC Digital Mic */
+#define AIC31XX_ADCFGA         AIC31XX_REG(0, 82) /* ADC Digital Volume Control Fine Adjust */
+#define AIC31XX_ADCVOL         AIC31XX_REG(0, 83) /* ADC Digital Volume Control Coarse Adjust */
 
 /* Page 1 Registers */
-/* Headphone drivers */
-#define AIC31XX_HPDRIVER       AIC31XX_REG(1, 31)
-/* Class-D Speakear Amplifier */
-#define AIC31XX_SPKAMP         AIC31XX_REG(1, 32)
-/* HP Output Drivers POP Removal Settings */
-#define AIC31XX_HPPOP          AIC31XX_REG(1, 33)
-/* Output Driver PGA Ramp-Down Period Control */
-#define AIC31XX_SPPGARAMP      AIC31XX_REG(1, 34)
-/* DAC_L and DAC_R Output Mixer Routing */
-#define AIC31XX_DACMIXERROUTE  AIC31XX_REG(1, 35)
-/* Left Analog Vol to HPL */
-#define AIC31XX_LANALOGHPL     AIC31XX_REG(1, 36)
-/* Right Analog Vol to HPR */
-#define AIC31XX_RANALOGHPR     AIC31XX_REG(1, 37)
-/* Left Analog Vol to SPL */
-#define AIC31XX_LANALOGSPL     AIC31XX_REG(1, 38)
-/* Right Analog Vol to SPR */
-#define AIC31XX_RANALOGSPR     AIC31XX_REG(1, 39)
-/* HPL Driver */
-#define AIC31XX_HPLGAIN                AIC31XX_REG(1, 40)
-/* HPR Driver */
-#define AIC31XX_HPRGAIN                AIC31XX_REG(1, 41)
-/* SPL Driver */
-#define AIC31XX_SPLGAIN                AIC31XX_REG(1, 42)
-/* SPR Driver */
-#define AIC31XX_SPRGAIN                AIC31XX_REG(1, 43)
-/* HP Driver Control */
-#define AIC31XX_HPCONTROL      AIC31XX_REG(1, 44)
-/* MIC Bias Control */
-#define AIC31XX_MICBIAS                AIC31XX_REG(1, 46)
-/* MIC PGA*/
-#define AIC31XX_MICPGA         AIC31XX_REG(1, 47)
-/* Delta-Sigma Mono ADC Channel Fine-Gain Input Selection for P-Terminal */
-#define AIC31XX_MICPGAPI       AIC31XX_REG(1, 48)
-/* ADC Input Selection for M-Terminal */
-#define AIC31XX_MICPGAMI       AIC31XX_REG(1, 49)
-/* Input CM Settings */
-#define AIC31XX_MICPGACM       AIC31XX_REG(1, 50)
-
-/* Bits, masks and shifts */
+#define AIC31XX_HPDRIVER       AIC31XX_REG(1, 31) /* Headphone drivers */
+#define AIC31XX_SPKAMP         AIC31XX_REG(1, 32) /* Class-D Speaker Amplifier */
+#define AIC31XX_HPPOP          AIC31XX_REG(1, 33) /* HP Output Drivers POP Removal Settings */
+#define AIC31XX_SPPGARAMP      AIC31XX_REG(1, 34) /* Output Driver PGA Ramp-Down Period Control */
+#define AIC31XX_DACMIXERROUTE  AIC31XX_REG(1, 35) /* DAC_L and DAC_R Output Mixer Routing */
+#define AIC31XX_LANALOGHPL     AIC31XX_REG(1, 36) /* Left Analog Vol to HPL */
+#define AIC31XX_RANALOGHPR     AIC31XX_REG(1, 37) /* Right Analog Vol to HPR */
+#define AIC31XX_LANALOGSPL     AIC31XX_REG(1, 38) /* Left Analog Vol to SPL */
+#define AIC31XX_RANALOGSPR     AIC31XX_REG(1, 39) /* Right Analog Vol to SPR */
+#define AIC31XX_HPLGAIN                AIC31XX_REG(1, 40) /* HPL Driver */
+#define AIC31XX_HPRGAIN                AIC31XX_REG(1, 41) /* HPR Driver */
+#define AIC31XX_SPLGAIN                AIC31XX_REG(1, 42) /* SPL Driver */
+#define AIC31XX_SPRGAIN                AIC31XX_REG(1, 43) /* SPR Driver */
+#define AIC31XX_HPCONTROL      AIC31XX_REG(1, 44) /* HP Driver Control */
+#define AIC31XX_MICBIAS                AIC31XX_REG(1, 46) /* MIC Bias Control */
+#define AIC31XX_MICPGA         AIC31XX_REG(1, 47) /* MIC PGA*/
+#define AIC31XX_MICPGAPI       AIC31XX_REG(1, 48) /* Delta-Sigma Mono ADC Channel Fine-Gain Input Selection for P-Terminal */
+#define AIC31XX_MICPGAMI       AIC31XX_REG(1, 49) /* ADC Input Selection for M-Terminal */
+#define AIC31XX_MICPGACM       AIC31XX_REG(1, 50) /* Input CM Settings */
+
+/* Bits, masks, and shifts */
 
 /* AIC31XX_CLKMUX */
-#define AIC31XX_PLL_CLKIN_MASK                 0x0c
-#define AIC31XX_PLL_CLKIN_SHIFT                        2
-#define AIC31XX_PLL_CLKIN_MCLK                 0
-#define AIC31XX_CODEC_CLKIN_MASK               0x03
-#define AIC31XX_CODEC_CLKIN_SHIFT              0
-#define AIC31XX_CODEC_CLKIN_PLL                        3
-#define AIC31XX_CODEC_CLKIN_BCLK               1
-
-/* AIC31XX_PLLPR, AIC31XX_NDAC, AIC31XX_MDAC, AIC31XX_NADC, AIC31XX_MADC,
-   AIC31XX_BCLKN */
-#define AIC31XX_PLL_MASK               0x7f
-#define AIC31XX_PM_MASK                        0x80
+#define AIC31XX_PLL_CLKIN_MASK         GENMASK(3, 2)
+#define AIC31XX_PLL_CLKIN_SHIFT                (2)
+#define AIC31XX_PLL_CLKIN_MCLK         0x00
+#define AIC31XX_PLL_CLKIN_BCKL         0x01
+#define AIC31XX_PLL_CLKIN_GPIO1                0x02
+#define AIC31XX_PLL_CLKIN_DIN          0x03
+#define AIC31XX_CODEC_CLKIN_MASK       GENMASK(1, 0)
+#define AIC31XX_CODEC_CLKIN_SHIFT      (0)
+#define AIC31XX_CODEC_CLKIN_MCLK       0x00
+#define AIC31XX_CODEC_CLKIN_BCLK       0x01
+#define AIC31XX_CODEC_CLKIN_GPIO1      0x02
+#define AIC31XX_CODEC_CLKIN_PLL                0x03
+
+/* AIC31XX_PLLPR */
+/* AIC31XX_NDAC */
+/* AIC31XX_MDAC */
+/* AIC31XX_NADC */
+/* AIC31XX_MADC */
+/* AIC31XX_BCLKN */
+#define AIC31XX_PLL_MASK               GENMASK(6, 0)
+#define AIC31XX_PM_MASK                        BIT(7)
 
 /* AIC31XX_IFACE1 */
-#define AIC31XX_WORD_LEN_16BITS                0x00
-#define AIC31XX_WORD_LEN_20BITS                0x01
-#define AIC31XX_WORD_LEN_24BITS                0x02
-#define AIC31XX_WORD_LEN_32BITS                0x03
-#define AIC31XX_IFACE1_DATALEN_MASK    0x30
-#define AIC31XX_IFACE1_DATALEN_SHIFT   (4)
-#define AIC31XX_IFACE1_DATATYPE_MASK   0xC0
+#define AIC31XX_IFACE1_DATATYPE_MASK   GENMASK(7, 6)
 #define AIC31XX_IFACE1_DATATYPE_SHIFT  (6)
 #define AIC31XX_I2S_MODE               0x00
 #define AIC31XX_DSP_MODE               0x01
 #define AIC31XX_RIGHT_JUSTIFIED_MODE   0x02
 #define AIC31XX_LEFT_JUSTIFIED_MODE    0x03
-#define AIC31XX_IFACE1_MASTER_MASK     0x0C
-#define AIC31XX_BCLK_MASTER            0x08
-#define AIC31XX_WCLK_MASTER            0x04
+#define AIC31XX_IFACE1_DATALEN_MASK    GENMASK(5, 4)
+#define AIC31XX_IFACE1_DATALEN_SHIFT   (4)
+#define AIC31XX_WORD_LEN_16BITS                0x00
+#define AIC31XX_WORD_LEN_20BITS                0x01
+#define AIC31XX_WORD_LEN_24BITS                0x02
+#define AIC31XX_WORD_LEN_32BITS                0x03
+#define AIC31XX_IFACE1_MASTER_MASK     GENMASK(3, 2)
+#define AIC31XX_BCLK_MASTER            BIT(2)
+#define AIC31XX_WCLK_MASTER            BIT(3)
 
 /* AIC31XX_DATA_OFFSET */
-#define AIC31XX_DATA_OFFSET_MASK       0xFF
+#define AIC31XX_DATA_OFFSET_MASK       GENMASK(7, 0)
 
 /* AIC31XX_IFACE2 */
-#define AIC31XX_BCLKINV_MASK           0x08
-#define AIC31XX_BDIVCLK_MASK           0x03
+#define AIC31XX_BCLKINV_MASK           BIT(3)
+#define AIC31XX_BDIVCLK_MASK           GENMASK(1, 0)
 #define AIC31XX_DAC2BCLK               0x00
 #define AIC31XX_DACMOD2BCLK            0x01
 #define AIC31XX_ADC2BCLK               0x02
 #define AIC31XX_ADCMOD2BCLK            0x03
 
 /* AIC31XX_ADCFLAG */
-#define AIC31XX_ADCPWRSTATUS_MASK              0x40
+#define AIC31XX_ADCPWRSTATUS_MASK      BIT(6)
 
 /* AIC31XX_DACFLAG1 */
-#define AIC31XX_LDACPWRSTATUS_MASK             0x80
-#define AIC31XX_RDACPWRSTATUS_MASK             0x08
-#define AIC31XX_HPLDRVPWRSTATUS_MASK           0x20
-#define AIC31XX_HPRDRVPWRSTATUS_MASK           0x02
-#define AIC31XX_SPLDRVPWRSTATUS_MASK           0x10
-#define AIC31XX_SPRDRVPWRSTATUS_MASK           0x01
+#define AIC31XX_LDACPWRSTATUS_MASK     BIT(7)
+#define AIC31XX_HPLDRVPWRSTATUS_MASK   BIT(5)
+#define AIC31XX_SPLDRVPWRSTATUS_MASK   BIT(4)
+#define AIC31XX_RDACPWRSTATUS_MASK     BIT(3)
+#define AIC31XX_HPRDRVPWRSTATUS_MASK   BIT(1)
+#define AIC31XX_SPRDRVPWRSTATUS_MASK   BIT(0)
 
 /* AIC31XX_INTRDACFLAG */
-#define AIC31XX_HPSCDETECT_MASK                        0x80
-#define AIC31XX_BUTTONPRESS_MASK               0x20
-#define AIC31XX_HSPLUG_MASK                    0x10
-#define AIC31XX_LDRCTHRES_MASK                 0x08
-#define AIC31XX_RDRCTHRES_MASK                 0x04
-#define AIC31XX_DACSINT_MASK                   0x02
-#define AIC31XX_DACAINT_MASK                   0x01
+#define AIC31XX_HPLSCDETECT            BIT(7)
+#define AIC31XX_HPRSCDETECT            BIT(6)
+#define AIC31XX_BUTTONPRESS            BIT(5)
+#define AIC31XX_HSPLUG                 BIT(4)
+#define AIC31XX_LDRCTHRES              BIT(3)
+#define AIC31XX_RDRCTHRES              BIT(2)
+#define AIC31XX_DACSINT                        BIT(1)
+#define AIC31XX_DACAINT                        BIT(0)
 
 /* AIC31XX_INT1CTRL */
-#define AIC31XX_HSPLUGDET_MASK                 0x80
-#define AIC31XX_BUTTONPRESSDET_MASK            0x40
-#define AIC31XX_DRCTHRES_MASK                  0x20
-#define AIC31XX_AGCNOISE_MASK                  0x10
-#define AIC31XX_OC_MASK                                0x08
-#define AIC31XX_ENGINE_MASK                    0x04
+#define AIC31XX_HSPLUGDET              BIT(7)
+#define AIC31XX_BUTTONPRESSDET         BIT(6)
+#define AIC31XX_DRCTHRES               BIT(5)
+#define AIC31XX_AGCNOISE               BIT(4)
+#define AIC31XX_SC                     BIT(3)
+#define AIC31XX_ENGINE                 BIT(2)
 
 /* AIC31XX_DACSETUP */
-#define AIC31XX_SOFTSTEP_MASK                  0x03
+#define AIC31XX_SOFTSTEP_MASK          GENMASK(1, 0)
 
 /* AIC31XX_DACMUTE */
-#define AIC31XX_DACMUTE_MASK                   0x0C
+#define AIC31XX_DACMUTE_MASK           GENMASK(3, 2)
 
 /* AIC31XX_MICBIAS */
-#define AIC31XX_MICBIAS_MASK                   0x03
-#define AIC31XX_MICBIAS_SHIFT                  0
+#define AIC31XX_MICBIAS_MASK           GENMASK(1, 0)
+#define AIC31XX_MICBIAS_SHIFT          0
 
 #endif /* _TLV320AIC31XX_H */
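With the masks now expressed as GENMASK()/BIT() above, field positions can be read straight off the definitions. A small illustrative helper extracting the IFACE1 data-length field (the helper itself is not part of the driver and assumes <linux/bits.h> is pulled in):

static inline unsigned int aic31xx_iface1_datalen(unsigned int iface1)
{
	/* (GENMASK(5, 4) & reg) >> 4 yields one of AIC31XX_WORD_LEN_* */
	return (iface1 & AIC31XX_IFACE1_DATALEN_MASK) >>
	       AIC31XX_IFACE1_DATALEN_SHIFT;
}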
index e694f5f..fea0193 100644
@@ -281,34 +281,34 @@ static const struct snd_kcontrol_new aic32x4_snd_controls[] = {
 
 static const struct aic32x4_rate_divs aic32x4_divs[] = {
        /* 8k rate */
-       {AIC32X4_FREQ_12000000, 8000, 1, 7, 6800, 768, 5, 3, 128, 5, 18, 24},
-       {AIC32X4_FREQ_24000000, 8000, 2, 7, 6800, 768, 15, 1, 64, 45, 4, 24},
-       {AIC32X4_FREQ_25000000, 8000, 2, 7, 3728, 768, 15, 1, 64, 45, 4, 24},
+       {12000000, 8000, 1, 7, 6800, 768, 5, 3, 128, 5, 18, 24},
+       {24000000, 8000, 2, 7, 6800, 768, 15, 1, 64, 45, 4, 24},
+       {25000000, 8000, 2, 7, 3728, 768, 15, 1, 64, 45, 4, 24},
        /* 11.025k rate */
-       {AIC32X4_FREQ_12000000, 11025, 1, 7, 5264, 512, 8, 2, 128, 8, 8, 16},
-       {AIC32X4_FREQ_24000000, 11025, 2, 7, 5264, 512, 16, 1, 64, 32, 4, 16},
+       {12000000, 11025, 1, 7, 5264, 512, 8, 2, 128, 8, 8, 16},
+       {24000000, 11025, 2, 7, 5264, 512, 16, 1, 64, 32, 4, 16},
        /* 16k rate */
-       {AIC32X4_FREQ_12000000, 16000, 1, 7, 6800, 384, 5, 3, 128, 5, 9, 12},
-       {AIC32X4_FREQ_24000000, 16000, 2, 7, 6800, 384, 15, 1, 64, 18, 5, 12},
-       {AIC32X4_FREQ_25000000, 16000, 2, 7, 3728, 384, 15, 1, 64, 18, 5, 12},
+       {12000000, 16000, 1, 7, 6800, 384, 5, 3, 128, 5, 9, 12},
+       {24000000, 16000, 2, 7, 6800, 384, 15, 1, 64, 18, 5, 12},
+       {25000000, 16000, 2, 7, 3728, 384, 15, 1, 64, 18, 5, 12},
        /* 22.05k rate */
-       {AIC32X4_FREQ_12000000, 22050, 1, 7, 5264, 256, 4, 4, 128, 4, 8, 8},
-       {AIC32X4_FREQ_24000000, 22050, 2, 7, 5264, 256, 16, 1, 64, 16, 4, 8},
-       {AIC32X4_FREQ_25000000, 22050, 2, 7, 2253, 256, 16, 1, 64, 16, 4, 8},
+       {12000000, 22050, 1, 7, 5264, 256, 4, 4, 128, 4, 8, 8},
+       {24000000, 22050, 2, 7, 5264, 256, 16, 1, 64, 16, 4, 8},
+       {25000000, 22050, 2, 7, 2253, 256, 16, 1, 64, 16, 4, 8},
        /* 32k rate */
-       {AIC32X4_FREQ_12000000, 32000, 1, 7, 1680, 192, 2, 7, 64, 2, 21, 6},
-       {AIC32X4_FREQ_24000000, 32000, 2, 7, 1680, 192, 7, 2, 64, 7, 6, 6},
+       {12000000, 32000, 1, 7, 1680, 192, 2, 7, 64, 2, 21, 6},
+       {24000000, 32000, 2, 7, 1680, 192, 7, 2, 64, 7, 6, 6},
        /* 44.1k rate */
-       {AIC32X4_FREQ_12000000, 44100, 1, 7, 5264, 128, 2, 8, 128, 2, 8, 4},
-       {AIC32X4_FREQ_24000000, 44100, 2, 7, 5264, 128, 8, 2, 64, 8, 4, 4},
-       {AIC32X4_FREQ_25000000, 44100, 2, 7, 2253, 128, 8, 2, 64, 8, 4, 4},
+       {12000000, 44100, 1, 7, 5264, 128, 2, 8, 128, 2, 8, 4},
+       {24000000, 44100, 2, 7, 5264, 128, 8, 2, 64, 8, 4, 4},
+       {25000000, 44100, 2, 7, 2253, 128, 8, 2, 64, 8, 4, 4},
        /* 48k rate */
-       {AIC32X4_FREQ_12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
-       {AIC32X4_FREQ_24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
-       {AIC32X4_FREQ_25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4},
+       {12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
+       {24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
+       {25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4},
 
        /* 96k rate */
-       {AIC32X4_FREQ_25000000, 96000, 2, 7, 8643, 64, 4, 4, 64, 4, 4, 1},
+       {25000000, 96000, 2, 7, 8643, 64, 4, 4, 64, 4, 4, 1},
 };
 
 static const struct snd_kcontrol_new hpl_output_mixer_controls[] = {
@@ -601,9 +601,9 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
        struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
 
        switch (freq) {
-       case AIC32X4_FREQ_12000000:
-       case AIC32X4_FREQ_24000000:
-       case AIC32X4_FREQ_25000000:
+       case 12000000:
+       case 24000000:
+       case 25000000:
                aic32x4->sysclk = freq;
                return 0;
        }
@@ -614,16 +614,9 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 {
        struct snd_soc_codec *codec = codec_dai->codec;
-       u8 iface_reg_1;
-       u8 iface_reg_2;
-       u8 iface_reg_3;
-
-       iface_reg_1 = snd_soc_read(codec, AIC32X4_IFACE1);
-       iface_reg_1 = iface_reg_1 & ~(3 << 6 | 3 << 2);
-       iface_reg_2 = snd_soc_read(codec, AIC32X4_IFACE2);
-       iface_reg_2 = 0;
-       iface_reg_3 = snd_soc_read(codec, AIC32X4_IFACE3);
-       iface_reg_3 = iface_reg_3 & ~(1 << 3);
+       u8 iface_reg_1 = 0;
+       u8 iface_reg_2 = 0;
+       u8 iface_reg_3 = 0;
 
        /* set master/slave audio interface */
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -641,30 +634,37 @@ static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        case SND_SOC_DAIFMT_I2S:
                break;
        case SND_SOC_DAIFMT_DSP_A:
-               iface_reg_1 |= (AIC32X4_DSP_MODE << AIC32X4_PLLJ_SHIFT);
-               iface_reg_3 |= (1 << 3); /* invert bit clock */
+               iface_reg_1 |= (AIC32X4_DSP_MODE <<
+                               AIC32X4_IFACE1_DATATYPE_SHIFT);
+               iface_reg_3 |= AIC32X4_BCLKINV_MASK; /* invert bit clock */
                iface_reg_2 = 0x01; /* add offset 1 */
                break;
        case SND_SOC_DAIFMT_DSP_B:
-               iface_reg_1 |= (AIC32X4_DSP_MODE << AIC32X4_PLLJ_SHIFT);
-               iface_reg_3 |= (1 << 3); /* invert bit clock */
+               iface_reg_1 |= (AIC32X4_DSP_MODE <<
+                               AIC32X4_IFACE1_DATATYPE_SHIFT);
+               iface_reg_3 |= AIC32X4_BCLKINV_MASK; /* invert bit clock */
                break;
        case SND_SOC_DAIFMT_RIGHT_J:
-               iface_reg_1 |=
-                       (AIC32X4_RIGHT_JUSTIFIED_MODE << AIC32X4_PLLJ_SHIFT);
+               iface_reg_1 |= (AIC32X4_RIGHT_JUSTIFIED_MODE <<
+                               AIC32X4_IFACE1_DATATYPE_SHIFT);
                break;
        case SND_SOC_DAIFMT_LEFT_J:
-               iface_reg_1 |=
-                       (AIC32X4_LEFT_JUSTIFIED_MODE << AIC32X4_PLLJ_SHIFT);
+               iface_reg_1 |= (AIC32X4_LEFT_JUSTIFIED_MODE <<
+                               AIC32X4_IFACE1_DATATYPE_SHIFT);
                break;
        default:
                printk(KERN_ERR "aic32x4: invalid DAI interface format\n");
                return -EINVAL;
        }
 
-       snd_soc_write(codec, AIC32X4_IFACE1, iface_reg_1);
-       snd_soc_write(codec, AIC32X4_IFACE2, iface_reg_2);
-       snd_soc_write(codec, AIC32X4_IFACE3, iface_reg_3);
+       snd_soc_update_bits(codec, AIC32X4_IFACE1,
+                           AIC32X4_IFACE1_DATATYPE_MASK |
+                           AIC32X4_IFACE1_MASTER_MASK, iface_reg_1);
+       snd_soc_update_bits(codec, AIC32X4_IFACE2,
+                           AIC32X4_DATA_OFFSET_MASK, iface_reg_2);
+       snd_soc_update_bits(codec, AIC32X4_IFACE3,
+                           AIC32X4_BCLKINV_MASK, iface_reg_3);
+
        return 0;
 }
 
@@ -674,7 +674,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
 {
        struct snd_soc_codec *codec = dai->codec;
        struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
-       u8 data;
+       u8 iface1_reg = 0;
+       u8 dacsetup_reg = 0;
        int i;
 
        i = aic32x4_get_divs(aic32x4->sysclk, params_rate(params));
@@ -683,82 +684,88 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
                return i;
        }
 
-       /* Use PLL as CODEC_CLKIN and DAC_MOD_CLK as BDIV_CLKIN */
-       snd_soc_write(codec, AIC32X4_CLKMUX, AIC32X4_PLLCLKIN);
-       snd_soc_write(codec, AIC32X4_IFACE3, AIC32X4_DACMOD2BCLK);
+       /* MCLK as PLL_CLKIN */
+       snd_soc_update_bits(codec, AIC32X4_CLKMUX, AIC32X4_PLL_CLKIN_MASK,
+                           AIC32X4_PLL_CLKIN_MCLK << AIC32X4_PLL_CLKIN_SHIFT);
+       /* PLL as CODEC_CLKIN */
+       snd_soc_update_bits(codec, AIC32X4_CLKMUX, AIC32X4_CODEC_CLKIN_MASK,
+                           AIC32X4_CODEC_CLKIN_PLL << AIC32X4_CODEC_CLKIN_SHIFT);
+       /* DAC_MOD_CLK as BDIV_CLKIN */
+       snd_soc_update_bits(codec, AIC32X4_IFACE3, AIC32X4_BDIVCLK_MASK,
+                           AIC32X4_DACMOD2BCLK << AIC32X4_BDIVCLK_SHIFT);
+
+       /* We will fix R value to 1 and will make P & J=K.D as variable */
+       snd_soc_update_bits(codec, AIC32X4_PLLPR, AIC32X4_PLL_R_MASK, 0x01);
 
-       /* We will fix R value to 1 and will make P & J=K.D as varialble */
-       data = snd_soc_read(codec, AIC32X4_PLLPR);
-       data &= ~(7 << 4);
-       snd_soc_write(codec, AIC32X4_PLLPR,
-                     (data | (aic32x4_divs[i].p_val << 4) | 0x01));
+       /* PLL P value */
+       snd_soc_update_bits(codec, AIC32X4_PLLPR, AIC32X4_PLL_P_MASK,
+                           aic32x4_divs[i].p_val << AIC32X4_PLL_P_SHIFT);
 
+       /* PLL J value */
        snd_soc_write(codec, AIC32X4_PLLJ, aic32x4_divs[i].pll_j);
 
+       /* PLL D value */
        snd_soc_write(codec, AIC32X4_PLLDMSB, (aic32x4_divs[i].pll_d >> 8));
-       snd_soc_write(codec, AIC32X4_PLLDLSB,
-                     (aic32x4_divs[i].pll_d & 0xff));
+       snd_soc_write(codec, AIC32X4_PLLDLSB, (aic32x4_divs[i].pll_d & 0xff));
 
        /* NDAC divider value */
-       data = snd_soc_read(codec, AIC32X4_NDAC);
-       data &= ~(0x7f);
-       snd_soc_write(codec, AIC32X4_NDAC, data | aic32x4_divs[i].ndac);
+       snd_soc_update_bits(codec, AIC32X4_NDAC,
+                           AIC32X4_NDAC_MASK, aic32x4_divs[i].ndac);
 
        /* MDAC divider value */
-       data = snd_soc_read(codec, AIC32X4_MDAC);
-       data &= ~(0x7f);
-       snd_soc_write(codec, AIC32X4_MDAC, data | aic32x4_divs[i].mdac);
+       snd_soc_update_bits(codec, AIC32X4_MDAC,
+                           AIC32X4_MDAC_MASK, aic32x4_divs[i].mdac);
 
        /* DOSR MSB & LSB values */
        snd_soc_write(codec, AIC32X4_DOSRMSB, aic32x4_divs[i].dosr >> 8);
-       snd_soc_write(codec, AIC32X4_DOSRLSB,
-                     (aic32x4_divs[i].dosr & 0xff));
+       snd_soc_write(codec, AIC32X4_DOSRLSB, (aic32x4_divs[i].dosr & 0xff));
 
        /* NADC divider value */
-       data = snd_soc_read(codec, AIC32X4_NADC);
-       data &= ~(0x7f);
-       snd_soc_write(codec, AIC32X4_NADC, data | aic32x4_divs[i].nadc);
+       snd_soc_update_bits(codec, AIC32X4_NADC,
+                           AIC32X4_NADC_MASK, aic32x4_divs[i].nadc);
 
        /* MADC divider value */
-       data = snd_soc_read(codec, AIC32X4_MADC);
-       data &= ~(0x7f);
-       snd_soc_write(codec, AIC32X4_MADC, data | aic32x4_divs[i].madc);
+       snd_soc_update_bits(codec, AIC32X4_MADC,
+                           AIC32X4_MADC_MASK, aic32x4_divs[i].madc);
 
        /* AOSR value */
        snd_soc_write(codec, AIC32X4_AOSR, aic32x4_divs[i].aosr);
 
        /* BCLK N divider */
-       data = snd_soc_read(codec, AIC32X4_BCLKN);
-       data &= ~(0x7f);
-       snd_soc_write(codec, AIC32X4_BCLKN, data | aic32x4_divs[i].blck_N);
+       snd_soc_update_bits(codec, AIC32X4_BCLKN,
+                           AIC32X4_BCLK_MASK, aic32x4_divs[i].blck_N);
 
-       data = snd_soc_read(codec, AIC32X4_IFACE1);
-       data = data & ~(3 << 4);
        switch (params_width(params)) {
        case 16:
+               iface1_reg |= (AIC32X4_WORD_LEN_16BITS <<
+                              AIC32X4_IFACE1_DATALEN_SHIFT);
                break;
        case 20:
-               data |= (AIC32X4_WORD_LEN_20BITS << AIC32X4_DOSRMSB_SHIFT);
+               iface1_reg |= (AIC32X4_WORD_LEN_20BITS <<
+                              AIC32X4_IFACE1_DATALEN_SHIFT);
                break;
        case 24:
-               data |= (AIC32X4_WORD_LEN_24BITS << AIC32X4_DOSRMSB_SHIFT);
+               iface1_reg |= (AIC32X4_WORD_LEN_24BITS <<
+                              AIC32X4_IFACE1_DATALEN_SHIFT);
                break;
        case 32:
-               data |= (AIC32X4_WORD_LEN_32BITS << AIC32X4_DOSRMSB_SHIFT);
+               iface1_reg |= (AIC32X4_WORD_LEN_32BITS <<
+                              AIC32X4_IFACE1_DATALEN_SHIFT);
                break;
        }
-       snd_soc_write(codec, AIC32X4_IFACE1, data);
+       snd_soc_update_bits(codec, AIC32X4_IFACE1,
+                           AIC32X4_IFACE1_DATALEN_MASK, iface1_reg);
 
        if (params_channels(params) == 1) {
-               data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2LCHN;
+               dacsetup_reg = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2LCHN;
        } else {
                if (aic32x4->swapdacs)
-                       data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2RCHN;
+                       dacsetup_reg = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2RCHN;
                else
-                       data = AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN;
+                       dacsetup_reg = AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN;
        }
-       snd_soc_update_bits(codec, AIC32X4_DACSETUP, AIC32X4_DAC_CHAN_MASK,
-                       data);
+       snd_soc_update_bits(codec, AIC32X4_DACSETUP,
+                           AIC32X4_DAC_CHAN_MASK, dacsetup_reg);
 
        return 0;
 }
@@ -766,13 +773,10 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
 static int aic32x4_mute(struct snd_soc_dai *dai, int mute)
 {
        struct snd_soc_codec *codec = dai->codec;
-       u8 dac_reg;
 
-       dac_reg = snd_soc_read(codec, AIC32X4_DACMUTE) & ~AIC32X4_MUTEON;
-       if (mute)
-               snd_soc_write(codec, AIC32X4_DACMUTE, dac_reg | AIC32X4_MUTEON);
-       else
-               snd_soc_write(codec, AIC32X4_DACMUTE, dac_reg);
+       snd_soc_update_bits(codec, AIC32X4_DACMUTE,
+                           AIC32X4_MUTEON, mute ? AIC32X4_MUTEON : 0);
+
        return 0;
 }
 
index da7cec4..e9df49e 100644
@@ -19,141 +19,189 @@ int aic32x4_remove(struct device *dev);
 
 /* tlv320aic32x4 register space (in decimal to match datasheet) */
 
-#define AIC32X4_PAGE1          128
-
-#define        AIC32X4_PSEL            0
-#define        AIC32X4_RESET           1
-#define        AIC32X4_CLKMUX          4
-#define        AIC32X4_PLLPR           5
-#define        AIC32X4_PLLJ            6
-#define        AIC32X4_PLLDMSB         7
-#define        AIC32X4_PLLDLSB         8
-#define        AIC32X4_NDAC            11
-#define        AIC32X4_MDAC            12
-#define AIC32X4_DOSRMSB                13
-#define AIC32X4_DOSRLSB                14
-#define        AIC32X4_NADC            18
-#define        AIC32X4_MADC            19
-#define AIC32X4_AOSR           20
-#define AIC32X4_CLKMUX2                25
-#define AIC32X4_CLKOUTM                26
-#define AIC32X4_IFACE1         27
-#define AIC32X4_IFACE2         28
-#define AIC32X4_IFACE3         29
-#define AIC32X4_BCLKN          30
-#define AIC32X4_IFACE4         31
-#define AIC32X4_IFACE5         32
-#define AIC32X4_IFACE6         33
-#define AIC32X4_GPIOCTL                52
-#define AIC32X4_DOUTCTL                53
-#define AIC32X4_DINCTL         54
-#define AIC32X4_MISOCTL                55
-#define AIC32X4_SCLKCTL                56
-#define AIC32X4_DACSPB         60
-#define AIC32X4_ADCSPB         61
-#define AIC32X4_DACSETUP       63
-#define AIC32X4_DACMUTE                64
-#define AIC32X4_LDACVOL                65
-#define AIC32X4_RDACVOL                66
-#define AIC32X4_ADCSETUP       81
-#define        AIC32X4_ADCFGA          82
-#define AIC32X4_LADCVOL                83
-#define AIC32X4_RADCVOL                84
-#define AIC32X4_LAGC1          86
-#define AIC32X4_LAGC2          87
-#define AIC32X4_LAGC3          88
-#define AIC32X4_LAGC4          89
-#define AIC32X4_LAGC5          90
-#define AIC32X4_LAGC6          91
-#define AIC32X4_LAGC7          92
-#define AIC32X4_RAGC1          94
-#define AIC32X4_RAGC2          95
-#define AIC32X4_RAGC3          96
-#define AIC32X4_RAGC4          97
-#define AIC32X4_RAGC5          98
-#define AIC32X4_RAGC6          99
-#define AIC32X4_RAGC7          100
-#define AIC32X4_PWRCFG         (AIC32X4_PAGE1 + 1)
-#define AIC32X4_LDOCTL         (AIC32X4_PAGE1 + 2)
-#define AIC32X4_OUTPWRCTL      (AIC32X4_PAGE1 + 9)
-#define AIC32X4_CMMODE         (AIC32X4_PAGE1 + 10)
-#define AIC32X4_HPLROUTE       (AIC32X4_PAGE1 + 12)
-#define AIC32X4_HPRROUTE       (AIC32X4_PAGE1 + 13)
-#define AIC32X4_LOLROUTE       (AIC32X4_PAGE1 + 14)
-#define AIC32X4_LORROUTE       (AIC32X4_PAGE1 + 15)
-#define        AIC32X4_HPLGAIN         (AIC32X4_PAGE1 + 16)
-#define        AIC32X4_HPRGAIN         (AIC32X4_PAGE1 + 17)
-#define        AIC32X4_LOLGAIN         (AIC32X4_PAGE1 + 18)
-#define        AIC32X4_LORGAIN         (AIC32X4_PAGE1 + 19)
-#define AIC32X4_HEADSTART      (AIC32X4_PAGE1 + 20)
-#define AIC32X4_MICBIAS                (AIC32X4_PAGE1 + 51)
-#define AIC32X4_LMICPGAPIN     (AIC32X4_PAGE1 + 52)
-#define AIC32X4_LMICPGANIN     (AIC32X4_PAGE1 + 54)
-#define AIC32X4_RMICPGAPIN     (AIC32X4_PAGE1 + 55)
-#define AIC32X4_RMICPGANIN     (AIC32X4_PAGE1 + 57)
-#define AIC32X4_FLOATINGINPUT  (AIC32X4_PAGE1 + 58)
-#define AIC32X4_LMICPGAVOL     (AIC32X4_PAGE1 + 59)
-#define AIC32X4_RMICPGAVOL     (AIC32X4_PAGE1 + 60)
-
-#define AIC32X4_FREQ_12000000 12000000
-#define AIC32X4_FREQ_24000000 24000000
-#define AIC32X4_FREQ_25000000 25000000
-
-#define AIC32X4_WORD_LEN_16BITS                0x00
-#define AIC32X4_WORD_LEN_20BITS                0x01
-#define AIC32X4_WORD_LEN_24BITS                0x02
-#define AIC32X4_WORD_LEN_32BITS                0x03
-
-#define AIC32X4_LADC_EN                        (1 << 7)
-#define AIC32X4_RADC_EN                        (1 << 6)
-
-#define AIC32X4_I2S_MODE               0x00
-#define AIC32X4_DSP_MODE               0x01
-#define AIC32X4_RIGHT_JUSTIFIED_MODE   0x02
-#define AIC32X4_LEFT_JUSTIFIED_MODE    0x03
-
-#define AIC32X4_AVDDWEAKDISABLE                0x08
-#define AIC32X4_LDOCTLEN               0x01
-
-#define AIC32X4_LDOIN_18_36            0x01
-#define AIC32X4_LDOIN2HP               0x02
-
-#define AIC32X4_DACSPBLOCK_MASK                0x1f
-#define AIC32X4_ADCSPBLOCK_MASK                0x1f
-
-#define AIC32X4_PLLJ_SHIFT             6
-#define AIC32X4_DOSRMSB_SHIFT          4
-
-#define AIC32X4_PLLCLKIN               0x03
-
-#define AIC32X4_MICBIAS_LDOIN          0x08
+#define AIC32X4_REG(page, reg) ((page * 128) + reg)
+
+#define        AIC32X4_PSEL            AIC32X4_REG(0, 0)
+
+#define        AIC32X4_RESET           AIC32X4_REG(0, 1)
+#define        AIC32X4_CLKMUX          AIC32X4_REG(0, 4)
+#define        AIC32X4_PLLPR           AIC32X4_REG(0, 5)
+#define        AIC32X4_PLLJ            AIC32X4_REG(0, 6)
+#define        AIC32X4_PLLDMSB         AIC32X4_REG(0, 7)
+#define        AIC32X4_PLLDLSB         AIC32X4_REG(0, 8)
+#define        AIC32X4_NDAC            AIC32X4_REG(0, 11)
+#define        AIC32X4_MDAC            AIC32X4_REG(0, 12)
+#define AIC32X4_DOSRMSB                AIC32X4_REG(0, 13)
+#define AIC32X4_DOSRLSB                AIC32X4_REG(0, 14)
+#define        AIC32X4_NADC            AIC32X4_REG(0, 18)
+#define        AIC32X4_MADC            AIC32X4_REG(0, 19)
+#define AIC32X4_AOSR           AIC32X4_REG(0, 20)
+#define AIC32X4_CLKMUX2                AIC32X4_REG(0, 25)
+#define AIC32X4_CLKOUTM                AIC32X4_REG(0, 26)
+#define AIC32X4_IFACE1         AIC32X4_REG(0, 27)
+#define AIC32X4_IFACE2         AIC32X4_REG(0, 28)
+#define AIC32X4_IFACE3         AIC32X4_REG(0, 29)
+#define AIC32X4_BCLKN          AIC32X4_REG(0, 30)
+#define AIC32X4_IFACE4         AIC32X4_REG(0, 31)
+#define AIC32X4_IFACE5         AIC32X4_REG(0, 32)
+#define AIC32X4_IFACE6         AIC32X4_REG(0, 33)
+#define AIC32X4_GPIOCTL                AIC32X4_REG(0, 52)
+#define AIC32X4_DOUTCTL                AIC32X4_REG(0, 53)
+#define AIC32X4_DINCTL         AIC32X4_REG(0, 54)
+#define AIC32X4_MISOCTL                AIC32X4_REG(0, 55)
+#define AIC32X4_SCLKCTL                AIC32X4_REG(0, 56)
+#define AIC32X4_DACSPB         AIC32X4_REG(0, 60)
+#define AIC32X4_ADCSPB         AIC32X4_REG(0, 61)
+#define AIC32X4_DACSETUP       AIC32X4_REG(0, 63)
+#define AIC32X4_DACMUTE                AIC32X4_REG(0, 64)
+#define AIC32X4_LDACVOL                AIC32X4_REG(0, 65)
+#define AIC32X4_RDACVOL                AIC32X4_REG(0, 66)
+#define AIC32X4_ADCSETUP       AIC32X4_REG(0, 81)
+#define        AIC32X4_ADCFGA          AIC32X4_REG(0, 82)
+#define AIC32X4_LADCVOL                AIC32X4_REG(0, 83)
+#define AIC32X4_RADCVOL                AIC32X4_REG(0, 84)
+#define AIC32X4_LAGC1          AIC32X4_REG(0, 86)
+#define AIC32X4_LAGC2          AIC32X4_REG(0, 87)
+#define AIC32X4_LAGC3          AIC32X4_REG(0, 88)
+#define AIC32X4_LAGC4          AIC32X4_REG(0, 89)
+#define AIC32X4_LAGC5          AIC32X4_REG(0, 90)
+#define AIC32X4_LAGC6          AIC32X4_REG(0, 91)
+#define AIC32X4_LAGC7          AIC32X4_REG(0, 92)
+#define AIC32X4_RAGC1          AIC32X4_REG(0, 94)
+#define AIC32X4_RAGC2          AIC32X4_REG(0, 95)
+#define AIC32X4_RAGC3          AIC32X4_REG(0, 96)
+#define AIC32X4_RAGC4          AIC32X4_REG(0, 97)
+#define AIC32X4_RAGC5          AIC32X4_REG(0, 98)
+#define AIC32X4_RAGC6          AIC32X4_REG(0, 99)
+#define AIC32X4_RAGC7          AIC32X4_REG(0, 100)
+
+#define AIC32X4_PWRCFG         AIC32X4_REG(1, 1)
+#define AIC32X4_LDOCTL         AIC32X4_REG(1, 2)
+#define AIC32X4_OUTPWRCTL      AIC32X4_REG(1, 9)
+#define AIC32X4_CMMODE         AIC32X4_REG(1, 10)
+#define AIC32X4_HPLROUTE       AIC32X4_REG(1, 12)
+#define AIC32X4_HPRROUTE       AIC32X4_REG(1, 13)
+#define AIC32X4_LOLROUTE       AIC32X4_REG(1, 14)
+#define AIC32X4_LORROUTE       AIC32X4_REG(1, 15)
+#define        AIC32X4_HPLGAIN         AIC32X4_REG(1, 16)
+#define        AIC32X4_HPRGAIN         AIC32X4_REG(1, 17)
+#define        AIC32X4_LOLGAIN         AIC32X4_REG(1, 18)
+#define        AIC32X4_LORGAIN         AIC32X4_REG(1, 19)
+#define AIC32X4_HEADSTART      AIC32X4_REG(1, 20)
+#define AIC32X4_MICBIAS                AIC32X4_REG(1, 51)
+#define AIC32X4_LMICPGAPIN     AIC32X4_REG(1, 52)
+#define AIC32X4_LMICPGANIN     AIC32X4_REG(1, 54)
+#define AIC32X4_RMICPGAPIN     AIC32X4_REG(1, 55)
+#define AIC32X4_RMICPGANIN     AIC32X4_REG(1, 57)
+#define AIC32X4_FLOATINGINPUT  AIC32X4_REG(1, 58)
+#define AIC32X4_LMICPGAVOL     AIC32X4_REG(1, 59)
+#define AIC32X4_RMICPGAVOL     AIC32X4_REG(1, 60)
+
+/* Bits, masks, and shifts */
+
+/* AIC32X4_CLKMUX */
+#define AIC32X4_PLL_CLKIN_MASK         GENMASK(3, 2)
+#define AIC32X4_PLL_CLKIN_SHIFT                (2)
+#define AIC32X4_PLL_CLKIN_MCLK         (0x00)
+#define AIC32X4_PLL_CLKIN_BCKL         (0x01)
+#define AIC32X4_PLL_CLKIN_GPIO1                (0x02)
+#define AIC32X4_PLL_CLKIN_DIN          (0x03)
+#define AIC32X4_CODEC_CLKIN_MASK       GENMASK(1, 0)
+#define AIC32X4_CODEC_CLKIN_SHIFT      (0)
+#define AIC32X4_CODEC_CLKIN_MCLK       (0x00)
+#define AIC32X4_CODEC_CLKIN_BCLK       (0x01)
+#define AIC32X4_CODEC_CLKIN_GPIO1      (0x02)
+#define AIC32X4_CODEC_CLKIN_PLL                (0x03)
+
+/* AIC32X4_PLLPR */
+#define AIC32X4_PLLEN                  BIT(7)
+#define AIC32X4_PLL_P_MASK             GENMASK(6, 4)
+#define AIC32X4_PLL_P_SHIFT            (4)
+#define AIC32X4_PLL_R_MASK             GENMASK(3, 0)
+
+/* AIC32X4_NDAC */
+#define AIC32X4_NDACEN                 BIT(7)
+#define AIC32X4_NDAC_MASK              GENMASK(6, 0)
+
+/* AIC32X4_MDAC */
+#define AIC32X4_MDACEN                 BIT(7)
+#define AIC32X4_MDAC_MASK              GENMASK(6, 0)
+
+/* AIC32X4_NADC */
+#define AIC32X4_NADCEN                 BIT(7)
+#define AIC32X4_NADC_MASK              GENMASK(6, 0)
+
+/* AIC32X4_MADC */
+#define AIC32X4_MADCEN                 BIT(7)
+#define AIC32X4_MADC_MASK              GENMASK(6, 0)
+
+/* AIC32X4_BCLKN */
+#define AIC32X4_BCLKEN                 BIT(7)
+#define AIC32X4_BCLK_MASK              GENMASK(6, 0)
+
+/* AIC32X4_IFACE1 */
+#define AIC32X4_IFACE1_DATATYPE_MASK   GENMASK(7, 6)
+#define AIC32X4_IFACE1_DATATYPE_SHIFT  (6)
+#define AIC32X4_I2S_MODE               (0x00)
+#define AIC32X4_DSP_MODE               (0x01)
+#define AIC32X4_RIGHT_JUSTIFIED_MODE   (0x02)
+#define AIC32X4_LEFT_JUSTIFIED_MODE    (0x03)
+#define AIC32X4_IFACE1_DATALEN_MASK    GENMASK(5, 4)
+#define AIC32X4_IFACE1_DATALEN_SHIFT   (4)
+#define AIC32X4_WORD_LEN_16BITS                (0x00)
+#define AIC32X4_WORD_LEN_20BITS                (0x01)
+#define AIC32X4_WORD_LEN_24BITS                (0x02)
+#define AIC32X4_WORD_LEN_32BITS                (0x03)
+#define AIC32X4_IFACE1_MASTER_MASK     GENMASK(3, 2)
+#define AIC32X4_BCLKMASTER             BIT(2)
+#define AIC32X4_WCLKMASTER             BIT(3)
+
+/* AIC32X4_IFACE2 */
+#define AIC32X4_DATA_OFFSET_MASK       GENMASK(7, 0)
+
+/* AIC32X4_IFACE3 */
+#define AIC32X4_BCLKINV_MASK           BIT(3)
+#define AIC32X4_BDIVCLK_MASK           GENMASK(1, 0)
+#define AIC32X4_BDIVCLK_SHIFT          (0)
+#define AIC32X4_DAC2BCLK               (0x00)
+#define AIC32X4_DACMOD2BCLK            (0x01)
+#define AIC32X4_ADC2BCLK               (0x02)
+#define AIC32X4_ADCMOD2BCLK            (0x03)
+
+/* AIC32X4_DACSETUP */
+#define AIC32X4_DAC_CHAN_MASK          GENMASK(5, 2)
+#define AIC32X4_LDAC2RCHN              BIT(5)
+#define AIC32X4_LDAC2LCHN              BIT(4)
+#define AIC32X4_RDAC2LCHN              BIT(3)
+#define AIC32X4_RDAC2RCHN              BIT(2)
+
+/* AIC32X4_DACMUTE */
+#define AIC32X4_MUTEON                 0x0C
+
+/* AIC32X4_ADCSETUP */
+#define AIC32X4_LADC_EN                        BIT(7)
+#define AIC32X4_RADC_EN                        BIT(6)
+
+/* AIC32X4_PWRCFG */
+#define AIC32X4_AVDDWEAKDISABLE                BIT(3)
+
+/* AIC32X4_LDOCTL */
+#define AIC32X4_LDOCTLEN               BIT(0)
+
+/* AIC32X4_CMMODE */
+#define AIC32X4_LDOIN_18_36            BIT(0)
+#define AIC32X4_LDOIN2HP               BIT(1)
+
+/* AIC32X4_MICBIAS */
+#define AIC32X4_MICBIAS_LDOIN          BIT(3)
 #define AIC32X4_MICBIAS_2075V          0x60
 
+/* AIC32X4_LMICPGANIN */
 #define AIC32X4_LMICPGANIN_IN2R_10K    0x10
 #define AIC32X4_LMICPGANIN_CM1L_10K    0x40
+
+/* AIC32X4_RMICPGANIN */
 #define AIC32X4_RMICPGANIN_IN1L_10K    0x10
 #define AIC32X4_RMICPGANIN_CM1R_10K    0x40
 
-#define AIC32X4_LMICPGAVOL_NOGAIN      0x80
-#define AIC32X4_RMICPGAVOL_NOGAIN      0x80
-
-#define AIC32X4_BCLKMASTER             0x08
-#define AIC32X4_WCLKMASTER             0x04
-#define AIC32X4_PLLEN                  (0x01 << 7)
-#define AIC32X4_NDACEN                 (0x01 << 7)
-#define AIC32X4_MDACEN                 (0x01 << 7)
-#define AIC32X4_NADCEN                 (0x01 << 7)
-#define AIC32X4_MADCEN                 (0x01 << 7)
-#define AIC32X4_BCLKEN                 (0x01 << 7)
-#define AIC32X4_DACEN                  (0x03 << 6)
-#define AIC32X4_RDAC2LCHN              (0x02 << 2)
-#define AIC32X4_LDAC2RCHN              (0x02 << 4)
-#define AIC32X4_LDAC2LCHN              (0x01 << 4)
-#define AIC32X4_RDAC2RCHN              (0x01 << 2)
-#define AIC32X4_DAC_CHAN_MASK          0x3c
-
-#define AIC32X4_SSTEP2WCLK             0x01
-#define AIC32X4_MUTEON                 0x0C
-#define        AIC32X4_DACMOD2BCLK             0x01
-
 #endif                         /* _TLV320AIC32X4_H */
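The hunk above replaces the flat AIC32X4_* register offsets with a page/offset helper. As a minimal standalone sketch (not part of the patch), the mapping can be checked like this, assuming the removed AIC32X4_PAGE1 constant was 128, as the new macro implies:

/*
 * Illustrative only: how AIC32X4_REG(page, reg) flattens a
 * (page, register) pair into the driver's single linear address space.
 * Each page holds 128 registers, so page 1 starts at offset 128.
 */
#include <stdio.h>

#define AIC32X4_REG(page, reg)	((page * 128) + reg)

int main(void)
{
	/* Page 0 registers keep their raw offsets. */
	printf("AIC32X4_CLKMUX  -> %d\n", AIC32X4_REG(0, 4));	/* 4 */
	/* Page 1 registers land 128 higher. */
	printf("AIC32X4_PWRCFG  -> %d\n", AIC32X4_REG(1, 1));	/* 129 */
	printf("AIC32X4_MICBIAS -> %d\n", AIC32X4_REG(1, 51));	/* 179 */
	return 0;
}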
index 06f9257..b751cad 100644
@@ -1804,11 +1804,18 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
                if (!ai3x_setup)
                        return -ENOMEM;
 
-               ret = of_get_named_gpio(np, "gpio-reset", 0);
-               if (ret >= 0)
+               ret = of_get_named_gpio(np, "reset-gpios", 0);
+               if (ret >= 0) {
                        aic3x->gpio_reset = ret;
-               else
-                       aic3x->gpio_reset = -1;
+               } else {
+                       ret = of_get_named_gpio(np, "gpio-reset", 0);
+                       if (ret > 0) {
+                               dev_warn(&i2c->dev, "Using deprecated property \"gpio-reset\", please update your DT");
+                               aic3x->gpio_reset = ret;
+                       } else {
+                               aic3x->gpio_reset = -1;
+                       }
+               }
 
                if (of_property_read_u32_array(np, "ai3x-gpio-func",
                                        ai3x_setup->gpio_func, 2) >= 0) {
index 5b94a15..8c71d2f 100644
@@ -106,6 +106,7 @@ struct tlv320dac33_priv {
        int mode1_latency;              /* latency caused by the i2c writes in
                                         * us */
        u8 burst_bclkdiv;               /* BCLK divider value in burst mode */
+       u8 *reg_cache;
        unsigned int burst_rate;        /* Interface speed in Burst modes */
 
        int keep_bclk;                  /* Keep the BCLK continuously running
@@ -121,7 +122,7 @@ struct tlv320dac33_priv {
        unsigned int uthr;
 
        enum dac33_state state;
-       void *control_data;
+       struct i2c_client *i2c;
 };
 
 static const u8 dac33_reg[DAC33_CACHEREGNUM] = {
@@ -173,7 +174,8 @@ static const u8 dac33_reg[DAC33_CACHEREGNUM] = {
 static inline unsigned int dac33_read_reg_cache(struct snd_soc_codec *codec,
                                                unsigned reg)
 {
-       u8 *cache = codec->reg_cache;
+       struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+       u8 *cache = dac33->reg_cache;
        if (reg >= DAC33_CACHEREGNUM)
                return 0;
 
@@ -183,7 +185,8 @@ static inline unsigned int dac33_read_reg_cache(struct snd_soc_codec *codec,
 static inline void dac33_write_reg_cache(struct snd_soc_codec *codec,
                                         u8 reg, u8 value)
 {
-       u8 *cache = codec->reg_cache;
+       struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+       u8 *cache = dac33->reg_cache;
        if (reg >= DAC33_CACHEREGNUM)
                return;
 
@@ -200,7 +203,7 @@ static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
 
        /* If powered off, return the cached value */
        if (dac33->chip_power) {
-               val = i2c_smbus_read_byte_data(codec->control_data, value[0]);
+               val = i2c_smbus_read_byte_data(dac33->i2c, value[0]);
                if (val < 0) {
                        dev_err(codec->dev, "Read failed (%d)\n", val);
                        value[0] = dac33_read_reg_cache(codec, reg);
@@ -233,7 +236,7 @@ static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
 
        dac33_write_reg_cache(codec, data[0], data[1]);
        if (dac33->chip_power) {
-               ret = codec->hw_write(codec->control_data, data, 2);
+               ret = i2c_master_send(dac33->i2c, data, 2);
                if (ret != 2)
                        dev_err(codec->dev, "Write failed (%d)\n", ret);
                else
@@ -244,7 +247,7 @@ static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
 }
 
 static int dac33_write_locked(struct snd_soc_codec *codec, unsigned int reg,
-                      unsigned int value)
+                             unsigned int value)
 {
        struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
        int ret;
@@ -280,7 +283,7 @@ static int dac33_write16(struct snd_soc_codec *codec, unsigned int reg,
        if (dac33->chip_power) {
                /* We need to set autoincrement mode for 16 bit writes */
                data[0] |= DAC33_I2C_ADDR_AUTOINC;
-               ret = codec->hw_write(codec->control_data, data, 3);
+               ret = i2c_master_send(dac33->i2c, data, 3);
                if (ret != 3)
                        dev_err(codec->dev, "Write failed (%d)\n", ret);
                else
@@ -1379,8 +1382,6 @@ static int dac33_soc_probe(struct snd_soc_codec *codec)
        struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
        int ret = 0;
 
-       codec->control_data = dac33->control_data;
-       codec->hw_write = (hw_write_t) i2c_master_send;
        dac33->codec = codec;
 
        /* Read the tlv320dac33 ID registers */
@@ -1438,9 +1439,7 @@ static const struct snd_soc_codec_driver soc_codec_dev_tlv320dac33 = {
        .write = dac33_write_locked,
        .set_bias_level = dac33_set_bias_level,
        .idle_bias_off = true,
-       .reg_cache_size = ARRAY_SIZE(dac33_reg),
-       .reg_word_size = sizeof(u8),
-       .reg_cache_default = dac33_reg,
+
        .probe = dac33_soc_probe,
        .remove = dac33_soc_remove,
 
@@ -1499,7 +1498,14 @@ static int dac33_i2c_probe(struct i2c_client *client,
        if (dac33 == NULL)
                return -ENOMEM;
 
-       dac33->control_data = client;
+       dac33->reg_cache = devm_kmemdup(&client->dev,
+                                       dac33_reg,
+                                       ARRAY_SIZE(dac33_reg) * sizeof(u8),
+                                       GFP_KERNEL);
+       if (!dac33->reg_cache)
+               return -ENOMEM;
+
+       dac33->i2c = client;
        mutex_init(&dac33->mutex);
        spin_lock_init(&dac33->lock);
 
index 738e04b..1271e7e 100644
@@ -241,7 +241,7 @@ int ts3a227e_enable_jack_detect(struct snd_soc_component *component,
 {
        struct ts3a227e *ts3a227e = snd_soc_component_get_drvdata(component);
 
-       snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+       snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
        snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
        snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
        snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
diff --git a/sound/soc/codecs/tscs42xx.c b/sound/soc/codecs/tscs42xx.c
new file mode 100644
index 0000000..e7661d0
--- /dev/null
+++ b/sound/soc/codecs/tscs42xx.c
@@ -0,0 +1,1456 @@
+// SPDX-License-Identifier: GPL-2.0
+// tscs42xx.c -- TSCS42xx ALSA SoC Audio driver
+// Copyright 2017 Tempo Semiconductor, Inc.
+// Author: Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tscs42xx.h"
+
+#define COEFF_SIZE 3
+#define BIQUAD_COEFF_COUNT 5
+#define BIQUAD_SIZE (COEFF_SIZE * BIQUAD_COEFF_COUNT)
+
+#define COEFF_RAM_MAX_ADDR 0xcd
+#define COEFF_RAM_COEFF_COUNT (COEFF_RAM_MAX_ADDR + 1)
+#define COEFF_RAM_SIZE (COEFF_SIZE * COEFF_RAM_COEFF_COUNT)
+
+struct tscs42xx {
+
+       int bclk_ratio;
+       int samplerate;
+       unsigned int blrcm;
+       struct mutex audio_params_lock;
+
+       u8 coeff_ram[COEFF_RAM_SIZE];
+       bool coeff_ram_synced;
+       struct mutex coeff_ram_lock;
+
+       struct mutex pll_lock;
+
+       struct regmap *regmap;
+
+       struct device *dev;
+};
+
+struct coeff_ram_ctl {
+       unsigned int addr;
+       struct soc_bytes_ext bytes_ext;
+};
+
+static bool tscs42xx_volatile(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case R_DACCRWRL:
+       case R_DACCRWRM:
+       case R_DACCRWRH:
+       case R_DACCRRDL:
+       case R_DACCRRDM:
+       case R_DACCRRDH:
+       case R_DACCRSTAT:
+       case R_DACCRADDR:
+       case R_PLLCTL0:
+               return true;
+       default:
+               return false;
+       };
+}
+
+static bool tscs42xx_precious(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case R_DACCRWRL:
+       case R_DACCRWRM:
+       case R_DACCRWRH:
+       case R_DACCRRDL:
+       case R_DACCRRDM:
+       case R_DACCRRDH:
+               return true;
+       default:
+               return false;
+       };
+}
+
+static const struct regmap_config tscs42xx_regmap = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .volatile_reg = tscs42xx_volatile,
+       .precious_reg = tscs42xx_precious,
+       .max_register = R_DACMBCREL3H,
+
+       .cache_type = REGCACHE_RBTREE,
+       .can_multi_write = true,
+};
+
+#define MAX_PLL_LOCK_20MS_WAITS 1
+static bool plls_locked(struct snd_soc_codec *codec)
+{
+       int ret;
+       int count = MAX_PLL_LOCK_20MS_WAITS;
+
+       do {
+               ret = snd_soc_read(codec, R_PLLCTL0);
+               if (ret < 0) {
+                       dev_err(codec->dev,
+                               "Failed to read PLL lock status (%d)\n", ret);
+                       return false;
+               } else if (ret > 0) {
+                       return true;
+               }
+               msleep(20);
+       } while (count--);
+
+       return false;
+}
+
+static int sample_rate_to_pll_freq_out(int sample_rate)
+{
+       switch (sample_rate) {
+       case 11025:
+       case 22050:
+       case 44100:
+       case 88200:
+               return 112896000;
+       case 8000:
+       case 16000:
+       case 32000:
+       case 48000:
+       case 96000:
+               return 122880000;
+       default:
+               return -EINVAL;
+       }
+}
+
+#define DACCRSTAT_MAX_TRYS 10
+static int write_coeff_ram(struct snd_soc_codec *codec, u8 *coeff_ram,
+       unsigned int addr, unsigned int coeff_cnt)
+{
+       struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+       int cnt;
+       int trys;
+       int ret;
+
+       for (cnt = 0; cnt < coeff_cnt; cnt++, addr++) {
+
+               for (trys = 0; trys < DACCRSTAT_MAX_TRYS; trys++) {
+                       ret = snd_soc_read(codec, R_DACCRSTAT);
+                       if (ret < 0) {
+                               dev_err(codec->dev,
+                                       "Failed to read stat (%d)\n", ret);
+                               return ret;
+                       }
+                       if (!ret)
+                               break;
+               }
+
+               if (trys == DACCRSTAT_MAX_TRYS) {
+                       ret = -EIO;
+                       dev_err(codec->dev,
+                               "dac coefficient write error (%d)\n", ret);
+                       return ret;
+               }
+
+               ret = regmap_write(tscs42xx->regmap, R_DACCRADDR, addr);
+               if (ret < 0) {
+                       dev_err(codec->dev,
+                               "Failed to write dac ram address (%d)\n", ret);
+                       return ret;
+               }
+
+               ret = regmap_bulk_write(tscs42xx->regmap, R_DACCRWRL,
+                       &coeff_ram[addr * COEFF_SIZE],
+                       COEFF_SIZE);
+               if (ret < 0) {
+                       dev_err(codec->dev,
+                               "Failed to write dac ram (%d)\n", ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int power_up_audio_plls(struct snd_soc_codec *codec)
+{
+       struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+       int freq_out;
+       int ret;
+       unsigned int mask;
+       unsigned int val;
+
+       freq_out = sample_rate_to_pll_freq_out(tscs42xx->samplerate);
+       switch (freq_out) {
+       case 122880000: /* 48k */
+               mask = RM_PLLCTL1C_PDB_PLL1;
+               val = RV_PLLCTL1C_PDB_PLL1_ENABLE;
+               break;
+       case 112896000: /* 44.1k */
+               mask = RM_PLLCTL1C_PDB_PLL2;
+               val = RV_PLLCTL1C_PDB_PLL2_ENABLE;
+               break;
+       default:
+               ret = -EINVAL;
+               dev_err(codec->dev, "Unrecognized PLL output freq (%d)\n", ret);
+               return ret;
+       }
+
+       mutex_lock(&tscs42xx->pll_lock);
+
+       ret = snd_soc_update_bits(codec, R_PLLCTL1C, mask, val);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to turn PLL on (%d)\n", ret);
+               goto exit;
+       }
+
+       if (!plls_locked(codec)) {
+               dev_err(codec->dev, "Failed to lock plls\n");
+               ret = -ENOMSG;
+               goto exit;
+       }
+
+       ret = 0;
+exit:
+       mutex_unlock(&tscs42xx->pll_lock);
+
+       return ret;
+}
+
+static int power_down_audio_plls(struct snd_soc_codec *codec)
+{
+       struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       mutex_lock(&tscs42xx->pll_lock);
+
+       ret = snd_soc_update_bits(codec, R_PLLCTL1C,
+                       RM_PLLCTL1C_PDB_PLL1,
+                       RV_PLLCTL1C_PDB_PLL1_DISABLE);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to turn PLL off (%d)\n", ret);
+               goto exit;
+       }
+       ret = snd_soc_update_bits(codec, R_PLLCTL1C,
+                       RM_PLLCTL1C_PDB_PLL2,
+                       RV_PLLCTL1C_PDB_PLL2_DISABLE);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to turn PLL off (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = 0;
+exit:
+       mutex_unlock(&tscs42xx->pll_lock);
+
+       return ret;
+}
+
+static int coeff_ram_get(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+       struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+       struct coeff_ram_ctl *ctl =
+               (struct coeff_ram_ctl *)kcontrol->private_value;
+       struct soc_bytes_ext *params = &ctl->bytes_ext;
+
+       mutex_lock(&tscs42xx->coeff_ram_lock);
+
+       memcpy(ucontrol->value.bytes.data,
+               &tscs42xx->coeff_ram[ctl->addr * COEFF_SIZE], params->max);
+
+       mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+       return 0;
+}
+
+static int coeff_ram_put(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+       struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+       struct coeff_ram_ctl *ctl =
+               (struct coeff_ram_ctl *)kcontrol->private_value;
+       struct soc_bytes_ext *params = &ctl->bytes_ext;
+       unsigned int coeff_cnt = params->max / COEFF_SIZE;
+       int ret;
+
+       mutex_lock(&tscs42xx->coeff_ram_lock);
+
+       tscs42xx->coeff_ram_synced = false;
+
+       memcpy(&tscs42xx->coeff_ram[ctl->addr * COEFF_SIZE],
+               ucontrol->value.bytes.data, params->max);
+
+       mutex_lock(&tscs42xx->pll_lock);
+
+       if (plls_locked(codec)) {
+               ret = write_coeff_ram(codec, tscs42xx->coeff_ram,
+                       ctl->addr, coeff_cnt);
+               if (ret < 0) {
+                       dev_err(codec->dev,
+                               "Failed to flush coeff ram cache (%d)\n", ret);
+                       goto exit;
+               }
+               tscs42xx->coeff_ram_synced = true;
+       }
+
+       ret = 0;
+exit:
+       mutex_unlock(&tscs42xx->pll_lock);
+
+       mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+       return ret;
+}
+
+/* Input L Capture Route */
+static char const * const input_select_text[] = {
+       "Line 1", "Line 2", "Line 3", "D2S"
+};
+
+static const struct soc_enum left_input_select_enum =
+SOC_ENUM_SINGLE(R_INSELL, FB_INSELL, ARRAY_SIZE(input_select_text),
+               input_select_text);
+
+static const struct snd_kcontrol_new left_input_select =
+SOC_DAPM_ENUM("LEFT_INPUT_SELECT_ENUM", left_input_select_enum);
+
+/* Input R Capture Route */
+static const struct soc_enum right_input_select_enum =
+SOC_ENUM_SINGLE(R_INSELR, FB_INSELR, ARRAY_SIZE(input_select_text),
+               input_select_text);
+
+static const struct snd_kcontrol_new right_input_select =
+SOC_DAPM_ENUM("RIGHT_INPUT_SELECT_ENUM", right_input_select_enum);
+
+/* Input Channel Mapping */
+static char const * const ch_map_select_text[] = {
+       "Normal", "Left to Right", "Right to Left", "Swap"
+};
+
+static const struct soc_enum ch_map_select_enum =
+SOC_ENUM_SINGLE(R_AIC2, FB_AIC2_ADCDSEL, ARRAY_SIZE(ch_map_select_text),
+               ch_map_select_text);
+
+static int dapm_vref_event(struct snd_soc_dapm_widget *w,
+                        struct snd_kcontrol *kcontrol, int event)
+{
+       msleep(20);
+       return 0;
+}
+
+static int dapm_micb_event(struct snd_soc_dapm_widget *w,
+                        struct snd_kcontrol *kcontrol, int event)
+{
+       msleep(20);
+       return 0;
+}
+
+static int pll_event(struct snd_soc_dapm_widget *w,
+                    struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+       int ret;
+
+       if (SND_SOC_DAPM_EVENT_ON(event))
+               ret = power_up_audio_plls(codec);
+       else
+               ret = power_down_audio_plls(codec);
+
+       return ret;
+}
+
+static int dac_event(struct snd_soc_dapm_widget *w,
+                    struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+       struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       mutex_lock(&tscs42xx->coeff_ram_lock);
+
+       if (tscs42xx->coeff_ram_synced == false) {
+               ret = write_coeff_ram(codec, tscs42xx->coeff_ram, 0x00,
+                       COEFF_RAM_COEFF_COUNT);
+               if (ret < 0)
+                       goto exit;
+               tscs42xx->coeff_ram_synced = true;
+       }
+
+       ret = 0;
+exit:
+       mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+       return ret;
+}
+
+static const struct snd_soc_dapm_widget tscs42xx_dapm_widgets[] = {
+       /* Vref */
+       SND_SOC_DAPM_SUPPLY_S("Vref", 1, R_PWRM2, FB_PWRM2_VREF, 0,
+               dapm_vref_event, SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+       /* PLL */
+       SND_SOC_DAPM_SUPPLY("PLL", SND_SOC_NOPM, 0, 0, pll_event,
+               SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+       /* Headphone */
+       SND_SOC_DAPM_DAC_E("DAC L", "HiFi Playback", R_PWRM2, FB_PWRM2_HPL, 0,
+                       dac_event, SND_SOC_DAPM_POST_PMU),
+       SND_SOC_DAPM_DAC_E("DAC R", "HiFi Playback", R_PWRM2, FB_PWRM2_HPR, 0,
+                       dac_event, SND_SOC_DAPM_POST_PMU),
+       SND_SOC_DAPM_OUTPUT("Headphone L"),
+       SND_SOC_DAPM_OUTPUT("Headphone R"),
+
+       /* Speaker */
+       SND_SOC_DAPM_DAC_E("ClassD L", "HiFi Playback",
+               R_PWRM2, FB_PWRM2_SPKL, 0,
+               dac_event, SND_SOC_DAPM_POST_PMU),
+       SND_SOC_DAPM_DAC_E("ClassD R", "HiFi Playback",
+               R_PWRM2, FB_PWRM2_SPKR, 0,
+               dac_event, SND_SOC_DAPM_POST_PMU),
+       SND_SOC_DAPM_OUTPUT("Speaker L"),
+       SND_SOC_DAPM_OUTPUT("Speaker R"),
+
+       /* Capture */
+       SND_SOC_DAPM_PGA("Analog In PGA L", R_PWRM1, FB_PWRM1_PGAL, 0, NULL, 0),
+       SND_SOC_DAPM_PGA("Analog In PGA R", R_PWRM1, FB_PWRM1_PGAR, 0, NULL, 0),
+       SND_SOC_DAPM_PGA("Analog Boost L", R_PWRM1, FB_PWRM1_BSTL, 0, NULL, 0),
+       SND_SOC_DAPM_PGA("Analog Boost R", R_PWRM1, FB_PWRM1_BSTR, 0, NULL, 0),
+       SND_SOC_DAPM_PGA("ADC Mute", R_CNVRTR0, FB_CNVRTR0_HPOR, true, NULL, 0),
+       SND_SOC_DAPM_ADC("ADC L", "HiFi Capture", R_PWRM1, FB_PWRM1_ADCL, 0),
+       SND_SOC_DAPM_ADC("ADC R", "HiFi Capture", R_PWRM1, FB_PWRM1_ADCR, 0),
+
+       /* Capture Input */
+       SND_SOC_DAPM_MUX("Input L Capture Route", R_PWRM2,
+                       FB_PWRM2_INSELL, 0, &left_input_select),
+       SND_SOC_DAPM_MUX("Input R Capture Route", R_PWRM2,
+                       FB_PWRM2_INSELR, 0, &right_input_select),
+
+       /* Digital Mic */
+       SND_SOC_DAPM_SUPPLY_S("Digital Mic Enable", 2, R_DMICCTL,
+               FB_DMICCTL_DMICEN, 0, NULL,
+               SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+       /* Analog Mic */
+       SND_SOC_DAPM_SUPPLY_S("Mic Bias", 2, R_PWRM1, FB_PWRM1_MICB,
+               0, dapm_micb_event, SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+       /* Line In */
+       SND_SOC_DAPM_INPUT("Line In 1 L"),
+       SND_SOC_DAPM_INPUT("Line In 1 R"),
+       SND_SOC_DAPM_INPUT("Line In 2 L"),
+       SND_SOC_DAPM_INPUT("Line In 2 R"),
+       SND_SOC_DAPM_INPUT("Line In 3 L"),
+       SND_SOC_DAPM_INPUT("Line In 3 R"),
+};
+
+static const struct snd_soc_dapm_route tscs42xx_intercon[] = {
+       {"DAC L", NULL, "PLL"},
+       {"DAC R", NULL, "PLL"},
+       {"DAC L", NULL, "Vref"},
+       {"DAC R", NULL, "Vref"},
+       {"Headphone L", NULL, "DAC L"},
+       {"Headphone R", NULL, "DAC R"},
+
+       {"ClassD L", NULL, "PLL"},
+       {"ClassD R", NULL, "PLL"},
+       {"ClassD L", NULL, "Vref"},
+       {"ClassD R", NULL, "Vref"},
+       {"Speaker L", NULL, "ClassD L"},
+       {"Speaker R", NULL, "ClassD R"},
+
+       {"Input L Capture Route", NULL, "Vref"},
+       {"Input R Capture Route", NULL, "Vref"},
+
+       {"Mic Bias", NULL, "Vref"},
+
+       {"Input L Capture Route", "Line 1", "Line In 1 L"},
+       {"Input R Capture Route", "Line 1", "Line In 1 R"},
+       {"Input L Capture Route", "Line 2", "Line In 2 L"},
+       {"Input R Capture Route", "Line 2", "Line In 2 R"},
+       {"Input L Capture Route", "Line 3", "Line In 3 L"},
+       {"Input R Capture Route", "Line 3", "Line In 3 R"},
+
+       {"Analog In PGA L", NULL, "Input L Capture Route"},
+       {"Analog In PGA R", NULL, "Input R Capture Route"},
+       {"Analog Boost L", NULL, "Analog In PGA L"},
+       {"Analog Boost R", NULL, "Analog In PGA R"},
+       {"ADC Mute", NULL, "Analog Boost L"},
+       {"ADC Mute", NULL, "Analog Boost R"},
+       {"ADC L", NULL, "PLL"},
+       {"ADC R", NULL, "PLL"},
+       {"ADC L", NULL, "ADC Mute"},
+       {"ADC R", NULL, "ADC Mute"},
+};
+
+/************
+ * CONTROLS *
+ ************/
+
+static char const * const eq_band_enable_text[] = {
+       "Prescale only",
+       "Band1",
+       "Band1:2",
+       "Band1:3",
+       "Band1:4",
+       "Band1:5",
+       "Band1:6",
+};
+
+static char const * const level_detection_text[] = {
+       "Average",
+       "Peak",
+};
+
+static char const * const level_detection_window_text[] = {
+       "512 Samples",
+       "64 Samples",
+};
+
+static char const * const compressor_ratio_text[] = {
+       "Reserved", "1.5:1", "2:1", "3:1", "4:1", "5:1", "6:1",
+       "7:1", "8:1", "9:1", "10:1", "11:1", "12:1", "13:1", "14:1",
+       "15:1", "16:1", "17:1", "18:1", "19:1", "20:1",
+};
+
+static DECLARE_TLV_DB_SCALE(hpvol_scale, -8850, 75, 0);
+static DECLARE_TLV_DB_SCALE(spkvol_scale, -7725, 75, 0);
+static DECLARE_TLV_DB_SCALE(dacvol_scale, -9563, 38, 0);
+static DECLARE_TLV_DB_SCALE(adcvol_scale, -7125, 38, 0);
+static DECLARE_TLV_DB_SCALE(invol_scale, -1725, 75, 0);
+static DECLARE_TLV_DB_SCALE(mic_boost_scale, 0, 1000, 0);
+static DECLARE_TLV_DB_MINMAX(mugain_scale, 0, 4650);
+static DECLARE_TLV_DB_MINMAX(compth_scale, -9562, 0);
+
+static const struct soc_enum eq1_band_enable_enum =
+       SOC_ENUM_SINGLE(R_CONFIG1, FB_CONFIG1_EQ1_BE,
+               ARRAY_SIZE(eq_band_enable_text), eq_band_enable_text);
+
+static const struct soc_enum eq2_band_enable_enum =
+       SOC_ENUM_SINGLE(R_CONFIG1, FB_CONFIG1_EQ2_BE,
+               ARRAY_SIZE(eq_band_enable_text), eq_band_enable_text);
+
+static const struct soc_enum cle_level_detection_enum =
+       SOC_ENUM_SINGLE(R_CLECTL, FB_CLECTL_LVL_MODE,
+               ARRAY_SIZE(level_detection_text),
+               level_detection_text);
+
+static const struct soc_enum cle_level_detection_window_enum =
+       SOC_ENUM_SINGLE(R_CLECTL, FB_CLECTL_WINDOWSEL,
+               ARRAY_SIZE(level_detection_window_text),
+               level_detection_window_text);
+
+static const struct soc_enum mbc_level_detection_enums[] = {
+       SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE1,
+               ARRAY_SIZE(level_detection_text),
+                       level_detection_text),
+       SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE2,
+               ARRAY_SIZE(level_detection_text),
+                       level_detection_text),
+       SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE3,
+               ARRAY_SIZE(level_detection_text),
+                       level_detection_text),
+};
+
+static const struct soc_enum mbc_level_detection_window_enums[] = {
+       SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL1,
+               ARRAY_SIZE(level_detection_window_text),
+                       level_detection_window_text),
+       SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL2,
+               ARRAY_SIZE(level_detection_window_text),
+                       level_detection_window_text),
+       SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL3,
+               ARRAY_SIZE(level_detection_window_text),
+                       level_detection_window_text),
+};
+
+static const struct soc_enum compressor_ratio_enum =
+       SOC_ENUM_SINGLE(R_CMPRAT, FB_CMPRAT,
+               ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc1_compressor_ratio_enum =
+       SOC_ENUM_SINGLE(R_DACMBCRAT1, FB_DACMBCRAT1_RATIO,
+               ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc2_compressor_ratio_enum =
+       SOC_ENUM_SINGLE(R_DACMBCRAT2, FB_DACMBCRAT2_RATIO,
+               ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc3_compressor_ratio_enum =
+       SOC_ENUM_SINGLE(R_DACMBCRAT3, FB_DACMBCRAT3_RATIO,
+               ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static int bytes_info_ext(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_info *ucontrol)
+{
+       struct coeff_ram_ctl *ctl =
+               (struct coeff_ram_ctl *)kcontrol->private_value;
+       struct soc_bytes_ext *params = &ctl->bytes_ext;
+
+       ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+       ucontrol->count = params->max;
+
+       return 0;
+}
+
+#define COEFF_RAM_CTL(xname, xcount, xaddr) \
+{      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+       .info = bytes_info_ext, \
+       .get = coeff_ram_get, .put = coeff_ram_put, \
+       .private_value = (unsigned long)&(struct coeff_ram_ctl) { \
+               .addr = xaddr, \
+               .bytes_ext = {.max = xcount, }, \
+       } \
+}
+
+static const struct snd_kcontrol_new tscs42xx_snd_controls[] = {
+       /* Volumes */
+       SOC_DOUBLE_R_TLV("Headphone Playback Volume", R_HPVOLL, R_HPVOLR,
+                       FB_HPVOLL, 0x7F, 0, hpvol_scale),
+       SOC_DOUBLE_R_TLV("Speaker Playback Volume", R_SPKVOLL, R_SPKVOLR,
+                       FB_SPKVOLL, 0x7F, 0, spkvol_scale),
+       SOC_DOUBLE_R_TLV("Master Playback Volume", R_DACVOLL, R_DACVOLR,
+                       FB_DACVOLL, 0xFF, 0, dacvol_scale),
+       SOC_DOUBLE_R_TLV("PCM Capture Volume", R_ADCVOLL, R_ADCVOLR,
+                       FB_ADCVOLL, 0xFF, 0, adcvol_scale),
+       SOC_DOUBLE_R_TLV("Master Capture Volume", R_INVOLL, R_INVOLR,
+                       FB_INVOLL, 0x3F, 0, invol_scale),
+
+       /* INSEL */
+       SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", R_INSELL, R_INSELR,
+                       FB_INSELL_MICBSTL, FV_INSELL_MICBSTL_30DB,
+                       0, mic_boost_scale),
+
+       /* Input Channel Map */
+       SOC_ENUM("Input Channel Map", ch_map_select_enum),
+
+       /* Coefficient Ram */
+       COEFF_RAM_CTL("Cascade1L BiQuad1", BIQUAD_SIZE, 0x00),
+       COEFF_RAM_CTL("Cascade1L BiQuad2", BIQUAD_SIZE, 0x05),
+       COEFF_RAM_CTL("Cascade1L BiQuad3", BIQUAD_SIZE, 0x0a),
+       COEFF_RAM_CTL("Cascade1L BiQuad4", BIQUAD_SIZE, 0x0f),
+       COEFF_RAM_CTL("Cascade1L BiQuad5", BIQUAD_SIZE, 0x14),
+       COEFF_RAM_CTL("Cascade1L BiQuad6", BIQUAD_SIZE, 0x19),
+
+       COEFF_RAM_CTL("Cascade1R BiQuad1", BIQUAD_SIZE, 0x20),
+       COEFF_RAM_CTL("Cascade1R BiQuad2", BIQUAD_SIZE, 0x25),
+       COEFF_RAM_CTL("Cascade1R BiQuad3", BIQUAD_SIZE, 0x2a),
+       COEFF_RAM_CTL("Cascade1R BiQuad4", BIQUAD_SIZE, 0x2f),
+       COEFF_RAM_CTL("Cascade1R BiQuad5", BIQUAD_SIZE, 0x34),
+       COEFF_RAM_CTL("Cascade1R BiQuad6", BIQUAD_SIZE, 0x39),
+
+       COEFF_RAM_CTL("Cascade1L Prescale", COEFF_SIZE, 0x1f),
+       COEFF_RAM_CTL("Cascade1R Prescale", COEFF_SIZE, 0x3f),
+
+       COEFF_RAM_CTL("Cascade2L BiQuad1", BIQUAD_SIZE, 0x40),
+       COEFF_RAM_CTL("Cascade2L BiQuad2", BIQUAD_SIZE, 0x45),
+       COEFF_RAM_CTL("Cascade2L BiQuad3", BIQUAD_SIZE, 0x4a),
+       COEFF_RAM_CTL("Cascade2L BiQuad4", BIQUAD_SIZE, 0x4f),
+       COEFF_RAM_CTL("Cascade2L BiQuad5", BIQUAD_SIZE, 0x54),
+       COEFF_RAM_CTL("Cascade2L BiQuad6", BIQUAD_SIZE, 0x59),
+
+       COEFF_RAM_CTL("Cascade2R BiQuad1", BIQUAD_SIZE, 0x60),
+       COEFF_RAM_CTL("Cascade2R BiQuad2", BIQUAD_SIZE, 0x65),
+       COEFF_RAM_CTL("Cascade2R BiQuad3", BIQUAD_SIZE, 0x6a),
+       COEFF_RAM_CTL("Cascade2R BiQuad4", BIQUAD_SIZE, 0x6f),
+       COEFF_RAM_CTL("Cascade2R BiQuad5", BIQUAD_SIZE, 0x74),
+       COEFF_RAM_CTL("Cascade2R BiQuad6", BIQUAD_SIZE, 0x79),
+
+       COEFF_RAM_CTL("Cascade2L Prescale", COEFF_SIZE, 0x5f),
+       COEFF_RAM_CTL("Cascade2R Prescale", COEFF_SIZE, 0x7f),
+
+       COEFF_RAM_CTL("Bass Extraction BiQuad1", BIQUAD_SIZE, 0x80),
+       COEFF_RAM_CTL("Bass Extraction BiQuad2", BIQUAD_SIZE, 0x85),
+
+       COEFF_RAM_CTL("Bass Non Linear Function 1", COEFF_SIZE, 0x8a),
+       COEFF_RAM_CTL("Bass Non Linear Function 2", COEFF_SIZE, 0x8b),
+
+       COEFF_RAM_CTL("Bass Limiter BiQuad", BIQUAD_SIZE, 0x8c),
+
+       COEFF_RAM_CTL("Bass Cut Off BiQuad", BIQUAD_SIZE, 0x91),
+
+       COEFF_RAM_CTL("Bass Mix", COEFF_SIZE, 0x96),
+
+       COEFF_RAM_CTL("Treb Extraction BiQuad1", BIQUAD_SIZE, 0x97),
+       COEFF_RAM_CTL("Treb Extraction BiQuad2", BIQUAD_SIZE, 0x9c),
+
+       COEFF_RAM_CTL("Treb Non Linear Function 1", COEFF_SIZE, 0xa1),
+       COEFF_RAM_CTL("Treb Non Linear Function 2", COEFF_SIZE, 0xa2),
+
+       COEFF_RAM_CTL("Treb Limiter BiQuad", BIQUAD_SIZE, 0xa3),
+
+       COEFF_RAM_CTL("Treb Cut Off BiQuad", BIQUAD_SIZE, 0xa8),
+
+       COEFF_RAM_CTL("Treb Mix", COEFF_SIZE, 0xad),
+
+       COEFF_RAM_CTL("3D", COEFF_SIZE, 0xae),
+
+       COEFF_RAM_CTL("3D Mix", COEFF_SIZE, 0xaf),
+
+       COEFF_RAM_CTL("MBC1 BiQuad1", BIQUAD_SIZE, 0xb0),
+       COEFF_RAM_CTL("MBC1 BiQuad2", BIQUAD_SIZE, 0xb5),
+
+       COEFF_RAM_CTL("MBC2 BiQuad1", BIQUAD_SIZE, 0xba),
+       COEFF_RAM_CTL("MBC2 BiQuad2", BIQUAD_SIZE, 0xbf),
+
+       COEFF_RAM_CTL("MBC3 BiQuad1", BIQUAD_SIZE, 0xc4),
+       COEFF_RAM_CTL("MBC3 BiQuad2", BIQUAD_SIZE, 0xc9),
+
+       /* EQ */
+       SOC_SINGLE("EQ1 Switch", R_CONFIG1, FB_CONFIG1_EQ1_EN, 1, 0),
+       SOC_SINGLE("EQ2 Switch", R_CONFIG1, FB_CONFIG1_EQ2_EN, 1, 0),
+       SOC_ENUM("EQ1 Band Enable", eq1_band_enable_enum),
+       SOC_ENUM("EQ2 Band Enable", eq2_band_enable_enum),
+
+       /* CLE */
+       SOC_ENUM("CLE Level Detect",
+               cle_level_detection_enum),
+       SOC_ENUM("CLE Level Detect Win",
+               cle_level_detection_window_enum),
+       SOC_SINGLE("Expander Switch",
+               R_CLECTL, FB_CLECTL_EXP_EN, 1, 0),
+       SOC_SINGLE("Limiter Switch",
+               R_CLECTL, FB_CLECTL_LIMIT_EN, 1, 0),
+       SOC_SINGLE("Comp Switch",
+               R_CLECTL, FB_CLECTL_COMP_EN, 1, 0),
+       SOC_SINGLE_TLV("CLE Make-Up Gain Playback Volume",
+               R_MUGAIN, FB_MUGAIN_CLEMUG, 0x1f, 0, mugain_scale),
+       SOC_SINGLE_TLV("Comp Thresh Playback Volume",
+               R_COMPTH, FB_COMPTH, 0xff, 0, compth_scale),
+       SOC_ENUM("Comp Ratio", compressor_ratio_enum),
+       SND_SOC_BYTES("Comp Atk Time", R_CATKTCL, 2),
+
+       /* Effects */
+       SOC_SINGLE("3D Switch", R_FXCTL, FB_FXCTL_3DEN, 1, 0),
+       SOC_SINGLE("Treble Switch", R_FXCTL, FB_FXCTL_TEEN, 1, 0),
+       SOC_SINGLE("Treble Bypass Switch", R_FXCTL, FB_FXCTL_TNLFBYPASS, 1, 0),
+       SOC_SINGLE("Bass Switch", R_FXCTL, FB_FXCTL_BEEN, 1, 0),
+       SOC_SINGLE("Bass Bypass Switch", R_FXCTL, FB_FXCTL_BNLFBYPASS, 1, 0),
+
+       /* MBC */
+       SOC_SINGLE("MBC Band1 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN1, 1, 0),
+       SOC_SINGLE("MBC Band2 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN2, 1, 0),
+       SOC_SINGLE("MBC Band3 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN3, 1, 0),
+       SOC_ENUM("MBC Band1 Level Detect",
+               mbc_level_detection_enums[0]),
+       SOC_ENUM("MBC Band2 Level Detect",
+               mbc_level_detection_enums[1]),
+       SOC_ENUM("MBC Band3 Level Detect",
+               mbc_level_detection_enums[2]),
+       SOC_ENUM("MBC Band1 Level Detect Win",
+               mbc_level_detection_window_enums[0]),
+       SOC_ENUM("MBC Band2 Level Detect Win",
+               mbc_level_detection_window_enums[1]),
+       SOC_ENUM("MBC Band3 Level Detect Win",
+               mbc_level_detection_window_enums[2]),
+
+       SOC_SINGLE("MBC1 Phase Invert Switch",
+               R_DACMBCMUG1, FB_DACMBCMUG1_PHASE, 1, 0),
+       SOC_SINGLE_TLV("DAC MBC1 Make-Up Gain Playback Volume",
+               R_DACMBCMUG1, FB_DACMBCMUG1_MUGAIN, 0x1f, 0, mugain_scale),
+       SOC_SINGLE_TLV("DAC MBC1 Comp Thresh Playback Volume",
+               R_DACMBCTHR1, FB_DACMBCTHR1_THRESH, 0xff, 0, compth_scale),
+       SOC_ENUM("DAC MBC1 Comp Ratio",
+               dac_mbc1_compressor_ratio_enum),
+       SND_SOC_BYTES("DAC MBC1 Comp Atk Time", R_DACMBCATK1L, 2),
+       SND_SOC_BYTES("DAC MBC1 Comp Rel Time Const",
+               R_DACMBCREL1L, 2),
+
+       SOC_SINGLE("MBC2 Phase Invert Switch",
+               R_DACMBCMUG2, FB_DACMBCMUG2_PHASE, 1, 0),
+       SOC_SINGLE_TLV("DAC MBC2 Make-Up Gain Playback Volume",
+               R_DACMBCMUG2, FB_DACMBCMUG2_MUGAIN, 0x1f, 0, mugain_scale),
+       SOC_SINGLE_TLV("DAC MBC2 Comp Thresh Playback Volume",
+               R_DACMBCTHR2, FB_DACMBCTHR2_THRESH, 0xff, 0, compth_scale),
+       SOC_ENUM("DAC MBC2 Comp Ratio",
+               dac_mbc2_compressor_ratio_enum),
+       SND_SOC_BYTES("DAC MBC2 Comp Atk Time", R_DACMBCATK2L, 2),
+       SND_SOC_BYTES("DAC MBC2 Comp Rel Time Const",
+               R_DACMBCREL2L, 2),
+
+       SOC_SINGLE("MBC3 Phase Invert Switch",
+               R_DACMBCMUG3, FB_DACMBCMUG3_PHASE, 1, 0),
+       SOC_SINGLE_TLV("DAC MBC3 Make-Up Gain Playback Volume",
+               R_DACMBCMUG3, FB_DACMBCMUG3_MUGAIN, 0x1f, 0, mugain_scale),
+       SOC_SINGLE_TLV("DAC MBC3 Comp Thresh Playback Volume",
+               R_DACMBCTHR3, FB_DACMBCTHR3_THRESH, 0xff, 0, compth_scale),
+       SOC_ENUM("DAC MBC3 Comp Ratio",
+               dac_mbc3_compressor_ratio_enum),
+       SND_SOC_BYTES("DAC MBC3 Comp Atk Time", R_DACMBCATK3L, 2),
+       SND_SOC_BYTES("DAC MBC3 Comp Rel Time Const",
+               R_DACMBCREL3L, 2),
+};
+
+static int setup_sample_format(struct snd_soc_codec *codec,
+               snd_pcm_format_t format)
+{
+       unsigned int width;
+       int ret;
+
+       switch (format) {
+       case SNDRV_PCM_FORMAT_S16_LE:
+               width = RV_AIC1_WL_16;
+               break;
+       case SNDRV_PCM_FORMAT_S20_3LE:
+               width = RV_AIC1_WL_20;
+               break;
+       case SNDRV_PCM_FORMAT_S24_LE:
+               width = RV_AIC1_WL_24;
+               break;
+       case SNDRV_PCM_FORMAT_S32_LE:
+               width = RV_AIC1_WL_32;
+               break;
+       default:
+               ret = -EINVAL;
+               dev_err(codec->dev, "Unsupported format width (%d)\n", ret);
+               return ret;
+       }
+       ret = snd_soc_update_bits(codec, R_AIC1, RM_AIC1_WL, width);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to set sample width (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int setup_sample_rate(struct snd_soc_codec *codec, unsigned int rate)
+{
+       struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+       unsigned int br, bm;
+       int ret;
+
+       switch (rate) {
+       case 8000:
+               br = RV_DACSR_DBR_32;
+               bm = RV_DACSR_DBM_PT25;
+               break;
+       case 16000:
+               br = RV_DACSR_DBR_32;
+               bm = RV_DACSR_DBM_PT5;
+               break;
+       case 24000:
+               br = RV_DACSR_DBR_48;
+               bm = RV_DACSR_DBM_PT5;
+               break;
+       case 32000:
+               br = RV_DACSR_DBR_32;
+               bm = RV_DACSR_DBM_1;
+               break;
+       case 48000:
+               br = RV_DACSR_DBR_48;
+               bm = RV_DACSR_DBM_1;
+               break;
+       case 96000:
+               br = RV_DACSR_DBR_48;
+               bm = RV_DACSR_DBM_2;
+               break;
+       case 11025:
+               br = RV_DACSR_DBR_44_1;
+               bm = RV_DACSR_DBM_PT25;
+               break;
+       case 22050:
+               br = RV_DACSR_DBR_44_1;
+               bm = RV_DACSR_DBM_PT5;
+               break;
+       case 44100:
+               br = RV_DACSR_DBR_44_1;
+               bm = RV_DACSR_DBM_1;
+               break;
+       case 88200:
+               br = RV_DACSR_DBR_44_1;
+               bm = RV_DACSR_DBM_2;
+               break;
+       default:
+               dev_err(codec->dev, "Unsupported sample rate %d\n", rate);
+               return -EINVAL;
+       }
+
+       /* DAC and ADC share bit and frame clock */
+       ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBR, br);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+               return ret;
+       }
+       ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBM, bm);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+               return ret;
+       }
+       ret = snd_soc_update_bits(codec, R_ADCSR, RM_DACSR_DBR, br);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+               return ret;
+       }
+       ret = snd_soc_update_bits(codec, R_ADCSR, RM_DACSR_DBM, bm);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+               return ret;
+       }
+
+       mutex_lock(&tscs42xx->audio_params_lock);
+
+       tscs42xx->samplerate = rate;
+
+       mutex_unlock(&tscs42xx->audio_params_lock);
+
+       return 0;
+}
+
+struct reg_setting {
+       unsigned int addr;
+       unsigned int val;
+       unsigned int mask;
+};
+
+#define PLL_REG_SETTINGS_COUNT 13
+struct pll_ctl {
+       int input_freq;
+       struct reg_setting settings[PLL_REG_SETTINGS_COUNT];
+};
+
+#define PLL_CTL(f, rt, rd, r1b_l, r9, ra, rb,          \
+               rc, r12, r1b_h, re, rf, r10, r11)       \
+       {                                               \
+               .input_freq = f,                        \
+               .settings = {                           \
+                       {R_TIMEBASE,  rt,   0xFF},      \
+                       {R_PLLCTLD,   rd,   0xFF},      \
+                       {R_PLLCTL1B, r1b_l, 0x0F},      \
+                       {R_PLLCTL9,   r9,   0xFF},      \
+                       {R_PLLCTLA,   ra,   0xFF},      \
+                       {R_PLLCTLB,   rb,   0xFF},      \
+                       {R_PLLCTLC,   rc,   0xFF},      \
+                       {R_PLLCTL12, r12,   0xFF},      \
+                       {R_PLLCTL1B, r1b_h, 0xF0},      \
+                       {R_PLLCTLE,   re,   0xFF},      \
+                       {R_PLLCTLF,   rf,   0xFF},      \
+                       {R_PLLCTL10, r10,   0xFF},      \
+                       {R_PLLCTL11, r11,   0xFF},      \
+               },                                      \
+       }
+
+static const struct pll_ctl pll_ctls[] = {
+       PLL_CTL(1411200, 0x05,
+               0x39, 0x04, 0x07, 0x02, 0xC3, 0x04,
+               0x1B, 0x10, 0x03, 0x03, 0xD0, 0x02),
+       PLL_CTL(1536000, 0x05,
+               0x1A, 0x04, 0x02, 0x03, 0xE0, 0x01,
+               0x1A, 0x10, 0x02, 0x03, 0xB9, 0x01),
+       PLL_CTL(2822400, 0x0A,
+               0x23, 0x04, 0x07, 0x04, 0xC3, 0x04,
+               0x22, 0x10, 0x05, 0x03, 0x58, 0x02),
+       PLL_CTL(3072000, 0x0B,
+               0x22, 0x04, 0x07, 0x03, 0x48, 0x03,
+               0x1A, 0x10, 0x04, 0x03, 0xB9, 0x01),
+       PLL_CTL(5644800, 0x15,
+               0x23, 0x04, 0x0E, 0x04, 0xC3, 0x04,
+               0x1A, 0x10, 0x08, 0x03, 0xE0, 0x01),
+       PLL_CTL(6144000, 0x17,
+               0x1A, 0x04, 0x08, 0x03, 0xE0, 0x01,
+               0x1A, 0x10, 0x08, 0x03, 0xB9, 0x01),
+       PLL_CTL(12000000, 0x2E,
+               0x1B, 0x04, 0x19, 0x03, 0x00, 0x03,
+               0x2A, 0x10, 0x19, 0x05, 0x98, 0x04),
+       PLL_CTL(19200000, 0x4A,
+               0x13, 0x04, 0x14, 0x03, 0x80, 0x01,
+               0x1A, 0x10, 0x19, 0x03, 0xB9, 0x01),
+       PLL_CTL(22000000, 0x55,
+               0x2A, 0x04, 0x37, 0x05, 0x00, 0x06,
+               0x22, 0x10, 0x26, 0x03, 0x49, 0x02),
+       PLL_CTL(22579200, 0x57,
+               0x22, 0x04, 0x31, 0x03, 0x20, 0x03,
+               0x1A, 0x10, 0x1D, 0x03, 0xB3, 0x01),
+       PLL_CTL(24000000, 0x5D,
+               0x13, 0x04, 0x19, 0x03, 0x80, 0x01,
+               0x1B, 0x10, 0x19, 0x05, 0x4C, 0x02),
+       PLL_CTL(24576000, 0x5F,
+               0x13, 0x04, 0x1D, 0x03, 0xB3, 0x01,
+               0x22, 0x10, 0x40, 0x03, 0x72, 0x03),
+       PLL_CTL(27000000, 0x68,
+               0x22, 0x04, 0x4B, 0x03, 0x00, 0x04,
+               0x2A, 0x10, 0x7D, 0x03, 0x20, 0x06),
+       PLL_CTL(36000000, 0x8C,
+               0x1B, 0x04, 0x4B, 0x03, 0x00, 0x03,
+               0x2A, 0x10, 0x7D, 0x03, 0x98, 0x04),
+       PLL_CTL(25000000, 0x61,
+               0x1B, 0x04, 0x37, 0x03, 0x2B, 0x03,
+               0x1A, 0x10, 0x2A, 0x03, 0x39, 0x02),
+       PLL_CTL(26000000, 0x65,
+               0x23, 0x04, 0x41, 0x05, 0x00, 0x06,
+               0x1A, 0x10, 0x26, 0x03, 0xEF, 0x01),
+       PLL_CTL(12288000, 0x2F,
+               0x1A, 0x04, 0x12, 0x03, 0x1C, 0x02,
+               0x22, 0x10, 0x20, 0x03, 0x72, 0x03),
+       PLL_CTL(40000000, 0x9B,
+               0x22, 0x08, 0x7D, 0x03, 0x80, 0x04,
+               0x23, 0x10, 0x7D, 0x05, 0xE4, 0x06),
+       PLL_CTL(512000, 0x01,
+               0x22, 0x04, 0x01, 0x03, 0xD0, 0x02,
+               0x1B, 0x10, 0x01, 0x04, 0x72, 0x03),
+       PLL_CTL(705600, 0x02,
+               0x22, 0x04, 0x02, 0x03, 0x15, 0x04,
+               0x22, 0x10, 0x01, 0x04, 0x80, 0x02),
+       PLL_CTL(1024000, 0x03,
+               0x22, 0x04, 0x02, 0x03, 0xD0, 0x02,
+               0x1B, 0x10, 0x02, 0x04, 0x72, 0x03),
+       PLL_CTL(2048000, 0x07,
+               0x22, 0x04, 0x04, 0x03, 0xD0, 0x02,
+               0x1B, 0x10, 0x04, 0x04, 0x72, 0x03),
+       PLL_CTL(2400000, 0x08,
+               0x22, 0x04, 0x05, 0x03, 0x00, 0x03,
+               0x23, 0x10, 0x05, 0x05, 0x98, 0x04),
+};
+
+static const struct pll_ctl *get_pll_ctl(int input_freq)
+{
+       int i;
+       const struct pll_ctl *pll_ctl = NULL;
+
+       for (i = 0; i < ARRAY_SIZE(pll_ctls); ++i)
+               if (input_freq == pll_ctls[i].input_freq) {
+                       pll_ctl = &pll_ctls[i];
+                       break;
+               }
+
+       return pll_ctl;
+}
+
+static int set_pll_ctl_from_input_freq(struct snd_soc_codec *codec,
+               const int input_freq)
+{
+       int ret;
+       int i;
+       const struct pll_ctl *pll_ctl;
+
+       pll_ctl = get_pll_ctl(input_freq);
+       if (!pll_ctl) {
+               ret = -EINVAL;
+               dev_err(codec->dev, "No PLL input entry for %d (%d)\n",
+                       input_freq, ret);
+               return ret;
+       }
+
+       for (i = 0; i < PLL_REG_SETTINGS_COUNT; ++i) {
+               ret = snd_soc_update_bits(codec,
+                       pll_ctl->settings[i].addr,
+                       pll_ctl->settings[i].mask,
+                       pll_ctl->settings[i].val);
+               if (ret < 0) {
+                       dev_err(codec->dev, "Failed to set pll ctl (%d)\n",
+                               ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int tscs42xx_hw_params(struct snd_pcm_substream *substream,
+               struct snd_pcm_hw_params *params,
+               struct snd_soc_dai *codec_dai)
+{
+       struct snd_soc_codec *codec = codec_dai->codec;
+       int ret;
+
+       ret = setup_sample_format(codec, params_format(params));
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to setup sample format (%d)\n",
+                       ret);
+               return ret;
+       }
+
+       ret = setup_sample_rate(codec, params_rate(params));
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to setup sample rate (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static inline int dac_mute(struct snd_soc_codec *codec)
+{
+       int ret;
+
+       ret = snd_soc_update_bits(codec, R_CNVRTR1, RM_CNVRTR1_DACMU,
+               RV_CNVRTR1_DACMU_ENABLE);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to mute DAC (%d)\n",
+                               ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static inline int dac_unmute(struct snd_soc_codec *codec)
+{
+       int ret;
+
+       ret = snd_soc_update_bits(codec, R_CNVRTR1, RM_CNVRTR1_DACMU,
+               RV_CNVRTR1_DACMU_DISABLE);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to unmute DAC (%d)\n",
+                               ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static inline int adc_mute(struct snd_soc_codec *codec)
+{
+       int ret;
+
+       ret = snd_soc_update_bits(codec, R_CNVRTR0, RM_CNVRTR0_ADCMU,
+               RV_CNVRTR0_ADCMU_ENABLE);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to mute ADC (%d)\n",
+                               ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static inline int adc_unmute(struct snd_soc_codec *codec)
+{
+       int ret;
+
+       ret = snd_soc_update_bits(codec, R_CNVRTR0, RM_CNVRTR0_ADCMU,
+               RV_CNVRTR0_ADCMU_DISABLE);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to unmute ADC (%d)\n",
+                               ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int tscs42xx_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       int ret;
+
+       if (mute)
+               if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+                       ret = dac_mute(codec);
+               else
+                       ret = adc_mute(codec);
+       else
+               if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+                       ret = dac_unmute(codec);
+               else
+                       ret = adc_unmute(codec);
+
+       return ret;
+}
+
+static int tscs42xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
+               unsigned int fmt)
+{
+       struct snd_soc_codec *codec = codec_dai->codec;
+       int ret;
+
+       /* Slave mode not supported since it needs always-on frame clock */
+       switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+       case SND_SOC_DAIFMT_CBM_CFM:
+               ret = snd_soc_update_bits(codec, R_AIC1, RM_AIC1_MS,
+                               RV_AIC1_MS_MASTER);
+               if (ret < 0) {
+                       dev_err(codec->dev,
+                               "Failed to set codec DAI master (%d)\n", ret);
+                       return ret;
+               }
+               break;
+       default:
+               ret = -EINVAL;
+               dev_err(codec->dev, "Unsupported format (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int tscs42xx_set_dai_bclk_ratio(struct snd_soc_dai *codec_dai,
+               unsigned int ratio)
+{
+       struct snd_soc_codec *codec = codec_dai->codec;
+       struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+       unsigned int value;
+       int ret = 0;
+
+       switch (ratio) {
+       case 32:
+               value = RV_DACSR_DBCM_32;
+               break;
+       case 40:
+               value = RV_DACSR_DBCM_40;
+               break;
+       case 64:
+               value = RV_DACSR_DBCM_64;
+               break;
+       default:
+               dev_err(codec->dev, "Unsupported bclk ratio (%u)\n", ratio);
+               return -EINVAL;
+       }
+
+       ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBCM, value);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to set DAC BCLK ratio (%d)\n", ret);
+               return ret;
+       }
+       ret = snd_soc_update_bits(codec, R_ADCSR, RM_ADCSR_ABCM, value);
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to set ADC BCLK ratio (%d)\n", ret);
+               return ret;
+       }
+
+       mutex_lock(&tscs42xx->audio_params_lock);
+
+       tscs42xx->bclk_ratio = ratio;
+
+       mutex_unlock(&tscs42xx->audio_params_lock);
+
+       return 0;
+}
+
+static int tscs42xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+       int clk_id, unsigned int freq, int dir)
+{
+       struct snd_soc_codec *codec = codec_dai->codec;
+       int ret;
+
+       switch (clk_id) {
+       case TSCS42XX_PLL_SRC_XTAL:
+       case TSCS42XX_PLL_SRC_MCLK1:
+               ret = snd_soc_write(codec, R_PLLREFSEL,
+                               RV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 |
+                               RV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1);
+               if (ret < 0) {
+                       dev_err(codec->dev,
+                               "Failed to set pll reference input (%d)\n",
+                               ret);
+                       return ret;
+               }
+               break;
+       case TSCS42XX_PLL_SRC_MCLK2:
+               ret = snd_soc_write(codec, R_PLLREFSEL,
+                               RV_PLLREFSEL_PLL1_REF_SEL_MCLK2 |
+                               RV_PLLREFSEL_PLL2_REF_SEL_MCLK2);
+               if (ret < 0) {
+                       dev_err(codec->dev,
+                               "Failed to set PLL reference (%d)\n", ret);
+                       return ret;
+               }
+               break;
+       default:
+               dev_err(codec->dev, "pll src is unsupported\n");
+               return -EINVAL;
+       }
+
+       ret = set_pll_ctl_from_input_freq(codec, freq);
+       if (ret < 0) {
+               dev_err(codec->dev,
+                       "Failed to setup PLL input freq (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct snd_soc_dai_ops tscs42xx_dai_ops = {
+       .hw_params      = tscs42xx_hw_params,
+       .mute_stream    = tscs42xx_mute_stream,
+       .set_fmt        = tscs42xx_set_dai_fmt,
+       .set_bclk_ratio = tscs42xx_set_dai_bclk_ratio,
+       .set_sysclk     = tscs42xx_set_dai_sysclk,
+};
+
+static int part_is_valid(struct tscs42xx *tscs42xx)
+{
+       int val;
+       int ret;
+       unsigned int reg;
+
+       ret = regmap_read(tscs42xx->regmap, R_DEVIDH, &reg);
+       if (ret < 0)
+               return ret;
+
+       val = reg << 8;
+       ret = regmap_read(tscs42xx->regmap, R_DEVIDL, &reg);
+       if (ret < 0)
+               return ret;
+
+       val |= reg;
+
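+       /*
+        * 0x4A74 and 0x4A73 are the expected R_DEVIDH/R_DEVIDL readings for
+        * the supported parts, presumably the tscs42A1/tscs42A2 variants
+        * listed in the I2C and OF match tables below (that mapping is an
+        * assumption, not stated in this patch).
+        */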
+       switch (val) {
+       case 0x4A74:
+       case 0x4A73:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_tscs42xx = {
+       .component_driver = {
+               .dapm_widgets = tscs42xx_dapm_widgets,
+               .num_dapm_widgets = ARRAY_SIZE(tscs42xx_dapm_widgets),
+               .dapm_routes = tscs42xx_intercon,
+               .num_dapm_routes = ARRAY_SIZE(tscs42xx_intercon),
+               .controls =     tscs42xx_snd_controls,
+               .num_controls = ARRAY_SIZE(tscs42xx_snd_controls),
+       },
+};
+
+static inline void init_coeff_ram_cache(struct tscs42xx *tscs42xx)
+{
+       const u8 norm_addrs[] = { 0x00, 0x05, 0x0a, 0x0f, 0x14, 0x19, 0x1f,
+               0x20, 0x25, 0x2a, 0x2f, 0x34, 0x39, 0x3f, 0x40, 0x45, 0x4a,
+               0x4f, 0x54, 0x59, 0x5f, 0x60, 0x65, 0x6a, 0x6f, 0x74, 0x79,
+               0x7f, 0x80, 0x85, 0x8c, 0x91, 0x96, 0x97, 0x9c, 0xa3, 0xa8,
+               0xad, 0xaf, 0xb0, 0xb5, 0xba, 0xbf, 0xc4, 0xc9, };
+       u8 *coeff_ram = tscs42xx->coeff_ram;
+       int i;
+
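+       /*
+        * Each address in norm_addrs is the start of a COEFF_SIZE-byte
+        * coefficient block, so the loop below seeds the last byte of each
+        * listed block with 0x40, presumably the part's reset (unity) value
+        * for that coefficient (the meaning of 0x40 is an assumption, not
+        * stated in this patch).
+        */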
+       for (i = 0; i < ARRAY_SIZE(norm_addrs); i++)
+               coeff_ram[((norm_addrs[i] + 1) * COEFF_SIZE) - 1] = 0x40;
+}
+
+#define TSCS42XX_RATES SNDRV_PCM_RATE_8000_96000
+
+#define TSCS42XX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
+       | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
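+/*
+ * Single full-duplex DAI: the symmetric_rates, symmetric_channels and
+ * symmetric_samplebits flags below make ASoC constrain playback and capture
+ * to matching parameters when both run, which seems consistent with the
+ * R_AIC2 BLRCM patch applied at probe time (shared BCLK/LRCLK between the
+ * DAC and ADC paths is the assumption behind this note).
+ */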
+static struct snd_soc_dai_driver tscs42xx_dai = {
+       .name = "tscs42xx-HiFi",
+       .playback = {
+               .stream_name = "HiFi Playback",
+               .channels_min = 2,
+               .channels_max = 2,
+               .rates = TSCS42XX_RATES,
+               .formats = TSCS42XX_FORMATS,},
+       .capture = {
+               .stream_name = "HiFi Capture",
+               .channels_min = 2,
+               .channels_max = 2,
+               .rates = TSCS42XX_RATES,
+               .formats = TSCS42XX_FORMATS,},
+       .ops = &tscs42xx_dai_ops,
+       .symmetric_rates = 1,
+       .symmetric_channels = 1,
+       .symmetric_samplebits = 1,
+};
+
+static const struct reg_sequence tscs42xx_patch[] = {
+       { R_AIC2, RV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED },
+};
+
+static int tscs42xx_i2c_probe(struct i2c_client *i2c,
+               const struct i2c_device_id *id)
+{
+       struct tscs42xx *tscs42xx;
+       int ret = 0;
+
+       tscs42xx = devm_kzalloc(&i2c->dev, sizeof(*tscs42xx), GFP_KERNEL);
+       if (!tscs42xx)
+               return -ENOMEM;
+       i2c_set_clientdata(i2c, tscs42xx);
+       tscs42xx->dev = &i2c->dev;
+
+       tscs42xx->regmap = devm_regmap_init_i2c(i2c, &tscs42xx_regmap);
+       if (IS_ERR(tscs42xx->regmap)) {
+               ret = PTR_ERR(tscs42xx->regmap);
+               dev_err(tscs42xx->dev, "Failed to allocate regmap (%d)\n", ret);
+               return ret;
+       }
+
+       init_coeff_ram_cache(tscs42xx);
+
+       ret = part_is_valid(tscs42xx);
+       if (ret <= 0) {
+               dev_err(tscs42xx->dev, "No valid part (%d)\n", ret);
+               ret = -ENODEV;
+               return ret;
+       }
+
+       ret = regmap_write(tscs42xx->regmap, R_RESET, RV_RESET_ENABLE);
+       if (ret < 0) {
+               dev_err(tscs42xx->dev, "Failed to reset device (%d)\n", ret);
+               return ret;
+       }
+
+       ret = regmap_register_patch(tscs42xx->regmap, tscs42xx_patch,
+                       ARRAY_SIZE(tscs42xx_patch));
+       if (ret < 0) {
+               dev_err(tscs42xx->dev, "Failed to apply patch (%d)\n", ret);
+               return ret;
+       }
+
+       mutex_init(&tscs42xx->audio_params_lock);
+       mutex_init(&tscs42xx->coeff_ram_lock);
+       mutex_init(&tscs42xx->pll_lock);
+
+       ret = snd_soc_register_codec(tscs42xx->dev, &soc_codec_dev_tscs42xx,
+                       &tscs42xx_dai, 1);
+       if (ret) {
+               dev_err(tscs42xx->dev, "Failed to register codec (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int tscs42xx_i2c_remove(struct i2c_client *client)
+{
+       snd_soc_unregister_codec(&client->dev);
+
+       return 0;
+}
+
+static const struct i2c_device_id tscs42xx_i2c_id[] = {
+       { "tscs42A1", 0 },
+       { "tscs42A2", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, tscs42xx_i2c_id);
+
+static const struct of_device_id tscs42xx_of_match[] = {
+       { .compatible = "tempo,tscs42A1", },
+       { .compatible = "tempo,tscs42A2", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, tscs42xx_of_match);
+
+static struct i2c_driver tscs42xx_i2c_driver = {
+       .driver = {
+               .name = "tscs42xx",
+               .owner = THIS_MODULE,
+               .of_match_table = tscs42xx_of_match,
+       },
+       .probe =    tscs42xx_i2c_probe,
+       .remove =   tscs42xx_i2c_remove,
+       .id_table = tscs42xx_i2c_id,
+};
+
+module_i2c_driver(tscs42xx_i2c_driver);
+
+MODULE_AUTHOR("Tempo Semiconductor <steven.eckhoff.opensource@gmail.com");
+MODULE_DESCRIPTION("ASoC TSCS42xx driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tscs42xx.h b/sound/soc/codecs/tscs42xx.h
new file mode 100644 (file)
index 0000000..d4a30bc
--- /dev/null
@@ -0,0 +1,2693 @@
+// SPDX-License-Identifier: GPL-2.0
+// tscs42xx.h -- TSCS42xx ALSA SoC Audio driver
+// Copyright 2017 Tempo Semiconductor, Inc.
+// Author: Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+
+#ifndef __WOOKIE_H__
+#define __WOOKIE_H__
+
+enum {
+       TSCS42XX_PLL_SRC_NONE,
+       TSCS42XX_PLL_SRC_XTAL,
+       TSCS42XX_PLL_SRC_MCLK1,
+       TSCS42XX_PLL_SRC_MCLK2,
+};
+
+#define R_HPVOLL        0x0
+#define R_HPVOLR        0x1
+#define R_SPKVOLL       0x2
+#define R_SPKVOLR       0x3
+#define R_DACVOLL       0x4
+#define R_DACVOLR       0x5
+#define R_ADCVOLL       0x6
+#define R_ADCVOLR       0x7
+#define R_INVOLL        0x8
+#define R_INVOLR        0x9
+#define R_INMODE        0x0B
+#define R_INSELL        0x0C
+#define R_INSELR        0x0D
+#define R_AIC1          0x13
+#define R_AIC2          0x14
+#define R_CNVRTR0       0x16
+#define R_ADCSR         0x17
+#define R_CNVRTR1       0x18
+#define R_DACSR         0x19
+#define R_PWRM1         0x1A
+#define R_PWRM2         0x1B
+#define R_CONFIG0       0x1F
+#define R_CONFIG1       0x20
+#define R_DMICCTL       0x24
+#define R_CLECTL        0x25
+#define R_MUGAIN        0x26
+#define R_COMPTH        0x27
+#define R_CMPRAT        0x28
+#define R_CATKTCL       0x29
+#define R_CATKTCH       0x2A
+#define R_CRELTCL       0x2B
+#define R_CRELTCH       0x2C
+#define R_LIMTH         0x2D
+#define R_LIMTGT        0x2E
+#define R_LATKTCL       0x2F
+#define R_LATKTCH       0x30
+#define R_LRELTCL       0x31
+#define R_LRELTCH       0x32
+#define R_EXPTH         0x33
+#define R_EXPRAT        0x34
+#define R_XATKTCL       0x35
+#define R_XATKTCH       0x36
+#define R_XRELTCL       0x37
+#define R_XRELTCH       0x38
+#define R_FXCTL         0x39
+#define R_DACCRWRL      0x3A
+#define R_DACCRWRM      0x3B
+#define R_DACCRWRH      0x3C
+#define R_DACCRRDL      0x3D
+#define R_DACCRRDM      0x3E
+#define R_DACCRRDH      0x3F
+#define R_DACCRADDR     0x40
+#define R_DCOFSEL       0x41
+#define R_PLLCTL9       0x4E
+#define R_PLLCTLA       0x4F
+#define R_PLLCTLB       0x50
+#define R_PLLCTLC       0x51
+#define R_PLLCTLD       0x52
+#define R_PLLCTLE       0x53
+#define R_PLLCTLF       0x54
+#define R_PLLCTL10      0x55
+#define R_PLLCTL11      0x56
+#define R_PLLCTL12      0x57
+#define R_PLLCTL1B      0x60
+#define R_PLLCTL1C      0x61
+#define R_TIMEBASE      0x77
+#define R_DEVIDL        0x7D
+#define R_DEVIDH        0x7E
+#define R_RESET         0x80
+#define R_DACCRSTAT     0x8A
+#define R_PLLCTL0       0x8E
+#define R_PLLREFSEL     0x8F
+#define R_DACMBCEN      0xC7
+#define R_DACMBCCTL     0xC8
+#define R_DACMBCMUG1    0xC9
+#define R_DACMBCTHR1    0xCA
+#define R_DACMBCRAT1    0xCB
+#define R_DACMBCATK1L   0xCC
+#define R_DACMBCATK1H   0xCD
+#define R_DACMBCREL1L   0xCE
+#define R_DACMBCREL1H   0xCF
+#define R_DACMBCMUG2    0xD0
+#define R_DACMBCTHR2    0xD1
+#define R_DACMBCRAT2    0xD2
+#define R_DACMBCATK2L   0xD3
+#define R_DACMBCATK2H   0xD4
+#define R_DACMBCREL2L   0xD5
+#define R_DACMBCREL2H   0xD6
+#define R_DACMBCMUG3    0xD7
+#define R_DACMBCTHR3    0xD8
+#define R_DACMBCRAT3    0xD9
+#define R_DACMBCATK3L   0xDA
+#define R_DACMBCATK3H   0xDB
+#define R_DACMBCREL3L   0xDC
+#define R_DACMBCREL3H   0xDD
+
+/* Helpers */
+#define RM(m, b) ((m)<<(b))
+#define RV(v, b) ((v)<<(b))
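+
+/*
+ * Worked example: for the R_AIC1 word-length field defined below,
+ * FB_AIC1_WL is 2 and FM_AIC1_WL is 0x3, so RM_AIC1_WL expands to
+ * RM(0x3, 2) == 0x3 << 2 == 0x0C and RV_AIC1_WL_24 expands to
+ * RV(0x2, 2) == 0x08.  Every register mask (RM_...) and register value
+ * (RV_...) macro in this header is composed from its field mask or field
+ * value plus the field's bit offset in the same way.
+ */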
+
+/****************************
+ *      R_HPVOLL (0x0)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_HPVOLL                            0
+
+/* Field Masks */
+#define FM_HPVOLL                            0X7F
+
+/* Field Values */
+#define FV_HPVOLL_P6DB                       0x7F
+#define FV_HPVOLL_N88PT5DB                   0x1
+#define FV_HPVOLL_MUTE                       0x0
+
+/* Register Masks */
+#define RM_HPVOLL                            RM(FM_HPVOLL, FB_HPVOLL)
+
+/* Register Values */
+#define RV_HPVOLL_P6DB                       RV(FV_HPVOLL_P6DB, FB_HPVOLL)
+#define RV_HPVOLL_N88PT5DB                   RV(FV_HPVOLL_N88PT5DB, FB_HPVOLL)
+#define RV_HPVOLL_MUTE                       RV(FV_HPVOLL_MUTE, FB_HPVOLL)
+
+/****************************
+ *      R_HPVOLR (0x1)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_HPVOLR                            0
+
+/* Field Masks */
+#define FM_HPVOLR                            0X7F
+
+/* Field Values */
+#define FV_HPVOLR_P6DB                       0x7F
+#define FV_HPVOLR_N88PT5DB                   0x1
+#define FV_HPVOLR_MUTE                       0x0
+
+/* Register Masks */
+#define RM_HPVOLR                            RM(FM_HPVOLR, FB_HPVOLR)
+
+/* Register Values */
+#define RV_HPVOLR_P6DB                       RV(FV_HPVOLR_P6DB, FB_HPVOLR)
+#define RV_HPVOLR_N88PT5DB                   RV(FV_HPVOLR_N88PT5DB, FB_HPVOLR)
+#define RV_HPVOLR_MUTE                       RV(FV_HPVOLR_MUTE, FB_HPVOLR)
+
+/*****************************
+ *      R_SPKVOLL (0x2)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_SPKVOLL                           0
+
+/* Field Masks */
+#define FM_SPKVOLL                           0X7F
+
+/* Field Values */
+#define FV_SPKVOLL_P12DB                     0x7F
+#define FV_SPKVOLL_N77PT25DB                 0x8
+#define FV_SPKVOLL_MUTE                      0x0
+
+/* Register Masks */
+#define RM_SPKVOLL                           RM(FM_SPKVOLL, FB_SPKVOLL)
+
+/* Register Values */
+#define RV_SPKVOLL_P12DB                     RV(FV_SPKVOLL_P12DB, FB_SPKVOLL)
+#define RV_SPKVOLL_N77PT25DB \
+        RV(FV_SPKVOLL_N77PT25DB, FB_SPKVOLL)
+
+#define RV_SPKVOLL_MUTE                      RV(FV_SPKVOLL_MUTE, FB_SPKVOLL)
+
+/*****************************
+ *      R_SPKVOLR (0x3)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_SPKVOLR                           0
+
+/* Field Masks */
+#define FM_SPKVOLR                           0X7F
+
+/* Field Values */
+#define FV_SPKVOLR_P12DB                     0x7F
+#define FV_SPKVOLR_N77PT25DB                 0x8
+#define FV_SPKVOLR_MUTE                      0x0
+
+/* Register Masks */
+#define RM_SPKVOLR                           RM(FM_SPKVOLR, FB_SPKVOLR)
+
+/* Register Values */
+#define RV_SPKVOLR_P12DB                     RV(FV_SPKVOLR_P12DB, FB_SPKVOLR)
+#define RV_SPKVOLR_N77PT25DB \
+        RV(FV_SPKVOLR_N77PT25DB, FB_SPKVOLR)
+
+#define RV_SPKVOLR_MUTE                      RV(FV_SPKVOLR_MUTE, FB_SPKVOLR)
+
+/*****************************
+ *      R_DACVOLL (0x4)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DACVOLL                           0
+
+/* Field Masks */
+#define FM_DACVOLL                           0XFF
+
+/* Field Values */
+#define FV_DACVOLL_0DB                       0xFF
+#define FV_DACVOLL_N95PT625DB                0x1
+#define FV_DACVOLL_MUTE                      0x0
+
+/* Register Masks */
+#define RM_DACVOLL                           RM(FM_DACVOLL, FB_DACVOLL)
+
+/* Register Values */
+#define RV_DACVOLL_0DB                       RV(FV_DACVOLL_0DB, FB_DACVOLL)
+#define RV_DACVOLL_N95PT625DB \
+        RV(FV_DACVOLL_N95PT625DB, FB_DACVOLL)
+
+#define RV_DACVOLL_MUTE                      RV(FV_DACVOLL_MUTE, FB_DACVOLL)
+
+/*****************************
+ *      R_DACVOLR (0x5)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DACVOLR                           0
+
+/* Field Masks */
+#define FM_DACVOLR                           0XFF
+
+/* Field Values */
+#define FV_DACVOLR_0DB                       0xFF
+#define FV_DACVOLR_N95PT625DB                0x1
+#define FV_DACVOLR_MUTE                      0x0
+
+/* Register Masks */
+#define RM_DACVOLR                           RM(FM_DACVOLR, FB_DACVOLR)
+
+/* Register Values */
+#define RV_DACVOLR_0DB                       RV(FV_DACVOLR_0DB, FB_DACVOLR)
+#define RV_DACVOLR_N95PT625DB \
+        RV(FV_DACVOLR_N95PT625DB, FB_DACVOLR)
+
+#define RV_DACVOLR_MUTE                      RV(FV_DACVOLR_MUTE, FB_DACVOLR)
+
+/*****************************
+ *      R_ADCVOLL (0x6)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_ADCVOLL                           0
+
+/* Field Masks */
+#define FM_ADCVOLL                           0XFF
+
+/* Field Values */
+#define FV_ADCVOLL_P24DB                     0xFF
+#define FV_ADCVOLL_N71PT25DB                 0x1
+#define FV_ADCVOLL_MUTE                      0x0
+
+/* Register Masks */
+#define RM_ADCVOLL                           RM(FM_ADCVOLL, FB_ADCVOLL)
+
+/* Register Values */
+#define RV_ADCVOLL_P24DB                     RV(FV_ADCVOLL_P24DB, FB_ADCVOLL)
+#define RV_ADCVOLL_N71PT25DB \
+        RV(FV_ADCVOLL_N71PT25DB, FB_ADCVOLL)
+
+#define RV_ADCVOLL_MUTE                      RV(FV_ADCVOLL_MUTE, FB_ADCVOLL)
+
+/*****************************
+ *      R_ADCVOLR (0x7)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_ADCVOLR                           0
+
+/* Field Masks */
+#define FM_ADCVOLR                           0XFF
+
+/* Field Values */
+#define FV_ADCVOLR_P24DB                     0xFF
+#define FV_ADCVOLR_N71PT25DB                 0x1
+#define FV_ADCVOLR_MUTE                      0x0
+
+/* Register Masks */
+#define RM_ADCVOLR                           RM(FM_ADCVOLR, FB_ADCVOLR)
+
+/* Register Values */
+#define RV_ADCVOLR_P24DB                     RV(FV_ADCVOLR_P24DB, FB_ADCVOLR)
+#define RV_ADCVOLR_N71PT25DB \
+        RV(FV_ADCVOLR_N71PT25DB, FB_ADCVOLR)
+
+#define RV_ADCVOLR_MUTE                      RV(FV_ADCVOLR_MUTE, FB_ADCVOLR)
+
+/****************************
+ *      R_INVOLL (0x8)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_INVOLL_INMUTEL                    7
+#define FB_INVOLL_IZCL                       6
+#define FB_INVOLL                            0
+
+/* Field Masks */
+#define FM_INVOLL_INMUTEL                    0X1
+#define FM_INVOLL_IZCL                       0X1
+#define FM_INVOLL                            0X3F
+
+/* Field Values */
+#define FV_INVOLL_INMUTEL_ENABLE             0x1
+#define FV_INVOLL_INMUTEL_DISABLE            0x0
+#define FV_INVOLL_IZCL_ENABLE                0x1
+#define FV_INVOLL_IZCL_DISABLE               0x0
+#define FV_INVOLL_P30DB                      0x3F
+#define FV_INVOLL_N17PT25DB                  0x0
+
+/* Register Masks */
+#define RM_INVOLL_INMUTEL \
+        RM(FM_INVOLL_INMUTEL, FB_INVOLL_INMUTEL)
+
+#define RM_INVOLL_IZCL                       RM(FM_INVOLL_IZCL, FB_INVOLL_IZCL)
+#define RM_INVOLL                            RM(FM_INVOLL, FB_INVOLL)
+
+/* Register Values */
+#define RV_INVOLL_INMUTEL_ENABLE \
+        RV(FV_INVOLL_INMUTEL_ENABLE, FB_INVOLL_INMUTEL)
+
+#define RV_INVOLL_INMUTEL_DISABLE \
+        RV(FV_INVOLL_INMUTEL_DISABLE, FB_INVOLL_INMUTEL)
+
+#define RV_INVOLL_IZCL_ENABLE \
+        RV(FV_INVOLL_IZCL_ENABLE, FB_INVOLL_IZCL)
+
+#define RV_INVOLL_IZCL_DISABLE \
+        RV(FV_INVOLL_IZCL_DISABLE, FB_INVOLL_IZCL)
+
+#define RV_INVOLL_P30DB                      RV(FV_INVOLL_P30DB, FB_INVOLL)
+#define RV_INVOLL_N17PT25DB                  RV(FV_INVOLL_N17PT25DB, FB_INVOLL)
+
+/****************************
+ *      R_INVOLR (0x9)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_INVOLR_INMUTER                    7
+#define FB_INVOLR_IZCR                       6
+#define FB_INVOLR                            0
+
+/* Field Masks */
+#define FM_INVOLR_INMUTER                    0X1
+#define FM_INVOLR_IZCR                       0X1
+#define FM_INVOLR                            0X3F
+
+/* Field Values */
+#define FV_INVOLR_INMUTER_ENABLE             0x1
+#define FV_INVOLR_INMUTER_DISABLE            0x0
+#define FV_INVOLR_IZCR_ENABLE                0x1
+#define FV_INVOLR_IZCR_DISABLE               0x0
+#define FV_INVOLR_P30DB                      0x3F
+#define FV_INVOLR_N17PT25DB                  0x0
+
+/* Register Masks */
+#define RM_INVOLR_INMUTER \
+        RM(FM_INVOLR_INMUTER, FB_INVOLR_INMUTER)
+
+#define RM_INVOLR_IZCR                       RM(FM_INVOLR_IZCR, FB_INVOLR_IZCR)
+#define RM_INVOLR                            RM(FM_INVOLR, FB_INVOLR)
+
+/* Register Values */
+#define RV_INVOLR_INMUTER_ENABLE \
+        RV(FV_INVOLR_INMUTER_ENABLE, FB_INVOLR_INMUTER)
+
+#define RV_INVOLR_INMUTER_DISABLE \
+        RV(FV_INVOLR_INMUTER_DISABLE, FB_INVOLR_INMUTER)
+
+#define RV_INVOLR_IZCR_ENABLE \
+        RV(FV_INVOLR_IZCR_ENABLE, FB_INVOLR_IZCR)
+
+#define RV_INVOLR_IZCR_DISABLE \
+        RV(FV_INVOLR_IZCR_DISABLE, FB_INVOLR_IZCR)
+
+#define RV_INVOLR_P30DB                      RV(FV_INVOLR_P30DB, FB_INVOLR)
+#define RV_INVOLR_N17PT25DB                  RV(FV_INVOLR_N17PT25DB, FB_INVOLR)
+
+/*****************************
+ *      R_INMODE (0x0B)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INMODE_DS                         0
+
+/* Field Masks */
+#define FM_INMODE_DS                         0X1
+
+/* Field Values */
+#define FV_INMODE_DS_LRIN1                   0x0
+#define FV_INMODE_DS_LRIN2                   0x1
+
+/* Register Masks */
+#define RM_INMODE_DS                         RM(FM_INMODE_DS, FB_INMODE_DS)
+
+/* Register Values */
+#define RV_INMODE_DS_LRIN1 \
+        RV(FV_INMODE_DS_LRIN1, FB_INMODE_DS)
+
+#define RV_INMODE_DS_LRIN2 \
+        RV(FV_INMODE_DS_LRIN2, FB_INMODE_DS)
+
+
+/*****************************
+ *      R_INSELL (0x0C)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INSELL                            6
+#define FB_INSELL_MICBSTL                    4
+
+/* Field Masks */
+#define FM_INSELL                            0X3
+#define FM_INSELL_MICBSTL                    0X3
+
+/* Field Values */
+#define FV_INSELL_IN1                        0x0
+#define FV_INSELL_IN2                        0x1
+#define FV_INSELL_IN3                        0x2
+#define FV_INSELL_D2S                        0x3
+#define FV_INSELL_MICBSTL_OFF                0x0
+#define FV_INSELL_MICBSTL_10DB               0x1
+#define FV_INSELL_MICBSTL_20DB               0x2
+#define FV_INSELL_MICBSTL_30DB               0x3
+
+/* Register Masks */
+#define RM_INSELL                            RM(FM_INSELL, FB_INSELL)
+#define RM_INSELL_MICBSTL \
+        RM(FM_INSELL_MICBSTL, FB_INSELL_MICBSTL)
+
+
+/* Register Values */
+#define RV_INSELL_IN1                        RV(FV_INSELL_IN1, FB_INSELL)
+#define RV_INSELL_IN2                        RV(FV_INSELL_IN2, FB_INSELL)
+#define RV_INSELL_IN3                        RV(FV_INSELL_IN3, FB_INSELL)
+#define RV_INSELL_D2S                        RV(FV_INSELL_D2S, FB_INSELL)
+#define RV_INSELL_MICBSTL_OFF \
+        RV(FV_INSELL_MICBSTL_OFF, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_10DB \
+        RV(FV_INSELL_MICBSTL_10DB, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_20DB \
+        RV(FV_INSELL_MICBSTL_20DB, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_30DB \
+        RV(FV_INSELL_MICBSTL_30DB, FB_INSELL_MICBSTL)
+
+
+/*****************************
+ *      R_INSELR (0x0D)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INSELR                            6
+#define FB_INSELR_MICBSTR                    4
+
+/* Field Masks */
+#define FM_INSELR                            0X3
+#define FM_INSELR_MICBSTR                    0X3
+
+/* Field Values */
+#define FV_INSELR_IN1                        0x0
+#define FV_INSELR_IN2                        0x1
+#define FV_INSELR_IN3                        0x2
+#define FV_INSELR_D2S                        0x3
+#define FV_INSELR_MICBSTR_OFF                0x0
+#define FV_INSELR_MICBSTR_10DB               0x1
+#define FV_INSELR_MICBSTR_20DB               0x2
+#define FV_INSELR_MICBSTR_30DB               0x3
+
+/* Register Masks */
+#define RM_INSELR                            RM(FM_INSELR, FB_INSELR)
+#define RM_INSELR_MICBSTR \
+        RM(FM_INSELR_MICBSTR, FB_INSELR_MICBSTR)
+
+
+/* Register Values */
+#define RV_INSELR_IN1                        RV(FV_INSELR_IN1, FB_INSELR)
+#define RV_INSELR_IN2                        RV(FV_INSELR_IN2, FB_INSELR)
+#define RV_INSELR_IN3                        RV(FV_INSELR_IN3, FB_INSELR)
+#define RV_INSELR_D2S                        RV(FV_INSELR_D2S, FB_INSELR)
+#define RV_INSELR_MICBSTR_OFF \
+        RV(FV_INSELR_MICBSTR_OFF, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_10DB \
+        RV(FV_INSELR_MICBSTR_10DB, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_20DB \
+        RV(FV_INSELR_MICBSTR_20DB, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_30DB \
+        RV(FV_INSELR_MICBSTR_30DB, FB_INSELR_MICBSTR)
+
+
+/***************************
+ *      R_AIC1 (0x13)      *
+ ***************************/
+
+/* Field Offsets */
+#define FB_AIC1_BCLKINV                      6
+#define FB_AIC1_MS                           5
+#define FB_AIC1_LRP                          4
+#define FB_AIC1_WL                           2
+#define FB_AIC1_FORMAT                       0
+
+/* Field Masks */
+#define FM_AIC1_BCLKINV                      0X1
+#define FM_AIC1_MS                           0X1
+#define FM_AIC1_LRP                          0X1
+#define FM_AIC1_WL                           0X3
+#define FM_AIC1_FORMAT                       0X3
+
+/* Field Values */
+#define FV_AIC1_BCLKINV_ENABLE               0x1
+#define FV_AIC1_BCLKINV_DISABLE              0x0
+#define FV_AIC1_MS_MASTER                    0x1
+#define FV_AIC1_MS_SLAVE                     0x0
+#define FV_AIC1_LRP_INVERT                   0x1
+#define FV_AIC1_LRP_NORMAL                   0x0
+#define FV_AIC1_WL_16                        0x0
+#define FV_AIC1_WL_20                        0x1
+#define FV_AIC1_WL_24                        0x2
+#define FV_AIC1_WL_32                        0x3
+#define FV_AIC1_FORMAT_RIGHT                 0x0
+#define FV_AIC1_FORMAT_LEFT                  0x1
+#define FV_AIC1_FORMAT_I2S                   0x2
+
+/* Register Masks */
+#define RM_AIC1_BCLKINV \
+        RM(FM_AIC1_BCLKINV, FB_AIC1_BCLKINV)
+
+#define RM_AIC1_MS                           RM(FM_AIC1_MS, FB_AIC1_MS)
+#define RM_AIC1_LRP                          RM(FM_AIC1_LRP, FB_AIC1_LRP)
+#define RM_AIC1_WL                           RM(FM_AIC1_WL, FB_AIC1_WL)
+#define RM_AIC1_FORMAT                       RM(FM_AIC1_FORMAT, FB_AIC1_FORMAT)
+
+/* Register Values */
+#define RV_AIC1_BCLKINV_ENABLE \
+        RV(FV_AIC1_BCLKINV_ENABLE, FB_AIC1_BCLKINV)
+
+#define RV_AIC1_BCLKINV_DISABLE \
+        RV(FV_AIC1_BCLKINV_DISABLE, FB_AIC1_BCLKINV)
+
+#define RV_AIC1_MS_MASTER                    RV(FV_AIC1_MS_MASTER, FB_AIC1_MS)
+#define RV_AIC1_MS_SLAVE                     RV(FV_AIC1_MS_SLAVE, FB_AIC1_MS)
+#define RV_AIC1_LRP_INVERT \
+        RV(FV_AIC1_LRP_INVERT, FB_AIC1_LRP)
+
+#define RV_AIC1_LRP_NORMAL \
+        RV(FV_AIC1_LRP_NORMAL, FB_AIC1_LRP)
+
+#define RV_AIC1_WL_16                        RV(FV_AIC1_WL_16, FB_AIC1_WL)
+#define RV_AIC1_WL_20                        RV(FV_AIC1_WL_20, FB_AIC1_WL)
+#define RV_AIC1_WL_24                        RV(FV_AIC1_WL_24, FB_AIC1_WL)
+#define RV_AIC1_WL_32                        RV(FV_AIC1_WL_32, FB_AIC1_WL)
+#define RV_AIC1_FORMAT_RIGHT \
+        RV(FV_AIC1_FORMAT_RIGHT, FB_AIC1_FORMAT)
+
+#define RV_AIC1_FORMAT_LEFT \
+        RV(FV_AIC1_FORMAT_LEFT, FB_AIC1_FORMAT)
+
+#define RV_AIC1_FORMAT_I2S \
+        RV(FV_AIC1_FORMAT_I2S, FB_AIC1_FORMAT)
+
+
+/***************************
+ *      R_AIC2 (0x14)      *
+ ***************************/
+
+/* Field Offsets */
+#define FB_AIC2_DACDSEL                      6
+#define FB_AIC2_ADCDSEL                      4
+#define FB_AIC2_TRI                          3
+#define FB_AIC2_BLRCM                        0
+
+/* Field Masks */
+#define FM_AIC2_DACDSEL                      0X3
+#define FM_AIC2_ADCDSEL                      0X3
+#define FM_AIC2_TRI                          0X1
+#define FM_AIC2_BLRCM                        0X7
+
+/* Field Values */
+#define FV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED  0x3
+
+/* Register Masks */
+#define RM_AIC2_DACDSEL \
+        RM(FM_AIC2_DACDSEL, FB_AIC2_DACDSEL)
+
+#define RM_AIC2_ADCDSEL \
+        RM(FM_AIC2_ADCDSEL, FB_AIC2_ADCDSEL)
+
+#define RM_AIC2_TRI                          RM(FM_AIC2_TRI, FB_AIC2_TRI)
+#define RM_AIC2_BLRCM                        RM(FM_AIC2_BLRCM, FB_AIC2_BLRCM)
+
+/* Register Values */
+#define RV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED \
+        RV(FV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED, FB_AIC2_BLRCM)
+
+
+/******************************
+ *      R_CNVRTR0 (0x16)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CNVRTR0_ADCPOLR                   7
+#define FB_CNVRTR0_ADCPOLL                   6
+#define FB_CNVRTR0_AMONOMIX                  4
+#define FB_CNVRTR0_ADCMU                     3
+#define FB_CNVRTR0_HPOR                      2
+#define FB_CNVRTR0_ADCHPDR                   1
+#define FB_CNVRTR0_ADCHPDL                   0
+
+/* Field Masks */
+#define FM_CNVRTR0_ADCPOLR                   0X1
+#define FM_CNVRTR0_ADCPOLL                   0X1
+#define FM_CNVRTR0_AMONOMIX                  0X3
+#define FM_CNVRTR0_ADCMU                     0X1
+#define FM_CNVRTR0_HPOR                      0X1
+#define FM_CNVRTR0_ADCHPDR                   0X1
+#define FM_CNVRTR0_ADCHPDL                   0X1
+
+/* Field Values */
+#define FV_CNVRTR0_ADCPOLR_INVERT            0x1
+#define FV_CNVRTR0_ADCPOLR_NORMAL            0x0
+#define FV_CNVRTR0_ADCPOLL_INVERT            0x1
+#define FV_CNVRTR0_ADCPOLL_NORMAL            0x0
+#define FV_CNVRTR0_ADCMU_ENABLE              0x1
+#define FV_CNVRTR0_ADCMU_DISABLE             0x0
+#define FV_CNVRTR0_ADCHPDR_ENABLE            0x1
+#define FV_CNVRTR0_ADCHPDR_DISABLE           0x0
+#define FV_CNVRTR0_ADCHPDL_ENABLE            0x1
+#define FV_CNVRTR0_ADCHPDL_DISABLE           0x0
+
+/* Register Masks */
+#define RM_CNVRTR0_ADCPOLR \
+        RM(FM_CNVRTR0_ADCPOLR, FB_CNVRTR0_ADCPOLR)
+
+#define RM_CNVRTR0_ADCPOLL \
+        RM(FM_CNVRTR0_ADCPOLL, FB_CNVRTR0_ADCPOLL)
+
+#define RM_CNVRTR0_AMONOMIX \
+        RM(FM_CNVRTR0_AMONOMIX, FB_CNVRTR0_AMONOMIX)
+
+#define RM_CNVRTR0_ADCMU \
+        RM(FM_CNVRTR0_ADCMU, FB_CNVRTR0_ADCMU)
+
+#define RM_CNVRTR0_HPOR \
+        RM(FM_CNVRTR0_HPOR, FB_CNVRTR0_HPOR)
+
+#define RM_CNVRTR0_ADCHPDR \
+        RM(FM_CNVRTR0_ADCHPDR, FB_CNVRTR0_ADCHPDR)
+
+#define RM_CNVRTR0_ADCHPDL \
+        RM(FM_CNVRTR0_ADCHPDL, FB_CNVRTR0_ADCHPDL)
+
+
+/* Register Values */
+#define RV_CNVRTR0_ADCPOLR_INVERT \
+        RV(FV_CNVRTR0_ADCPOLR_INVERT, FB_CNVRTR0_ADCPOLR)
+
+#define RV_CNVRTR0_ADCPOLR_NORMAL \
+        RV(FV_CNVRTR0_ADCPOLR_NORMAL, FB_CNVRTR0_ADCPOLR)
+
+#define RV_CNVRTR0_ADCPOLL_INVERT \
+        RV(FV_CNVRTR0_ADCPOLL_INVERT, FB_CNVRTR0_ADCPOLL)
+
+#define RV_CNVRTR0_ADCPOLL_NORMAL \
+        RV(FV_CNVRTR0_ADCPOLL_NORMAL, FB_CNVRTR0_ADCPOLL)
+
+#define RV_CNVRTR0_ADCMU_ENABLE \
+        RV(FV_CNVRTR0_ADCMU_ENABLE, FB_CNVRTR0_ADCMU)
+
+#define RV_CNVRTR0_ADCMU_DISABLE \
+        RV(FV_CNVRTR0_ADCMU_DISABLE, FB_CNVRTR0_ADCMU)
+
+#define RV_CNVRTR0_ADCHPDR_ENABLE \
+        RV(FV_CNVRTR0_ADCHPDR_ENABLE, FB_CNVRTR0_ADCHPDR)
+
+#define RV_CNVRTR0_ADCHPDR_DISABLE \
+        RV(FV_CNVRTR0_ADCHPDR_DISABLE, FB_CNVRTR0_ADCHPDR)
+
+#define RV_CNVRTR0_ADCHPDL_ENABLE \
+        RV(FV_CNVRTR0_ADCHPDL_ENABLE, FB_CNVRTR0_ADCHPDL)
+
+#define RV_CNVRTR0_ADCHPDL_DISABLE \
+        RV(FV_CNVRTR0_ADCHPDL_DISABLE, FB_CNVRTR0_ADCHPDL)
+
+
+/****************************
+ *      R_ADCSR (0x17)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_ADCSR_ABCM                        6
+#define FB_ADCSR_ABR                         3
+#define FB_ADCSR_ABM                         0
+
+/* Field Masks */
+#define FM_ADCSR_ABCM                        0X3
+#define FM_ADCSR_ABR                         0X3
+#define FM_ADCSR_ABM                         0X7
+
+/* Field Values */
+#define FV_ADCSR_ABCM_AUTO                   0x0
+#define FV_ADCSR_ABCM_32                     0x1
+#define FV_ADCSR_ABCM_40                     0x2
+#define FV_ADCSR_ABCM_64                     0x3
+#define FV_ADCSR_ABR_32                      0x0
+#define FV_ADCSR_ABR_44_1                    0x1
+#define FV_ADCSR_ABR_48                      0x2
+#define FV_ADCSR_ABM_PT25                    0x0
+#define FV_ADCSR_ABM_PT5                     0x1
+#define FV_ADCSR_ABM_1                       0x2
+#define FV_ADCSR_ABM_2                       0x3
+
+/* Register Masks */
+#define RM_ADCSR_ABCM                        RM(FM_ADCSR_ABCM, FB_ADCSR_ABCM)
+#define RM_ADCSR_ABR                         RM(FM_ADCSR_ABR, FB_ADCSR_ABR)
+#define RM_ADCSR_ABM                         RM(FM_ADCSR_ABM, FB_ADCSR_ABM)
+
+/* Register Values */
+#define RV_ADCSR_ABCM_AUTO \
+        RV(FV_ADCSR_ABCM_AUTO, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_32 \
+        RV(FV_ADCSR_ABCM_32, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_40 \
+        RV(FV_ADCSR_ABCM_40, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_64 \
+        RV(FV_ADCSR_ABCM_64, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABR_32                      RV(FV_ADCSR_ABR_32, FB_ADCSR_ABR)
+#define RV_ADCSR_ABR_44_1 \
+        RV(FV_ADCSR_ABR_44_1, FB_ADCSR_ABR)
+
+#define RV_ADCSR_ABR_48                      RV(FV_ADCSR_ABR_48, FB_ADCSR_ABR)
+#define RV_ADCSR_ABM_PT25 \
+        RV(FV_ADCSR_ABM_PT25, FB_ADCSR_ABM)
+
+#define RV_ADCSR_ABM_PT5                     RV(FV_ADCSR_ABM_PT5, FB_ADCSR_ABM)
+#define RV_ADCSR_ABM_1                       RV(FV_ADCSR_ABM_1, FB_ADCSR_ABM)
+#define RV_ADCSR_ABM_2                       RV(FV_ADCSR_ABM_2, FB_ADCSR_ABM)
+
+/******************************
+ *      R_CNVRTR1 (0x18)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CNVRTR1_DACPOLR                   7
+#define FB_CNVRTR1_DACPOLL                   6
+#define FB_CNVRTR1_DMONOMIX                  4
+#define FB_CNVRTR1_DACMU                     3
+#define FB_CNVRTR1_DEEMPH                    2
+#define FB_CNVRTR1_DACDITH                   0
+
+/* Field Masks */
+#define FM_CNVRTR1_DACPOLR                   0X1
+#define FM_CNVRTR1_DACPOLL                   0X1
+#define FM_CNVRTR1_DMONOMIX                  0X3
+#define FM_CNVRTR1_DACMU                     0X1
+#define FM_CNVRTR1_DEEMPH                    0X1
+#define FM_CNVRTR1_DACDITH                   0X3
+
+/* Field Values */
+#define FV_CNVRTR1_DACPOLR_INVERT            0x1
+#define FV_CNVRTR1_DACPOLR_NORMAL            0x0
+#define FV_CNVRTR1_DACPOLL_INVERT            0x1
+#define FV_CNVRTR1_DACPOLL_NORMAL            0x0
+#define FV_CNVRTR1_DMONOMIX_ENABLE           0x1
+#define FV_CNVRTR1_DMONOMIX_DISABLE          0x0
+#define FV_CNVRTR1_DACMU_ENABLE              0x1
+#define FV_CNVRTR1_DACMU_DISABLE             0x0
+
+/* Register Masks */
+#define RM_CNVRTR1_DACPOLR \
+        RM(FM_CNVRTR1_DACPOLR, FB_CNVRTR1_DACPOLR)
+
+#define RM_CNVRTR1_DACPOLL \
+        RM(FM_CNVRTR1_DACPOLL, FB_CNVRTR1_DACPOLL)
+
+#define RM_CNVRTR1_DMONOMIX \
+        RM(FM_CNVRTR1_DMONOMIX, FB_CNVRTR1_DMONOMIX)
+
+#define RM_CNVRTR1_DACMU \
+        RM(FM_CNVRTR1_DACMU, FB_CNVRTR1_DACMU)
+
+#define RM_CNVRTR1_DEEMPH \
+        RM(FM_CNVRTR1_DEEMPH, FB_CNVRTR1_DEEMPH)
+
+#define RM_CNVRTR1_DACDITH \
+        RM(FM_CNVRTR1_DACDITH, FB_CNVRTR1_DACDITH)
+
+
+/* Register Values */
+#define RV_CNVRTR1_DACPOLR_INVERT \
+        RV(FV_CNVRTR1_DACPOLR_INVERT, FB_CNVRTR1_DACPOLR)
+
+#define RV_CNVRTR1_DACPOLR_NORMAL \
+        RV(FV_CNVRTR1_DACPOLR_NORMAL, FB_CNVRTR1_DACPOLR)
+
+#define RV_CNVRTR1_DACPOLL_INVERT \
+        RV(FV_CNVRTR1_DACPOLL_INVERT, FB_CNVRTR1_DACPOLL)
+
+#define RV_CNVRTR1_DACPOLL_NORMAL \
+        RV(FV_CNVRTR1_DACPOLL_NORMAL, FB_CNVRTR1_DACPOLL)
+
+#define RV_CNVRTR1_DMONOMIX_ENABLE \
+        RV(FV_CNVRTR1_DMONOMIX_ENABLE, FB_CNVRTR1_DMONOMIX)
+
+#define RV_CNVRTR1_DMONOMIX_DISABLE \
+        RV(FV_CNVRTR1_DMONOMIX_DISABLE, FB_CNVRTR1_DMONOMIX)
+
+#define RV_CNVRTR1_DACMU_ENABLE \
+        RV(FV_CNVRTR1_DACMU_ENABLE, FB_CNVRTR1_DACMU)
+
+#define RV_CNVRTR1_DACMU_DISABLE \
+        RV(FV_CNVRTR1_DACMU_DISABLE, FB_CNVRTR1_DACMU)
+
+
+/****************************
+ *      R_DACSR (0x19)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_DACSR_DBCM                        6
+#define FB_DACSR_DBR                         3
+#define FB_DACSR_DBM                         0
+
+/* Field Masks */
+#define FM_DACSR_DBCM                        0X3
+#define FM_DACSR_DBR                         0X3
+#define FM_DACSR_DBM                         0X7
+
+/* Field Values */
+#define FV_DACSR_DBCM_AUTO                   0x0
+#define FV_DACSR_DBCM_32                     0x1
+#define FV_DACSR_DBCM_40                     0x2
+#define FV_DACSR_DBCM_64                     0x3
+#define FV_DACSR_DBR_32                      0x0
+#define FV_DACSR_DBR_44_1                    0x1
+#define FV_DACSR_DBR_48                      0x2
+#define FV_DACSR_DBM_PT25                    0x0
+#define FV_DACSR_DBM_PT5                     0x1
+#define FV_DACSR_DBM_1                       0x2
+#define FV_DACSR_DBM_2                       0x3
+
+/* Register Masks */
+#define RM_DACSR_DBCM                        RM(FM_DACSR_DBCM, FB_DACSR_DBCM)
+#define RM_DACSR_DBR                         RM(FM_DACSR_DBR, FB_DACSR_DBR)
+#define RM_DACSR_DBM                         RM(FM_DACSR_DBM, FB_DACSR_DBM)
+
+/* Register Values */
+#define RV_DACSR_DBCM_AUTO \
+        RV(FV_DACSR_DBCM_AUTO, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_32 \
+        RV(FV_DACSR_DBCM_32, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_40 \
+        RV(FV_DACSR_DBCM_40, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_64 \
+        RV(FV_DACSR_DBCM_64, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBR_32                      RV(FV_DACSR_DBR_32, FB_DACSR_DBR)
+#define RV_DACSR_DBR_44_1 \
+        RV(FV_DACSR_DBR_44_1, FB_DACSR_DBR)
+
+#define RV_DACSR_DBR_48                      RV(FV_DACSR_DBR_48, FB_DACSR_DBR)
+#define RV_DACSR_DBM_PT25 \
+        RV(FV_DACSR_DBM_PT25, FB_DACSR_DBM)
+
+#define RV_DACSR_DBM_PT5                     RV(FV_DACSR_DBM_PT5, FB_DACSR_DBM)
+#define RV_DACSR_DBM_1                       RV(FV_DACSR_DBM_1, FB_DACSR_DBM)
+#define RV_DACSR_DBM_2                       RV(FV_DACSR_DBM_2, FB_DACSR_DBM)
+
+/****************************
+ *      R_PWRM1 (0x1A)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_PWRM1_BSTL                        7
+#define FB_PWRM1_BSTR                        6
+#define FB_PWRM1_PGAL                        5
+#define FB_PWRM1_PGAR                        4
+#define FB_PWRM1_ADCL                        3
+#define FB_PWRM1_ADCR                        2
+#define FB_PWRM1_MICB                        1
+#define FB_PWRM1_DIGENB                      0
+
+/* Field Masks */
+#define FM_PWRM1_BSTL                        0X1
+#define FM_PWRM1_BSTR                        0X1
+#define FM_PWRM1_PGAL                        0X1
+#define FM_PWRM1_PGAR                        0X1
+#define FM_PWRM1_ADCL                        0X1
+#define FM_PWRM1_ADCR                        0X1
+#define FM_PWRM1_MICB                        0X1
+#define FM_PWRM1_DIGENB                      0X1
+
+/* Field Values */
+#define FV_PWRM1_BSTL_ENABLE                 0x1
+#define FV_PWRM1_BSTL_DISABLE                0x0
+#define FV_PWRM1_BSTR_ENABLE                 0x1
+#define FV_PWRM1_BSTR_DISABLE                0x0
+#define FV_PWRM1_PGAL_ENABLE                 0x1
+#define FV_PWRM1_PGAL_DISABLE                0x0
+#define FV_PWRM1_PGAR_ENABLE                 0x1
+#define FV_PWRM1_PGAR_DISABLE                0x0
+#define FV_PWRM1_ADCL_ENABLE                 0x1
+#define FV_PWRM1_ADCL_DISABLE                0x0
+#define FV_PWRM1_ADCR_ENABLE                 0x1
+#define FV_PWRM1_ADCR_DISABLE                0x0
+#define FV_PWRM1_MICB_ENABLE                 0x1
+#define FV_PWRM1_MICB_DISABLE                0x0
+#define FV_PWRM1_DIGENB_DISABLE              0x1
+#define FV_PWRM1_DIGENB_ENABLE               0x0
+
+/* Register Masks */
+#define RM_PWRM1_BSTL                        RM(FM_PWRM1_BSTL, FB_PWRM1_BSTL)
+#define RM_PWRM1_BSTR                        RM(FM_PWRM1_BSTR, FB_PWRM1_BSTR)
+#define RM_PWRM1_PGAL                        RM(FM_PWRM1_PGAL, FB_PWRM1_PGAL)
+#define RM_PWRM1_PGAR                        RM(FM_PWRM1_PGAR, FB_PWRM1_PGAR)
+#define RM_PWRM1_ADCL                        RM(FM_PWRM1_ADCL, FB_PWRM1_ADCL)
+#define RM_PWRM1_ADCR                        RM(FM_PWRM1_ADCR, FB_PWRM1_ADCR)
+#define RM_PWRM1_MICB                        RM(FM_PWRM1_MICB, FB_PWRM1_MICB)
+#define RM_PWRM1_DIGENB \
+        RM(FM_PWRM1_DIGENB, FB_PWRM1_DIGENB)
+
+
+/* Register Values */
+#define RV_PWRM1_BSTL_ENABLE \
+        RV(FV_PWRM1_BSTL_ENABLE, FB_PWRM1_BSTL)
+
+#define RV_PWRM1_BSTL_DISABLE \
+        RV(FV_PWRM1_BSTL_DISABLE, FB_PWRM1_BSTL)
+
+#define RV_PWRM1_BSTR_ENABLE \
+        RV(FV_PWRM1_BSTR_ENABLE, FB_PWRM1_BSTR)
+
+#define RV_PWRM1_BSTR_DISABLE \
+        RV(FV_PWRM1_BSTR_DISABLE, FB_PWRM1_BSTR)
+
+#define RV_PWRM1_PGAL_ENABLE \
+        RV(FV_PWRM1_PGAL_ENABLE, FB_PWRM1_PGAL)
+
+#define RV_PWRM1_PGAL_DISABLE \
+        RV(FV_PWRM1_PGAL_DISABLE, FB_PWRM1_PGAL)
+
+#define RV_PWRM1_PGAR_ENABLE \
+        RV(FV_PWRM1_PGAR_ENABLE, FB_PWRM1_PGAR)
+
+#define RV_PWRM1_PGAR_DISABLE \
+        RV(FV_PWRM1_PGAR_DISABLE, FB_PWRM1_PGAR)
+
+#define RV_PWRM1_ADCL_ENABLE \
+        RV(FV_PWRM1_ADCL_ENABLE, FB_PWRM1_ADCL)
+
+#define RV_PWRM1_ADCL_DISABLE \
+        RV(FV_PWRM1_ADCL_DISABLE, FB_PWRM1_ADCL)
+
+#define RV_PWRM1_ADCR_ENABLE \
+        RV(FV_PWRM1_ADCR_ENABLE, FB_PWRM1_ADCR)
+
+#define RV_PWRM1_ADCR_DISABLE \
+        RV(FV_PWRM1_ADCR_DISABLE, FB_PWRM1_ADCR)
+
+#define RV_PWRM1_MICB_ENABLE \
+        RV(FV_PWRM1_MICB_ENABLE, FB_PWRM1_MICB)
+
+#define RV_PWRM1_MICB_DISABLE \
+        RV(FV_PWRM1_MICB_DISABLE, FB_PWRM1_MICB)
+
+#define RV_PWRM1_DIGENB_DISABLE \
+        RV(FV_PWRM1_DIGENB_DISABLE, FB_PWRM1_DIGENB)
+
+#define RV_PWRM1_DIGENB_ENABLE \
+        RV(FV_PWRM1_DIGENB_ENABLE, FB_PWRM1_DIGENB)
+
+
+/****************************
+ *      R_PWRM2 (0x1B)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_PWRM2_D2S                         7
+#define FB_PWRM2_HPL                         6
+#define FB_PWRM2_HPR                         5
+#define FB_PWRM2_SPKL                        4
+#define FB_PWRM2_SPKR                        3
+#define FB_PWRM2_INSELL                      2
+#define FB_PWRM2_INSELR                      1
+#define FB_PWRM2_VREF                        0
+
+/* Field Masks */
+#define FM_PWRM2_D2S                         0X1
+#define FM_PWRM2_HPL                         0X1
+#define FM_PWRM2_HPR                         0X1
+#define FM_PWRM2_SPKL                        0X1
+#define FM_PWRM2_SPKR                        0X1
+#define FM_PWRM2_INSELL                      0X1
+#define FM_PWRM2_INSELR                      0X1
+#define FM_PWRM2_VREF                        0X1
+
+/* Field Values */
+#define FV_PWRM2_D2S_ENABLE                  0x1
+#define FV_PWRM2_D2S_DISABLE                 0x0
+#define FV_PWRM2_HPL_ENABLE                  0x1
+#define FV_PWRM2_HPL_DISABLE                 0x0
+#define FV_PWRM2_HPR_ENABLE                  0x1
+#define FV_PWRM2_HPR_DISABLE                 0x0
+#define FV_PWRM2_SPKL_ENABLE                 0x1
+#define FV_PWRM2_SPKL_DISABLE                0x0
+#define FV_PWRM2_SPKR_ENABLE                 0x1
+#define FV_PWRM2_SPKR_DISABLE                0x0
+#define FV_PWRM2_INSELL_ENABLE               0x1
+#define FV_PWRM2_INSELL_DISABLE              0x0
+#define FV_PWRM2_INSELR_ENABLE               0x1
+#define FV_PWRM2_INSELR_DISABLE              0x0
+#define FV_PWRM2_VREF_ENABLE                 0x1
+#define FV_PWRM2_VREF_DISABLE                0x0
+
+/* Register Masks */
+#define RM_PWRM2_D2S                         RM(FM_PWRM2_D2S, FB_PWRM2_D2S)
+#define RM_PWRM2_HPL                         RM(FM_PWRM2_HPL, FB_PWRM2_HPL)
+#define RM_PWRM2_HPR                         RM(FM_PWRM2_HPR, FB_PWRM2_HPR)
+#define RM_PWRM2_SPKL                        RM(FM_PWRM2_SPKL, FB_PWRM2_SPKL)
+#define RM_PWRM2_SPKR                        RM(FM_PWRM2_SPKR, FB_PWRM2_SPKR)
+#define RM_PWRM2_INSELL \
+        RM(FM_PWRM2_INSELL, FB_PWRM2_INSELL)
+
+#define RM_PWRM2_INSELR \
+        RM(FM_PWRM2_INSELR, FB_PWRM2_INSELR)
+
+#define RM_PWRM2_VREF                        RM(FM_PWRM2_VREF, FB_PWRM2_VREF)
+
+/* Register Values */
+#define RV_PWRM2_D2S_ENABLE \
+        RV(FV_PWRM2_D2S_ENABLE, FB_PWRM2_D2S)
+
+#define RV_PWRM2_D2S_DISABLE \
+        RV(FV_PWRM2_D2S_DISABLE, FB_PWRM2_D2S)
+
+#define RV_PWRM2_HPL_ENABLE \
+        RV(FV_PWRM2_HPL_ENABLE, FB_PWRM2_HPL)
+
+#define RV_PWRM2_HPL_DISABLE \
+        RV(FV_PWRM2_HPL_DISABLE, FB_PWRM2_HPL)
+
+#define RV_PWRM2_HPR_ENABLE \
+        RV(FV_PWRM2_HPR_ENABLE, FB_PWRM2_HPR)
+
+#define RV_PWRM2_HPR_DISABLE \
+        RV(FV_PWRM2_HPR_DISABLE, FB_PWRM2_HPR)
+
+#define RV_PWRM2_SPKL_ENABLE \
+        RV(FV_PWRM2_SPKL_ENABLE, FB_PWRM2_SPKL)
+
+#define RV_PWRM2_SPKL_DISABLE \
+        RV(FV_PWRM2_SPKL_DISABLE, FB_PWRM2_SPKL)
+
+#define RV_PWRM2_SPKR_ENABLE \
+        RV(FV_PWRM2_SPKR_ENABLE, FB_PWRM2_SPKR)
+
+#define RV_PWRM2_SPKR_DISABLE \
+        RV(FV_PWRM2_SPKR_DISABLE, FB_PWRM2_SPKR)
+
+#define RV_PWRM2_INSELL_ENABLE \
+        RV(FV_PWRM2_INSELL_ENABLE, FB_PWRM2_INSELL)
+
+#define RV_PWRM2_INSELL_DISABLE \
+        RV(FV_PWRM2_INSELL_DISABLE, FB_PWRM2_INSELL)
+
+#define RV_PWRM2_INSELR_ENABLE \
+        RV(FV_PWRM2_INSELR_ENABLE, FB_PWRM2_INSELR)
+
+#define RV_PWRM2_INSELR_DISABLE \
+        RV(FV_PWRM2_INSELR_DISABLE, FB_PWRM2_INSELR)
+
+#define RV_PWRM2_VREF_ENABLE \
+        RV(FV_PWRM2_VREF_ENABLE, FB_PWRM2_VREF)
+
+#define RV_PWRM2_VREF_DISABLE \
+        RV(FV_PWRM2_VREF_DISABLE, FB_PWRM2_VREF)
+
+
+/******************************
+ *      R_CONFIG0 (0x1F)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CONFIG0_ASDM                      6
+#define FB_CONFIG0_DSDM                      4
+#define FB_CONFIG0_DC_BYPASS                 1
+#define FB_CONFIG0_SD_FORCE_ON               0
+
+/* Field Masks */
+#define FM_CONFIG0_ASDM                      0X3
+#define FM_CONFIG0_DSDM                      0X3
+#define FM_CONFIG0_DC_BYPASS                 0X1
+#define FM_CONFIG0_SD_FORCE_ON               0X1
+
+/* Field Values */
+#define FV_CONFIG0_ASDM_HALF                 0x1
+#define FV_CONFIG0_ASDM_FULL                 0x2
+#define FV_CONFIG0_ASDM_AUTO                 0x3
+#define FV_CONFIG0_DSDM_HALF                 0x1
+#define FV_CONFIG0_DSDM_FULL                 0x2
+#define FV_CONFIG0_DSDM_AUTO                 0x3
+#define FV_CONFIG0_DC_BYPASS_ENABLE          0x1
+#define FV_CONFIG0_DC_BYPASS_DISABLE         0x0
+#define FV_CONFIG0_SD_FORCE_ON_ENABLE        0x1
+#define FV_CONFIG0_SD_FORCE_ON_DISABLE       0x0
+
+/* Register Masks */
+#define RM_CONFIG0_ASDM \
+        RM(FM_CONFIG0_ASDM, FB_CONFIG0_ASDM)
+
+#define RM_CONFIG0_DSDM \
+        RM(FM_CONFIG0_DSDM, FB_CONFIG0_DSDM)
+
+#define RM_CONFIG0_DC_BYPASS \
+        RM(FM_CONFIG0_DC_BYPASS, FB_CONFIG0_DC_BYPASS)
+
+#define RM_CONFIG0_SD_FORCE_ON \
+        RM(FM_CONFIG0_SD_FORCE_ON, FB_CONFIG0_SD_FORCE_ON)
+
+
+/* Register Values */
+#define RV_CONFIG0_ASDM_HALF \
+        RV(FV_CONFIG0_ASDM_HALF, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_ASDM_FULL \
+        RV(FV_CONFIG0_ASDM_FULL, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_ASDM_AUTO \
+        RV(FV_CONFIG0_ASDM_AUTO, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_DSDM_HALF \
+        RV(FV_CONFIG0_DSDM_HALF, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DSDM_FULL \
+        RV(FV_CONFIG0_DSDM_FULL, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DSDM_AUTO \
+        RV(FV_CONFIG0_DSDM_AUTO, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DC_BYPASS_ENABLE \
+        RV(FV_CONFIG0_DC_BYPASS_ENABLE, FB_CONFIG0_DC_BYPASS)
+
+#define RV_CONFIG0_DC_BYPASS_DISABLE \
+        RV(FV_CONFIG0_DC_BYPASS_DISABLE, FB_CONFIG0_DC_BYPASS)
+
+#define RV_CONFIG0_SD_FORCE_ON_ENABLE \
+        RV(FV_CONFIG0_SD_FORCE_ON_ENABLE, FB_CONFIG0_SD_FORCE_ON)
+
+#define RV_CONFIG0_SD_FORCE_ON_DISABLE \
+        RV(FV_CONFIG0_SD_FORCE_ON_DISABLE, FB_CONFIG0_SD_FORCE_ON)
+
+
+/******************************
+ *      R_CONFIG1 (0x20)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CONFIG1_EQ2_EN                    7
+#define FB_CONFIG1_EQ2_BE                    4
+#define FB_CONFIG1_EQ1_EN                    3
+#define FB_CONFIG1_EQ1_BE                    0
+
+/* Field Masks */
+#define FM_CONFIG1_EQ2_EN                    0X1
+#define FM_CONFIG1_EQ2_BE                    0X7
+#define FM_CONFIG1_EQ1_EN                    0X1
+#define FM_CONFIG1_EQ1_BE                    0X7
+
+/* Field Values */
+#define FV_CONFIG1_EQ2_EN_ENABLE             0x1
+#define FV_CONFIG1_EQ2_EN_DISABLE            0x0
+#define FV_CONFIG1_EQ2_BE_PRE                0x0
+#define FV_CONFIG1_EQ2_BE_PRE_EQ_0           0x1
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_1          0x2
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_2          0x3
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_3          0x4
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_4          0x5
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_5          0x6
+#define FV_CONFIG1_EQ1_EN_ENABLE             0x1
+#define FV_CONFIG1_EQ1_EN_DISABLE            0x0
+#define FV_CONFIG1_EQ1_BE_PRE                0x0
+#define FV_CONFIG1_EQ1_BE_PRE_EQ_0           0x1
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_1          0x2
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_2          0x3
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_3          0x4
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_4          0x5
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_5          0x6
+
+/* Register Masks */
+#define RM_CONFIG1_EQ2_EN \
+        RM(FM_CONFIG1_EQ2_EN, FB_CONFIG1_EQ2_EN)
+
+#define RM_CONFIG1_EQ2_BE \
+        RM(FM_CONFIG1_EQ2_BE, FB_CONFIG1_EQ2_BE)
+
+#define RM_CONFIG1_EQ1_EN \
+        RM(FM_CONFIG1_EQ1_EN, FB_CONFIG1_EQ1_EN)
+
+#define RM_CONFIG1_EQ1_BE \
+        RM(FM_CONFIG1_EQ1_BE, FB_CONFIG1_EQ1_BE)
+
+
+/* Register Values */
+#define RV_CONFIG1_EQ2_EN_ENABLE \
+        RV(FV_CONFIG1_EQ2_EN_ENABLE, FB_CONFIG1_EQ2_EN)
+
+#define RV_CONFIG1_EQ2_EN_DISABLE \
+        RV(FV_CONFIG1_EQ2_EN_DISABLE, FB_CONFIG1_EQ2_EN)
+
+#define RV_CONFIG1_EQ2_BE_PRE \
+        RV(FV_CONFIG1_EQ2_BE_PRE, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ_0 \
+        RV(FV_CONFIG1_EQ2_BE_PRE_EQ_0, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_1 \
+        RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_1, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_2 \
+        RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_2, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_3 \
+        RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_3, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_4 \
+        RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_4, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_5 \
+        RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_5, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ1_EN_ENABLE \
+        RV(FV_CONFIG1_EQ1_EN_ENABLE, FB_CONFIG1_EQ1_EN)
+
+#define RV_CONFIG1_EQ1_EN_DISABLE \
+        RV(FV_CONFIG1_EQ1_EN_DISABLE, FB_CONFIG1_EQ1_EN)
+
+#define RV_CONFIG1_EQ1_BE_PRE \
+        RV(FV_CONFIG1_EQ1_BE_PRE, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ_0 \
+        RV(FV_CONFIG1_EQ1_BE_PRE_EQ_0, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_1 \
+        RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_1, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_2 \
+        RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_2, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_3 \
+        RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_3, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_4 \
+        RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_4, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_5 \
+        RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_5, FB_CONFIG1_EQ1_BE)
+
+
+/******************************
+ *      R_DMICCTL (0x24)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_DMICCTL_DMICEN                    7
+#define FB_DMICCTL_DMONO                     4
+#define FB_DMICCTL_DMPHADJ                   2
+#define FB_DMICCTL_DMRATE                    0
+
+/* Field Masks */
+#define FM_DMICCTL_DMICEN                    0X1
+#define FM_DMICCTL_DMONO                     0X1
+#define FM_DMICCTL_DMPHADJ                   0X3
+#define FM_DMICCTL_DMRATE                    0X3
+
+/* Field Values */
+#define FV_DMICCTL_DMICEN_ENABLE             0x1
+#define FV_DMICCTL_DMICEN_DISABLE            0x0
+#define FV_DMICCTL_DMONO_STEREO              0x0
+#define FV_DMICCTL_DMONO_MONO                0x1
+
+/* Register Masks */
+#define RM_DMICCTL_DMICEN \
+        RM(FM_DMICCTL_DMICEN, FB_DMICCTL_DMICEN)
+
+#define RM_DMICCTL_DMONO \
+        RM(FM_DMICCTL_DMONO, FB_DMICCTL_DMONO)
+
+#define RM_DMICCTL_DMPHADJ \
+        RM(FM_DMICCTL_DMPHADJ, FB_DMICCTL_DMPHADJ)
+
+#define RM_DMICCTL_DMRATE \
+        RM(FM_DMICCTL_DMRATE, FB_DMICCTL_DMRATE)
+
+
+/* Register Values */
+#define RV_DMICCTL_DMICEN_ENABLE \
+        RV(FV_DMICCTL_DMICEN_ENABLE, FB_DMICCTL_DMICEN)
+
+#define RV_DMICCTL_DMICEN_DISABLE \
+        RV(FV_DMICCTL_DMICEN_DISABLE, FB_DMICCTL_DMICEN)
+
+#define RV_DMICCTL_DMONO_STEREO \
+        RV(FV_DMICCTL_DMONO_STEREO, FB_DMICCTL_DMONO)
+
+#define RV_DMICCTL_DMONO_MONO \
+        RV(FV_DMICCTL_DMONO_MONO, FB_DMICCTL_DMONO)
+
+
+/*****************************
+ *      R_CLECTL (0x25)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_CLECTL_LVL_MODE                   4
+#define FB_CLECTL_WINDOWSEL                  3
+#define FB_CLECTL_EXP_EN                     2
+#define FB_CLECTL_LIMIT_EN                   1
+#define FB_CLECTL_COMP_EN                    0
+
+/* Field Masks */
+#define FM_CLECTL_LVL_MODE                   0X1
+#define FM_CLECTL_WINDOWSEL                  0X1
+#define FM_CLECTL_EXP_EN                     0X1
+#define FM_CLECTL_LIMIT_EN                   0X1
+#define FM_CLECTL_COMP_EN                    0X1
+
+/* Field Values */
+#define FV_CLECTL_LVL_MODE_AVG               0x0
+#define FV_CLECTL_LVL_MODE_PEAK              0x1
+#define FV_CLECTL_WINDOWSEL_512              0x0
+#define FV_CLECTL_WINDOWSEL_64               0x1
+#define FV_CLECTL_EXP_EN_ENABLE              0x1
+#define FV_CLECTL_EXP_EN_DISABLE             0x0
+#define FV_CLECTL_LIMIT_EN_ENABLE            0x1
+#define FV_CLECTL_LIMIT_EN_DISABLE           0x0
+#define FV_CLECTL_COMP_EN_ENABLE             0x1
+#define FV_CLECTL_COMP_EN_DISABLE            0x0
+
+/* Register Masks */
+#define RM_CLECTL_LVL_MODE \
+        RM(FM_CLECTL_LVL_MODE, FB_CLECTL_LVL_MODE)
+
+#define RM_CLECTL_WINDOWSEL \
+        RM(FM_CLECTL_WINDOWSEL, FB_CLECTL_WINDOWSEL)
+
+#define RM_CLECTL_EXP_EN \
+        RM(FM_CLECTL_EXP_EN, FB_CLECTL_EXP_EN)
+
+#define RM_CLECTL_LIMIT_EN \
+        RM(FM_CLECTL_LIMIT_EN, FB_CLECTL_LIMIT_EN)
+
+#define RM_CLECTL_COMP_EN \
+        RM(FM_CLECTL_COMP_EN, FB_CLECTL_COMP_EN)
+
+
+/* Register Values */
+#define RV_CLECTL_LVL_MODE_AVG \
+        RV(FV_CLECTL_LVL_MODE_AVG, FB_CLECTL_LVL_MODE)
+
+#define RV_CLECTL_LVL_MODE_PEAK \
+        RV(FV_CLECTL_LVL_MODE_PEAK, FB_CLECTL_LVL_MODE)
+
+#define RV_CLECTL_WINDOWSEL_512 \
+        RV(FV_CLECTL_WINDOWSEL_512, FB_CLECTL_WINDOWSEL)
+
+#define RV_CLECTL_WINDOWSEL_64 \
+        RV(FV_CLECTL_WINDOWSEL_64, FB_CLECTL_WINDOWSEL)
+
+#define RV_CLECTL_EXP_EN_ENABLE \
+        RV(FV_CLECTL_EXP_EN_ENABLE, FB_CLECTL_EXP_EN)
+
+#define RV_CLECTL_EXP_EN_DISABLE \
+        RV(FV_CLECTL_EXP_EN_DISABLE, FB_CLECTL_EXP_EN)
+
+#define RV_CLECTL_LIMIT_EN_ENABLE \
+        RV(FV_CLECTL_LIMIT_EN_ENABLE, FB_CLECTL_LIMIT_EN)
+
+#define RV_CLECTL_LIMIT_EN_DISABLE \
+        RV(FV_CLECTL_LIMIT_EN_DISABLE, FB_CLECTL_LIMIT_EN)
+
+#define RV_CLECTL_COMP_EN_ENABLE \
+        RV(FV_CLECTL_COMP_EN_ENABLE, FB_CLECTL_COMP_EN)
+
+#define RV_CLECTL_COMP_EN_DISABLE \
+        RV(FV_CLECTL_COMP_EN_DISABLE, FB_CLECTL_COMP_EN)
+
+
+/*****************************
+ *      R_MUGAIN (0x26)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_MUGAIN_CLEMUG                     0
+
+/* Field Masks */
+#define FM_MUGAIN_CLEMUG                     0X1F
+
+/* Field Values */
+#define FV_MUGAIN_CLEMUG_46PT5DB             0x1F
+#define FV_MUGAIN_CLEMUG_0DB                 0x0
+
+/* Register Masks */
+#define RM_MUGAIN_CLEMUG \
+        RM(FM_MUGAIN_CLEMUG, FB_MUGAIN_CLEMUG)
+
+
+/* Register Values */
+#define RV_MUGAIN_CLEMUG_46PT5DB \
+        RV(FV_MUGAIN_CLEMUG_46PT5DB, FB_MUGAIN_CLEMUG)
+
+#define RV_MUGAIN_CLEMUG_0DB \
+        RV(FV_MUGAIN_CLEMUG_0DB, FB_MUGAIN_CLEMUG)
+
+
+/*****************************
+ *      R_COMPTH (0x27)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_COMPTH                            0
+
+/* Field Masks */
+#define FM_COMPTH                            0XFF
+
+/* Field Values */
+#define FV_COMPTH_0DB                        0xFF
+#define FV_COMPTH_N95PT625DB                 0x0
+
+/* Register Masks */
+#define RM_COMPTH                            RM(FM_COMPTH, FB_COMPTH)
+
+/* Register Values */
+#define RV_COMPTH_0DB                        RV(FV_COMPTH_0DB, FB_COMPTH)
+#define RV_COMPTH_N95PT625DB \
+        RV(FV_COMPTH_N95PT625DB, FB_COMPTH)
+
+
+/*****************************
+ *      R_CMPRAT (0x28)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_CMPRAT                            0
+
+/* Field Masks */
+#define FM_CMPRAT                            0X1F
+
+/* Register Masks */
+#define RM_CMPRAT                            RM(FM_CMPRAT, FB_CMPRAT)
+
+/******************************
+ *      R_CATKTCL (0x29)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CATKTCL                           0
+
+/* Field Masks */
+#define FM_CATKTCL                           0XFF
+
+/* Register Masks */
+#define RM_CATKTCL                           RM(FM_CATKTCL, FB_CATKTCL)
+
+/******************************
+ *      R_CATKTCH (0x2A)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CATKTCH                           0
+
+/* Field Masks */
+#define FM_CATKTCH                           0XFF
+
+/* Register Masks */
+#define RM_CATKTCH                           RM(FM_CATKTCH, FB_CATKTCH)
+
+/******************************
+ *      R_CRELTCL (0x2B)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CRELTCL                           0
+
+/* Field Masks */
+#define FM_CRELTCL                           0XFF
+
+/* Register Masks */
+#define RM_CRELTCL                           RM(FM_CRELTCL, FB_CRELTCL)
+
+/******************************
+ *      R_CRELTCH (0x2C)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CRELTCH                           0
+
+/* Field Masks */
+#define FM_CRELTCH                           0XFF
+
+/* Register Masks */
+#define RM_CRELTCH                           RM(FM_CRELTCH, FB_CRELTCH)
+
+/****************************
+ *      R_LIMTH (0x2D)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_LIMTH                             0
+
+/* Field Masks */
+#define FM_LIMTH                             0XFF
+
+/* Field Values */
+#define FV_LIMTH_0DB                         0xFF
+#define FV_LIMTH_N95PT625DB                  0x0
+
+/* Register Masks */
+#define RM_LIMTH                             RM(FM_LIMTH, FB_LIMTH)
+
+/* Register Values */
+#define RV_LIMTH_0DB                         RV(FV_LIMTH_0DB, FB_LIMTH)
+#define RV_LIMTH_N95PT625DB                  RV(FV_LIMTH_N95PT625DB, FB_LIMTH)
+
+/*****************************
+ *      R_LIMTGT (0x2E)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_LIMTGT                            0
+
+/* Field Masks */
+#define FM_LIMTGT                            0XFF
+
+/* Field Values */
+#define FV_LIMTGT_0DB                        0xFF
+#define FV_LIMTGT_N95PT625DB                 0x0
+
+/* Register Masks */
+#define RM_LIMTGT                            RM(FM_LIMTGT, FB_LIMTGT)
+
+/* Register Values */
+#define RV_LIMTGT_0DB                        RV(FV_LIMTGT_0DB, FB_LIMTGT)
+#define RV_LIMTGT_N95PT625DB \
+        RV(FV_LIMTGT_N95PT625DB, FB_LIMTGT)
+
+
+/******************************
+ *      R_LATKTCL (0x2F)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LATKTCL                           0
+
+/* Field Masks */
+#define FM_LATKTCL                           0XFF
+
+/* Register Masks */
+#define RM_LATKTCL                           RM(FM_LATKTCL, FB_LATKTCL)
+
+/******************************
+ *      R_LATKTCH (0x30)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LATKTCH                           0
+
+/* Field Masks */
+#define FM_LATKTCH                           0XFF
+
+/* Register Masks */
+#define RM_LATKTCH                           RM(FM_LATKTCH, FB_LATKTCH)
+
+/******************************
+ *      R_LRELTCL (0x31)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LRELTCL                           0
+
+/* Field Masks */
+#define FM_LRELTCL                           0XFF
+
+/* Register Masks */
+#define RM_LRELTCL                           RM(FM_LRELTCL, FB_LRELTCL)
+
+/******************************
+ *      R_LRELTCH (0x32)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LRELTCH                           0
+
+/* Field Masks */
+#define FM_LRELTCH                           0XFF
+
+/* Register Masks */
+#define RM_LRELTCH                           RM(FM_LRELTCH, FB_LRELTCH)
+
+/****************************
+ *      R_EXPTH (0x33)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_EXPTH                             0
+
+/* Field Masks */
+#define FM_EXPTH                             0XFF
+
+/* Field Values */
+#define FV_EXPTH_0DB                         0xFF
+#define FV_EXPTH_N95PT625DB                  0x0
+
+/* Register Masks */
+#define RM_EXPTH                             RM(FM_EXPTH, FB_EXPTH)
+
+/* Register Values */
+#define RV_EXPTH_0DB                         RV(FV_EXPTH_0DB, FB_EXPTH)
+#define RV_EXPTH_N95PT625DB                  RV(FV_EXPTH_N95PT625DB, FB_EXPTH)
+
+/*****************************
+ *      R_EXPRAT (0x34)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_EXPRAT                            0
+
+/* Field Masks */
+#define FM_EXPRAT                            0X7
+
+/* Register Masks */
+#define RM_EXPRAT                            RM(FM_EXPRAT, FB_EXPRAT)
+
+/******************************
+ *      R_XATKTCL (0x35)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XATKTCL                           0
+
+/* Field Masks */
+#define FM_XATKTCL                           0XFF
+
+/* Register Masks */
+#define RM_XATKTCL                           RM(FM_XATKTCL, FB_XATKTCL)
+
+/******************************
+ *      R_XATKTCH (0x36)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XATKTCH                           0
+
+/* Field Masks */
+#define FM_XATKTCH                           0XFF
+
+/* Register Masks */
+#define RM_XATKTCH                           RM(FM_XATKTCH, FB_XATKTCH)
+
+/******************************
+ *      R_XRELTCL (0x37)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XRELTCL                           0
+
+/* Field Masks */
+#define FM_XRELTCL                           0XFF
+
+/* Register Masks */
+#define RM_XRELTCL                           RM(FM_XRELTCL, FB_XRELTCL)
+
+/******************************
+ *      R_XRELTCH (0x38)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XRELTCH                           0
+
+/* Field Masks */
+#define FM_XRELTCH                           0XFF
+
+/* Register Masks */
+#define RM_XRELTCH                           RM(FM_XRELTCH, FB_XRELTCH)
+
+/****************************
+ *      R_FXCTL (0x39)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_FXCTL_3DEN                        4
+#define FB_FXCTL_TEEN                        3
+#define FB_FXCTL_TNLFBYPASS                  2
+#define FB_FXCTL_BEEN                        1
+#define FB_FXCTL_BNLFBYPASS                  0
+
+/* Field Masks */
+#define FM_FXCTL_3DEN                        0X1
+#define FM_FXCTL_TEEN                        0X1
+#define FM_FXCTL_TNLFBYPASS                  0X1
+#define FM_FXCTL_BEEN                        0X1
+#define FM_FXCTL_BNLFBYPASS                  0X1
+
+/* Field Values */
+#define FV_FXCTL_3DEN_ENABLE                 0x1
+#define FV_FXCTL_3DEN_DISABLE                0x0
+#define FV_FXCTL_TEEN_ENABLE                 0x1
+#define FV_FXCTL_TEEN_DISABLE                0x0
+#define FV_FXCTL_TNLFBYPASS_ENABLE           0x1
+#define FV_FXCTL_TNLFBYPASS_DISABLE          0x0
+#define FV_FXCTL_BEEN_ENABLE                 0x1
+#define FV_FXCTL_BEEN_DISABLE                0x0
+#define FV_FXCTL_BNLFBYPASS_ENABLE           0x1
+#define FV_FXCTL_BNLFBYPASS_DISABLE          0x0
+
+/* Register Masks */
+#define RM_FXCTL_3DEN                        RM(FM_FXCTL_3DEN, FB_FXCTL_3DEN)
+#define RM_FXCTL_TEEN                        RM(FM_FXCTL_TEEN, FB_FXCTL_TEEN)
+#define RM_FXCTL_TNLFBYPASS \
+        RM(FM_FXCTL_TNLFBYPASS, FB_FXCTL_TNLFBYPASS)
+
+#define RM_FXCTL_BEEN                        RM(FM_FXCTL_BEEN, FB_FXCTL_BEEN)
+#define RM_FXCTL_BNLFBYPASS \
+        RM(FM_FXCTL_BNLFBYPASS, FB_FXCTL_BNLFBYPASS)
+
+
+/* Register Values */
+#define RV_FXCTL_3DEN_ENABLE \
+        RV(FV_FXCTL_3DEN_ENABLE, FB_FXCTL_3DEN)
+
+#define RV_FXCTL_3DEN_DISABLE \
+        RV(FV_FXCTL_3DEN_DISABLE, FB_FXCTL_3DEN)
+
+#define RV_FXCTL_TEEN_ENABLE \
+        RV(FV_FXCTL_TEEN_ENABLE, FB_FXCTL_TEEN)
+
+#define RV_FXCTL_TEEN_DISABLE \
+        RV(FV_FXCTL_TEEN_DISABLE, FB_FXCTL_TEEN)
+
+#define RV_FXCTL_TNLFBYPASS_ENABLE \
+        RV(FV_FXCTL_TNLFBYPASS_ENABLE, FB_FXCTL_TNLFBYPASS)
+
+#define RV_FXCTL_TNLFBYPASS_DISABLE \
+        RV(FV_FXCTL_TNLFBYPASS_DISABLE, FB_FXCTL_TNLFBYPASS)
+
+#define RV_FXCTL_BEEN_ENABLE \
+        RV(FV_FXCTL_BEEN_ENABLE, FB_FXCTL_BEEN)
+
+#define RV_FXCTL_BEEN_DISABLE \
+        RV(FV_FXCTL_BEEN_DISABLE, FB_FXCTL_BEEN)
+
+#define RV_FXCTL_BNLFBYPASS_ENABLE \
+        RV(FV_FXCTL_BNLFBYPASS_ENABLE, FB_FXCTL_BNLFBYPASS)
+
+#define RV_FXCTL_BNLFBYPASS_DISABLE \
+        RV(FV_FXCTL_BNLFBYPASS_DISABLE, FB_FXCTL_BNLFBYPASS)
+
+
+/*******************************
+ *      R_DACCRWRL (0x3A)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRL_DACCRWDL                 0
+
+/* Field Masks */
+#define FM_DACCRWRL_DACCRWDL                 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRL_DACCRWDL \
+        RM(FM_DACCRWRL_DACCRWDL, FB_DACCRWRL_DACCRWDL)
+
+
+/*******************************
+ *      R_DACCRWRM (0x3B)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRM_DACCRWDM                 0
+
+/* Field Masks */
+#define FM_DACCRWRM_DACCRWDM                 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRM_DACCRWDM \
+        RM(FM_DACCRWRM_DACCRWDM, FB_DACCRWRM_DACCRWDM)
+
+
+/*******************************
+ *      R_DACCRWRH (0x3C)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRH_DACCRWDH                 0
+
+/* Field Masks */
+#define FM_DACCRWRH_DACCRWDH                 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRH_DACCRWDH \
+        RM(FM_DACCRWRH_DACCRWDH, FB_DACCRWRH_DACCRWDH)
+
+
+/*******************************
+ *      R_DACCRRDL (0x3D)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDL                          0
+
+/* Field Masks */
+#define FM_DACCRRDL                          0XFF
+
+/* Register Masks */
+#define RM_DACCRRDL                          RM(FM_DACCRRDL, FB_DACCRRDL)
+
+/*******************************
+ *      R_DACCRRDM (0x3E)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDM                          0
+
+/* Field Masks */
+#define FM_DACCRRDM                          0XFF
+
+/* Register Masks */
+#define RM_DACCRRDM                          RM(FM_DACCRRDM, FB_DACCRRDM)
+
+/*******************************
+ *      R_DACCRRDH (0x3F)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDH                          0
+
+/* Field Masks */
+#define FM_DACCRRDH                          0XFF
+
+/* Register Masks */
+#define RM_DACCRRDH                          RM(FM_DACCRRDH, FB_DACCRRDH)
+
+/********************************
+ *      R_DACCRADDR (0x40)      *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACCRADDR_DACCRADD                0
+
+/* Field Masks */
+#define FM_DACCRADDR_DACCRADD                0XFF
+
+/* Register Masks */
+#define RM_DACCRADDR_DACCRADD \
+        RM(FM_DACCRADDR_DACCRADD, FB_DACCRADDR_DACCRADD)
+
+
+/******************************
+ *      R_DCOFSEL (0x41)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_DCOFSEL_DC_COEF_SEL               0
+
+/* Field Masks */
+#define FM_DCOFSEL_DC_COEF_SEL               0X7
+
+/* Field Values */
+#define FV_DCOFSEL_DC_COEF_SEL_2_N8          0x0
+#define FV_DCOFSEL_DC_COEF_SEL_2_N9          0x1
+#define FV_DCOFSEL_DC_COEF_SEL_2_N10         0x2
+#define FV_DCOFSEL_DC_COEF_SEL_2_N11         0x3
+#define FV_DCOFSEL_DC_COEF_SEL_2_N12         0x4
+#define FV_DCOFSEL_DC_COEF_SEL_2_N13         0x5
+#define FV_DCOFSEL_DC_COEF_SEL_2_N14         0x6
+#define FV_DCOFSEL_DC_COEF_SEL_2_N15         0x7
+
+/* Register Masks */
+#define RM_DCOFSEL_DC_COEF_SEL \
+        RM(FM_DCOFSEL_DC_COEF_SEL, FB_DCOFSEL_DC_COEF_SEL)
+
+
+/* Register Values */
+#define RV_DCOFSEL_DC_COEF_SEL_2_N8 \
+        RV(FV_DCOFSEL_DC_COEF_SEL_2_N8, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N9 \
+        RV(FV_DCOFSEL_DC_COEF_SEL_2_N9, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N10 \
+        RV(FV_DCOFSEL_DC_COEF_SEL_2_N10, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N11 \
+        RV(FV_DCOFSEL_DC_COEF_SEL_2_N11, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N12 \
+        RV(FV_DCOFSEL_DC_COEF_SEL_2_N12, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N13 \
+        RV(FV_DCOFSEL_DC_COEF_SEL_2_N13, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N14 \
+        RV(FV_DCOFSEL_DC_COEF_SEL_2_N14, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N15 \
+        RV(FV_DCOFSEL_DC_COEF_SEL_2_N15, FB_DCOFSEL_DC_COEF_SEL)
+
+
+/******************************
+ *      R_PLLCTL9 (0x4E)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL9_REFDIV_PLL1               0
+
+/* Field Masks */
+#define FM_PLLCTL9_REFDIV_PLL1               0XFF
+
+/* Register Masks */
+#define RM_PLLCTL9_REFDIV_PLL1 \
+        RM(FM_PLLCTL9_REFDIV_PLL1, FB_PLLCTL9_REFDIV_PLL1)
+
+
+/******************************
+ *      R_PLLCTLA (0x4F)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLA_OUTDIV_PLL1               0
+
+/* Field Masks */
+#define FM_PLLCTLA_OUTDIV_PLL1               0XFF
+
+/* Register Masks */
+#define RM_PLLCTLA_OUTDIV_PLL1 \
+        RM(FM_PLLCTLA_OUTDIV_PLL1, FB_PLLCTLA_OUTDIV_PLL1)
+
+
+/******************************
+ *      R_PLLCTLB (0x50)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLB_FBDIV_PLL1L               0
+
+/* Field Masks */
+#define FM_PLLCTLB_FBDIV_PLL1L               0XFF
+
+/* Register Masks */
+#define RM_PLLCTLB_FBDIV_PLL1L \
+        RM(FM_PLLCTLB_FBDIV_PLL1L, FB_PLLCTLB_FBDIV_PLL1L)
+
+
+/******************************
+ *      R_PLLCTLC (0x51)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLC_FBDIV_PLL1H               0
+
+/* Field Masks */
+#define FM_PLLCTLC_FBDIV_PLL1H               0X7
+
+/* Register Masks */
+#define RM_PLLCTLC_FBDIV_PLL1H \
+        RM(FM_PLLCTLC_FBDIV_PLL1H, FB_PLLCTLC_FBDIV_PLL1H)
+
+
+/******************************
+ *      R_PLLCTLD (0x52)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLD_RZ_PLL1                   3
+#define FB_PLLCTLD_CP_PLL1                   0
+
+/* Field Masks */
+#define FM_PLLCTLD_RZ_PLL1                   0X7
+#define FM_PLLCTLD_CP_PLL1                   0X7
+
+/* Register Masks */
+#define RM_PLLCTLD_RZ_PLL1 \
+        RM(FM_PLLCTLD_RZ_PLL1, FB_PLLCTLD_RZ_PLL1)
+
+#define RM_PLLCTLD_CP_PLL1 \
+        RM(FM_PLLCTLD_CP_PLL1, FB_PLLCTLD_CP_PLL1)
+
+
+/******************************
+ *      R_PLLCTLE (0x53)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLE_REFDIV_PLL2               0
+
+/* Field Masks */
+#define FM_PLLCTLE_REFDIV_PLL2               0XFF
+
+/* Register Masks */
+#define RM_PLLCTLE_REFDIV_PLL2 \
+        RM(FM_PLLCTLE_REFDIV_PLL2, FB_PLLCTLE_REFDIV_PLL2)
+
+
+/******************************
+ *      R_PLLCTLF (0x54)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLF_OUTDIV_PLL2               0
+
+/* Field Masks */
+#define FM_PLLCTLF_OUTDIV_PLL2               0XFF
+
+/* Register Masks */
+#define RM_PLLCTLF_OUTDIV_PLL2 \
+        RM(FM_PLLCTLF_OUTDIV_PLL2, FB_PLLCTLF_OUTDIV_PLL2)
+
+
+/*******************************
+ *      R_PLLCTL10 (0x55)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL10_FBDIV_PLL2L              0
+
+/* Field Masks */
+#define FM_PLLCTL10_FBDIV_PLL2L              0XFF
+
+/* Register Masks */
+#define RM_PLLCTL10_FBDIV_PLL2L \
+        RM(FM_PLLCTL10_FBDIV_PLL2L, FB_PLLCTL10_FBDIV_PLL2L)
+
+
+/*******************************
+ *      R_PLLCTL11 (0x56)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL11_FBDIV_PLL2H              0
+
+/* Field Masks */
+#define FM_PLLCTL11_FBDIV_PLL2H              0X7
+
+/* Register Masks */
+#define RM_PLLCTL11_FBDIV_PLL2H \
+        RM(FM_PLLCTL11_FBDIV_PLL2H, FB_PLLCTL11_FBDIV_PLL2H)
+
+
+/*******************************
+ *      R_PLLCTL12 (0x57)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL12_RZ_PLL2                  3
+#define FB_PLLCTL12_CP_PLL2                  0
+
+/* Field Masks */
+#define FM_PLLCTL12_RZ_PLL2                  0X7
+#define FM_PLLCTL12_CP_PLL2                  0X7
+
+/* Register Masks */
+#define RM_PLLCTL12_RZ_PLL2 \
+        RM(FM_PLLCTL12_RZ_PLL2, FB_PLLCTL12_RZ_PLL2)
+
+#define RM_PLLCTL12_CP_PLL2 \
+        RM(FM_PLLCTL12_CP_PLL2, FB_PLLCTL12_CP_PLL2)
+
+
+/*******************************
+ *      R_PLLCTL1B (0x60)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL1B_VCOI_PLL2                4
+#define FB_PLLCTL1B_VCOI_PLL1                2
+
+/* Field Masks */
+#define FM_PLLCTL1B_VCOI_PLL2                0X3
+#define FM_PLLCTL1B_VCOI_PLL1                0X3
+
+/* Register Masks */
+#define RM_PLLCTL1B_VCOI_PLL2 \
+        RM(FM_PLLCTL1B_VCOI_PLL2, FB_PLLCTL1B_VCOI_PLL2)
+
+#define RM_PLLCTL1B_VCOI_PLL1 \
+        RM(FM_PLLCTL1B_VCOI_PLL1, FB_PLLCTL1B_VCOI_PLL1)
+
+
+/*******************************
+ *      R_PLLCTL1C (0x61)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL1C_PDB_PLL2                 2
+#define FB_PLLCTL1C_PDB_PLL1                 1
+
+/* Field Masks */
+#define FM_PLLCTL1C_PDB_PLL2                 0X1
+#define FM_PLLCTL1C_PDB_PLL1                 0X1
+
+/* Field Values */
+#define FV_PLLCTL1C_PDB_PLL2_ENABLE          0x1
+#define FV_PLLCTL1C_PDB_PLL2_DISABLE         0x0
+#define FV_PLLCTL1C_PDB_PLL1_ENABLE          0x1
+#define FV_PLLCTL1C_PDB_PLL1_DISABLE         0x0
+
+/* Register Masks */
+#define RM_PLLCTL1C_PDB_PLL2 \
+        RM(FM_PLLCTL1C_PDB_PLL2, FB_PLLCTL1C_PDB_PLL2)
+
+#define RM_PLLCTL1C_PDB_PLL1 \
+        RM(FM_PLLCTL1C_PDB_PLL1, FB_PLLCTL1C_PDB_PLL1)
+
+
+/* Register Values */
+#define RV_PLLCTL1C_PDB_PLL2_ENABLE \
+        RV(FV_PLLCTL1C_PDB_PLL2_ENABLE, FB_PLLCTL1C_PDB_PLL2)
+
+#define RV_PLLCTL1C_PDB_PLL2_DISABLE \
+        RV(FV_PLLCTL1C_PDB_PLL2_DISABLE, FB_PLLCTL1C_PDB_PLL2)
+
+#define RV_PLLCTL1C_PDB_PLL1_ENABLE \
+        RV(FV_PLLCTL1C_PDB_PLL1_ENABLE, FB_PLLCTL1C_PDB_PLL1)
+
+#define RV_PLLCTL1C_PDB_PLL1_DISABLE \
+        RV(FV_PLLCTL1C_PDB_PLL1_DISABLE, FB_PLLCTL1C_PDB_PLL1)
+
+
+/*******************************
+ *      R_TIMEBASE (0x77)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_TIMEBASE_DIVIDER                  0
+
+/* Field Masks */
+#define FM_TIMEBASE_DIVIDER                  0XFF
+
+/* Register Masks */
+#define RM_TIMEBASE_DIVIDER \
+        RM(FM_TIMEBASE_DIVIDER, FB_TIMEBASE_DIVIDER)
+
+
+/*****************************
+ *      R_DEVIDL (0x7D)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DEVIDL_DIDL                       0
+
+/* Field Masks */
+#define FM_DEVIDL_DIDL                       0XFF
+
+/* Register Masks */
+#define RM_DEVIDL_DIDL                       RM(FM_DEVIDL_DIDL, FB_DEVIDL_DIDL)
+
+/*****************************
+ *      R_DEVIDH (0x7E)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DEVIDH_DIDH                       0
+
+/* Field Masks */
+#define FM_DEVIDH_DIDH                       0XFF
+
+/* Register Masks */
+#define RM_DEVIDH_DIDH                       RM(FM_DEVIDH_DIDH, FB_DEVIDH_DIDH)
+
+/****************************
+ *      R_RESET (0x80)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_RESET                             0
+
+/* Field Masks */
+#define FM_RESET                             0XFF
+
+/* Field Values */
+#define FV_RESET_ENABLE                      0x85
+
+/* Register Masks */
+#define RM_RESET                             RM(FM_RESET, FB_RESET)
+
+/* Register Values */
+#define RV_RESET_ENABLE                      RV(FV_RESET_ENABLE, FB_RESET)
+
+/********************************
+ *      R_DACCRSTAT (0x8A)      *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACCRSTAT_DACCR_BUSY              7
+
+/* Field Masks */
+#define FM_DACCRSTAT_DACCR_BUSY              0X1
+
+/* Register Masks */
+#define RM_DACCRSTAT_DACCR_BUSY \
+        RM(FM_DACCRSTAT_DACCR_BUSY, FB_DACCRSTAT_DACCR_BUSY)
+
+
+/******************************
+ *      R_PLLCTL0 (0x8E)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL0_PLL2_LOCK                 1
+#define FB_PLLCTL0_PLL1_LOCK                 0
+
+/* Field Masks */
+#define FM_PLLCTL0_PLL2_LOCK                 0X1
+#define FM_PLLCTL0_PLL1_LOCK                 0X1
+
+/* Register Masks */
+#define RM_PLLCTL0_PLL2_LOCK \
+        RM(FM_PLLCTL0_PLL2_LOCK, FB_PLLCTL0_PLL2_LOCK)
+
+#define RM_PLLCTL0_PLL1_LOCK \
+        RM(FM_PLLCTL0_PLL1_LOCK, FB_PLLCTL0_PLL1_LOCK)
+
+
+/********************************
+ *      R_PLLREFSEL (0x8F)      *
+ ********************************/
+
+/* Field Offsets */
+#define FB_PLLREFSEL_PLL2_REF_SEL            4
+#define FB_PLLREFSEL_PLL1_REF_SEL            0
+
+/* Field Masks */
+#define FM_PLLREFSEL_PLL2_REF_SEL            0X7
+#define FM_PLLREFSEL_PLL1_REF_SEL            0X7
+
+/* Field Values */
+#define FV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1 0x0
+#define FV_PLLREFSEL_PLL2_REF_SEL_MCLK2      0x1
+#define FV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 0x0
+#define FV_PLLREFSEL_PLL1_REF_SEL_MCLK2      0x1
+
+/* Register Masks */
+#define RM_PLLREFSEL_PLL2_REF_SEL \
+        RM(FM_PLLREFSEL_PLL2_REF_SEL, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RM_PLLREFSEL_PLL1_REF_SEL \
+        RM(FM_PLLREFSEL_PLL1_REF_SEL, FB_PLLREFSEL_PLL1_REF_SEL)
+
+
+/* Register Values */
+#define RV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1 \
+        RV(FV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RV_PLLREFSEL_PLL2_REF_SEL_MCLK2 \
+        RV(FV_PLLREFSEL_PLL2_REF_SEL_MCLK2, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 \
+        RV(FV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1, FB_PLLREFSEL_PLL1_REF_SEL)
+
+#define RV_PLLREFSEL_PLL1_REF_SEL_MCLK2 \
+        RV(FV_PLLREFSEL_PLL1_REF_SEL_MCLK2, FB_PLLREFSEL_PLL1_REF_SEL)
+
+
+/*******************************
+ *      R_DACMBCEN (0xC7)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACMBCEN_MBCEN3                   2
+#define FB_DACMBCEN_MBCEN2                   1
+#define FB_DACMBCEN_MBCEN1                   0
+
+/* Field Masks */
+#define FM_DACMBCEN_MBCEN3                   0X1
+#define FM_DACMBCEN_MBCEN2                   0X1
+#define FM_DACMBCEN_MBCEN1                   0X1
+
+/* Register Masks */
+#define RM_DACMBCEN_MBCEN3 \
+        RM(FM_DACMBCEN_MBCEN3, FB_DACMBCEN_MBCEN3)
+
+#define RM_DACMBCEN_MBCEN2 \
+        RM(FM_DACMBCEN_MBCEN2, FB_DACMBCEN_MBCEN2)
+
+#define RM_DACMBCEN_MBCEN1 \
+        RM(FM_DACMBCEN_MBCEN1, FB_DACMBCEN_MBCEN1)
+
+
+/********************************
+ *      R_DACMBCCTL (0xC8)      *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACMBCCTL_LVLMODE3                5
+#define FB_DACMBCCTL_WINSEL3                 4
+#define FB_DACMBCCTL_LVLMODE2                3
+#define FB_DACMBCCTL_WINSEL2                 2
+#define FB_DACMBCCTL_LVLMODE1                1
+#define FB_DACMBCCTL_WINSEL1                 0
+
+/* Field Masks */
+#define FM_DACMBCCTL_LVLMODE3                0X1
+#define FM_DACMBCCTL_WINSEL3                 0X1
+#define FM_DACMBCCTL_LVLMODE2                0X1
+#define FM_DACMBCCTL_WINSEL2                 0X1
+#define FM_DACMBCCTL_LVLMODE1                0X1
+#define FM_DACMBCCTL_WINSEL1                 0X1
+
+/* Register Masks */
+#define RM_DACMBCCTL_LVLMODE3 \
+        RM(FM_DACMBCCTL_LVLMODE3, FB_DACMBCCTL_LVLMODE3)
+
+#define RM_DACMBCCTL_WINSEL3 \
+        RM(FM_DACMBCCTL_WINSEL3, FB_DACMBCCTL_WINSEL3)
+
+#define RM_DACMBCCTL_LVLMODE2 \
+        RM(FM_DACMBCCTL_LVLMODE2, FB_DACMBCCTL_LVLMODE2)
+
+#define RM_DACMBCCTL_WINSEL2 \
+        RM(FM_DACMBCCTL_WINSEL2, FB_DACMBCCTL_WINSEL2)
+
+#define RM_DACMBCCTL_LVLMODE1 \
+        RM(FM_DACMBCCTL_LVLMODE1, FB_DACMBCCTL_LVLMODE1)
+
+#define RM_DACMBCCTL_WINSEL1 \
+        RM(FM_DACMBCCTL_WINSEL1, FB_DACMBCCTL_WINSEL1)
+
+
+/*********************************
+ *      R_DACMBCMUG1 (0xC9)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG1_PHASE                  5
+#define FB_DACMBCMUG1_MUGAIN                 0
+
+/* Field Masks */
+#define FM_DACMBCMUG1_PHASE                  0X1
+#define FM_DACMBCMUG1_MUGAIN                 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG1_PHASE \
+        RM(FM_DACMBCMUG1_PHASE, FB_DACMBCMUG1_PHASE)
+
+#define RM_DACMBCMUG1_MUGAIN \
+        RM(FM_DACMBCMUG1_MUGAIN, FB_DACMBCMUG1_MUGAIN)
+
+
+/*********************************
+ *      R_DACMBCTHR1 (0xCA)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR1_THRESH                 0
+
+/* Field Masks */
+#define FM_DACMBCTHR1_THRESH                 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR1_THRESH \
+        RM(FM_DACMBCTHR1_THRESH, FB_DACMBCTHR1_THRESH)
+
+
+/*********************************
+ *      R_DACMBCRAT1 (0xCB)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT1_RATIO                  0
+
+/* Field Masks */
+#define FM_DACMBCRAT1_RATIO                  0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT1_RATIO \
+        RM(FM_DACMBCRAT1_RATIO, FB_DACMBCRAT1_RATIO)
+
+
+/**********************************
+ *      R_DACMBCATK1L (0xCC)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK1L_TCATKL                0
+
+/* Field Masks */
+#define FM_DACMBCATK1L_TCATKL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK1L_TCATKL \
+        RM(FM_DACMBCATK1L_TCATKL, FB_DACMBCATK1L_TCATKL)
+
+
+/**********************************
+ *      R_DACMBCATK1H (0xCD)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK1H_TCATKH                0
+
+/* Field Masks */
+#define FM_DACMBCATK1H_TCATKH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK1H_TCATKH \
+        RM(FM_DACMBCATK1H_TCATKH, FB_DACMBCATK1H_TCATKH)
+
+
+/**********************************
+ *      R_DACMBCREL1L (0xCE)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL1L_TCRELL                0
+
+/* Field Masks */
+#define FM_DACMBCREL1L_TCRELL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL1L_TCRELL \
+        RM(FM_DACMBCREL1L_TCRELL, FB_DACMBCREL1L_TCRELL)
+
+
+/**********************************
+ *      R_DACMBCREL1H (0xCF)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL1H_TCRELH                0
+
+/* Field Masks */
+#define FM_DACMBCREL1H_TCRELH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL1H_TCRELH \
+        RM(FM_DACMBCREL1H_TCRELH, FB_DACMBCREL1H_TCRELH)
+
+
+/*********************************
+ *      R_DACMBCMUG2 (0xD0)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG2_PHASE                  5
+#define FB_DACMBCMUG2_MUGAIN                 0
+
+/* Field Masks */
+#define FM_DACMBCMUG2_PHASE                  0X1
+#define FM_DACMBCMUG2_MUGAIN                 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG2_PHASE \
+        RM(FM_DACMBCMUG2_PHASE, FB_DACMBCMUG2_PHASE)
+
+#define RM_DACMBCMUG2_MUGAIN \
+        RM(FM_DACMBCMUG2_MUGAIN, FB_DACMBCMUG2_MUGAIN)
+
+
+/*********************************
+ *      R_DACMBCTHR2 (0xD1)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR2_THRESH                 0
+
+/* Field Masks */
+#define FM_DACMBCTHR2_THRESH                 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR2_THRESH \
+        RM(FM_DACMBCTHR2_THRESH, FB_DACMBCTHR2_THRESH)
+
+
+/*********************************
+ *      R_DACMBCRAT2 (0xD2)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT2_RATIO                  0
+
+/* Field Masks */
+#define FM_DACMBCRAT2_RATIO                  0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT2_RATIO \
+        RM(FM_DACMBCRAT2_RATIO, FB_DACMBCRAT2_RATIO)
+
+
+/**********************************
+ *      R_DACMBCATK2L (0xD3)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK2L_TCATKL                0
+
+/* Field Masks */
+#define FM_DACMBCATK2L_TCATKL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK2L_TCATKL \
+        RM(FM_DACMBCATK2L_TCATKL, FB_DACMBCATK2L_TCATKL)
+
+
+/**********************************
+ *      R_DACMBCATK2H (0xD4)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK2H_TCATKH                0
+
+/* Field Masks */
+#define FM_DACMBCATK2H_TCATKH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK2H_TCATKH \
+        RM(FM_DACMBCATK2H_TCATKH, FB_DACMBCATK2H_TCATKH)
+
+
+/**********************************
+ *      R_DACMBCREL2L (0xD5)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL2L_TCRELL                0
+
+/* Field Masks */
+#define FM_DACMBCREL2L_TCRELL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL2L_TCRELL \
+        RM(FM_DACMBCREL2L_TCRELL, FB_DACMBCREL2L_TCRELL)
+
+
+/**********************************
+ *      R_DACMBCREL2H (0xD6)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL2H_TCRELH                0
+
+/* Field Masks */
+#define FM_DACMBCREL2H_TCRELH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL2H_TCRELH \
+        RM(FM_DACMBCREL2H_TCRELH, FB_DACMBCREL2H_TCRELH)
+
+
+/*********************************
+ *      R_DACMBCMUG3 (0xD7)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG3_PHASE                  5
+#define FB_DACMBCMUG3_MUGAIN                 0
+
+/* Field Masks */
+#define FM_DACMBCMUG3_PHASE                  0X1
+#define FM_DACMBCMUG3_MUGAIN                 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG3_PHASE \
+        RM(FM_DACMBCMUG3_PHASE, FB_DACMBCMUG3_PHASE)
+
+#define RM_DACMBCMUG3_MUGAIN \
+        RM(FM_DACMBCMUG3_MUGAIN, FB_DACMBCMUG3_MUGAIN)
+
+
+/*********************************
+ *      R_DACMBCTHR3 (0xD8)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR3_THRESH                 0
+
+/* Field Masks */
+#define FM_DACMBCTHR3_THRESH                 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR3_THRESH \
+        RM(FM_DACMBCTHR3_THRESH, FB_DACMBCTHR3_THRESH)
+
+
+/*********************************
+ *      R_DACMBCRAT3 (0xD9)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT3_RATIO                  0
+
+/* Field Masks */
+#define FM_DACMBCRAT3_RATIO                  0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT3_RATIO \
+        RM(FM_DACMBCRAT3_RATIO, FB_DACMBCRAT3_RATIO)
+
+
+/**********************************
+ *      R_DACMBCATK3L (0xDA)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK3L_TCATKL                0
+
+/* Field Masks */
+#define FM_DACMBCATK3L_TCATKL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK3L_TCATKL \
+        RM(FM_DACMBCATK3L_TCATKL, FB_DACMBCATK3L_TCATKL)
+
+
+/**********************************
+ *      R_DACMBCATK3H (0xDB)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK3H_TCATKH                0
+
+/* Field Masks */
+#define FM_DACMBCATK3H_TCATKH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK3H_TCATKH \
+        RM(FM_DACMBCATK3H_TCATKH, FB_DACMBCATK3H_TCATKH)
+
+
+/**********************************
+ *      R_DACMBCREL3L (0xDC)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL3L_TCRELL                0
+
+/* Field Masks */
+#define FM_DACMBCREL3L_TCRELL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL3L_TCRELL \
+        RM(FM_DACMBCREL3L_TCRELL, FB_DACMBCREL3L_TCRELL)
+
+
+/**********************************
+ *      R_DACMBCREL3H (0xDD)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL3H_TCRELH                0
+
+/* Field Masks */
+#define FM_DACMBCREL3H_TCRELH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL3H_TCRELH \
+        RM(FM_DACMBCREL3H_TCRELH, FB_DACMBCREL3H_TCRELH)
+
+
+#endif /* __WOOKIE_H__ */
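Editor's note on the header just added: every register above follows the same FB_ (bit offset), FM_ (field mask) and FV_ (field value) naming, with RM()/RV() composing them into register-wide masks and values. Those two helpers are defined earlier in the header, outside the hunk shown here; assuming the usual shift form RM(m, b) = ((m) << (b)) and RV(v, b) = ((v) << (b)), a driver would update a field roughly as in the sketch below. The function name, the regmap handle and the bare 0x25 address (taken from the R_CLECTL banner comment) are illustrative, not lifted from this patch.

#include <linux/regmap.h>

/* Illustrative sketch: enable the compressor in R_CLECTL (0x25) and
 * select peak level detection, touching only those two fields.
 */
static int example_enable_comp_peak(struct regmap *regmap)
{
        unsigned int mask = RM_CLECTL_COMP_EN | RM_CLECTL_LVL_MODE;
        unsigned int val = RV_CLECTL_COMP_EN_ENABLE |
                           RV_CLECTL_LVL_MODE_PEAK;

        return regmap_update_bits(regmap, 0x25, mask, val);
}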
index c482b2e..8798182 100644
@@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
        struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
        struct device_node *twl4030_codec_node = NULL;
 
-       twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node,
+       twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node,
                                                  "codec");
 
        if (!pdata && twl4030_codec_node) {
@@ -240,10 +240,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
                                     sizeof(struct twl4030_codec_data),
                                     GFP_KERNEL);
                if (!pdata) {
-                       dev_err(codec->dev, "Can not allocate memory\n");
+                       of_node_put(twl4030_codec_node);
                        return NULL;
                }
                twl4030_setup_pdata_of(pdata, twl4030_codec_node);
+               of_node_put(twl4030_codec_node);
        }
 
        return pdata;
@@ -849,14 +850,14 @@ static int snd_soc_get_volsw_twl4030(struct snd_kcontrol *kcontrol,
        int mask = (1 << fls(max)) - 1;
 
        ucontrol->value.integer.value[0] =
-               (snd_soc_read(codec, reg) >> shift) & mask;
+               (twl4030_read(codec, reg) >> shift) & mask;
        if (ucontrol->value.integer.value[0])
                ucontrol->value.integer.value[0] =
                        max + 1 - ucontrol->value.integer.value[0];
 
        if (shift != rshift) {
                ucontrol->value.integer.value[1] =
-                       (snd_soc_read(codec, reg) >> rshift) & mask;
+                       (twl4030_read(codec, reg) >> rshift) & mask;
                if (ucontrol->value.integer.value[1])
                        ucontrol->value.integer.value[1] =
                                max + 1 - ucontrol->value.integer.value[1];
@@ -907,9 +908,9 @@ static int snd_soc_get_volsw_r2_twl4030(struct snd_kcontrol *kcontrol,
        int mask = (1<<fls(max))-1;
 
        ucontrol->value.integer.value[0] =
-               (snd_soc_read(codec, reg) >> shift) & mask;
+               (twl4030_read(codec, reg) >> shift) & mask;
        ucontrol->value.integer.value[1] =
-               (snd_soc_read(codec, reg2) >> shift) & mask;
+               (twl4030_read(codec, reg2) >> shift) & mask;
 
        if (ucontrol->value.integer.value[0])
                ucontrol->value.integer.value[0] =
@@ -2194,8 +2195,6 @@ static int twl4030_soc_remove(struct snd_soc_codec *codec)
 static const struct snd_soc_codec_driver soc_codec_dev_twl4030 = {
        .probe = twl4030_soc_probe,
        .remove = twl4030_soc_remove,
-       .read = twl4030_read,
-       .write = twl4030_write,
        .set_bias_level = twl4030_set_bias_level,
        .idle_bias_off = true,
 
index 1773ff8..3b895b4 100644
@@ -106,10 +106,12 @@ static const struct snd_pcm_hw_constraint_list sysclk_constraints[] = {
        { .count = ARRAY_SIZE(hp_rates), .list = hp_rates, },
 };
 
+#define to_twl6040(codec)      dev_get_drvdata((codec)->dev->parent)
+
 static unsigned int twl6040_read(struct snd_soc_codec *codec, unsigned int reg)
 {
        struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
-       struct twl6040 *twl6040 = codec->control_data;
+       struct twl6040 *twl6040 = to_twl6040(codec);
        u8 value;
 
        if (reg >= TWL6040_CACHEREGNUM)
@@ -171,7 +173,7 @@ static inline void twl6040_update_dl12_cache(struct snd_soc_codec *codec,
 static int twl6040_write(struct snd_soc_codec *codec,
                        unsigned int reg, unsigned int value)
 {
-       struct twl6040 *twl6040 = codec->control_data;
+       struct twl6040 *twl6040 = to_twl6040(codec);
 
        if (reg >= TWL6040_CACHEREGNUM)
                return -EIO;
@@ -541,7 +543,7 @@ int twl6040_get_dl1_gain(struct snd_soc_codec *codec)
        if (snd_soc_dapm_get_pin_status(dapm, "HSOR") ||
                snd_soc_dapm_get_pin_status(dapm, "HSOL")) {
 
-               u8 val = snd_soc_read(codec, TWL6040_REG_HSLCTL);
+               u8 val = twl6040_read(codec, TWL6040_REG_HSLCTL);
                if (val & TWL6040_HSDACMODE)
                        /* HSDACL in LP mode */
                        return -8; /* -8dB */
@@ -572,7 +574,7 @@ EXPORT_SYMBOL_GPL(twl6040_get_trim_value);
 
 int twl6040_get_hs_step_size(struct snd_soc_codec *codec)
 {
-       struct twl6040 *twl6040 = codec->control_data;
+       struct twl6040 *twl6040 = to_twl6040(codec);
 
        if (twl6040_get_revid(twl6040) < TWL6040_REV_ES1_3)
                /* For ES under ES_1.3 HS step is 2 mV */
@@ -830,7 +832,7 @@ static const struct snd_soc_dapm_route intercon[] = {
 static int twl6040_set_bias_level(struct snd_soc_codec *codec,
                                enum snd_soc_bias_level level)
 {
-       struct twl6040 *twl6040 = codec->control_data;
+       struct twl6040 *twl6040 = to_twl6040(codec);
        struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
        int ret = 0;
 
@@ -922,7 +924,7 @@ static int twl6040_prepare(struct snd_pcm_substream *substream,
                        struct snd_soc_dai *dai)
 {
        struct snd_soc_codec *codec = dai->codec;
-       struct twl6040 *twl6040 = codec->control_data;
+       struct twl6040 *twl6040 = to_twl6040(codec);
        struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
        int ret;
 
@@ -964,7 +966,7 @@ static int twl6040_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 static void twl6040_mute_path(struct snd_soc_codec *codec, enum twl6040_dai_id id,
                             int mute)
 {
-       struct twl6040 *twl6040 = codec->control_data;
+       struct twl6040 *twl6040 = to_twl6040(codec);
        struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
        int hslctl, hsrctl, earctl;
        int hflctl, hfrctl;
@@ -1108,7 +1110,6 @@ static struct snd_soc_dai_driver twl6040_dai[] = {
 static int twl6040_probe(struct snd_soc_codec *codec)
 {
        struct twl6040_data *priv;
-       struct twl6040 *twl6040 = dev_get_drvdata(codec->dev->parent);
        struct platform_device *pdev = to_platform_device(codec->dev);
        int ret = 0;
 
@@ -1119,7 +1120,6 @@ static int twl6040_probe(struct snd_soc_codec *codec)
        snd_soc_codec_set_drvdata(codec, priv);
 
        priv->codec = codec;
-       codec->control_data = twl6040;
 
        priv->plug_irq = platform_get_irq(pdev, 0);
        if (priv->plug_irq < 0) {
@@ -1158,8 +1158,6 @@ static int twl6040_remove(struct snd_soc_codec *codec)
 static const struct snd_soc_codec_driver soc_codec_dev_twl6040 = {
        .probe = twl6040_probe,
        .remove = twl6040_remove,
-       .read = twl6040_read,
-       .write = twl6040_write,
        .set_bias_level = twl6040_set_bias_level,
        .suspend_bias_off = true,
        .ignore_pmdown_time = true,
index 926c81a..c73e6a1 100644
@@ -37,7 +37,8 @@ struct uda1380_priv {
        struct snd_soc_codec *codec;
        unsigned int dac_clk;
        struct work_struct work;
-       void *control_data;
+       struct i2c_client *i2c;
+       u16 *reg_cache;
 };
 
 /*
@@ -63,7 +64,9 @@ static unsigned long uda1380_cache_dirty;
 static inline unsigned int uda1380_read_reg_cache(struct snd_soc_codec *codec,
        unsigned int reg)
 {
-       u16 *cache = codec->reg_cache;
+       struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
+       u16 *cache = uda1380->reg_cache;
+
        if (reg == UDA1380_RESET)
                return 0;
        if (reg >= UDA1380_CACHEREGNUM)
@@ -77,7 +80,8 @@ static inline unsigned int uda1380_read_reg_cache(struct snd_soc_codec *codec,
 static inline void uda1380_write_reg_cache(struct snd_soc_codec *codec,
        u16 reg, unsigned int value)
 {
-       u16 *cache = codec->reg_cache;
+       struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
+       u16 *cache = uda1380->reg_cache;
 
        if (reg >= UDA1380_CACHEREGNUM)
                return;
@@ -92,6 +96,7 @@ static inline void uda1380_write_reg_cache(struct snd_soc_codec *codec,
 static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
        unsigned int value)
 {
+       struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
        u8 data[3];
 
        /* data is
@@ -111,10 +116,10 @@ static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
        if (!snd_soc_codec_is_active(codec) && (reg >= UDA1380_MVOL))
                return 0;
        pr_debug("uda1380: hw write %x val %x\n", reg, value);
-       if (codec->hw_write(codec->control_data, data, 3) == 3) {
+       if (i2c_master_send(uda1380->i2c, data, 3) == 3) {
                unsigned int val;
-               i2c_master_send(codec->control_data, data, 1);
-               i2c_master_recv(codec->control_data, data, 2);
+               i2c_master_send(uda1380->i2c, data, 1);
+               i2c_master_recv(uda1380->i2c, data, 2);
                val = (data[0]<<8) | data[1];
                if (val != value) {
                        pr_debug("uda1380: READ BACK VAL %x\n",
@@ -130,16 +135,17 @@ static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
 
 static void uda1380_sync_cache(struct snd_soc_codec *codec)
 {
+       struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
        int reg;
        u8 data[3];
-       u16 *cache = codec->reg_cache;
+       u16 *cache = uda1380->reg_cache;
 
        /* Sync reg_cache with the hardware */
        for (reg = 0; reg < UDA1380_MVOL; reg++) {
                data[0] = reg;
                data[1] = (cache[reg] & 0xff00) >> 8;
                data[2] = cache[reg] & 0x00ff;
-               if (codec->hw_write(codec->control_data, data, 3) != 3)
+               if (i2c_master_send(uda1380->i2c, data, 3) != 3)
                        dev_err(codec->dev, "%s: write to reg 0x%x failed\n",
                                __func__, reg);
        }
@@ -148,6 +154,7 @@ static void uda1380_sync_cache(struct snd_soc_codec *codec)
 static int uda1380_reset(struct snd_soc_codec *codec)
 {
        struct uda1380_platform_data *pdata = codec->dev->platform_data;
+       struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
 
        if (gpio_is_valid(pdata->gpio_reset)) {
                gpio_set_value(pdata->gpio_reset, 1);
@@ -160,7 +167,7 @@ static int uda1380_reset(struct snd_soc_codec *codec)
                data[1] = 0;
                data[2] = 0;
 
-               if (codec->hw_write(codec->control_data, data, 3) != 3) {
+               if (i2c_master_send(uda1380->i2c, data, 3) != 3) {
                        dev_err(codec->dev, "%s: failed\n", __func__);
                        return -EIO;
                }
@@ -695,9 +702,6 @@ static int uda1380_probe(struct snd_soc_codec *codec)
 
        uda1380->codec = codec;
 
-       codec->hw_write = (hw_write_t)i2c_master_send;
-       codec->control_data = uda1380->control_data;
-
        if (!gpio_is_valid(pdata->gpio_power)) {
                ret = uda1380_reset(codec);
                if (ret)
@@ -727,11 +731,6 @@ static const struct snd_soc_codec_driver soc_codec_dev_uda1380 = {
        .set_bias_level = uda1380_set_bias_level,
        .suspend_bias_off = true,
 
-       .reg_cache_size = ARRAY_SIZE(uda1380_reg),
-       .reg_word_size = sizeof(u16),
-       .reg_cache_default = uda1380_reg,
-       .reg_cache_step = 1,
-
        .component_driver = {
                .controls               = uda1380_snd_controls,
                .num_controls           = ARRAY_SIZE(uda1380_snd_controls),
@@ -771,8 +770,15 @@ static int uda1380_i2c_probe(struct i2c_client *i2c,
                        return ret;
        }
 
+       uda1380->reg_cache = devm_kmemdup(&i2c->dev,
+                                       uda1380_reg,
+                                       ARRAY_SIZE(uda1380_reg) * sizeof(u16),
+                                       GFP_KERNEL);
+       if (!uda1380->reg_cache)
+               return -ENOMEM;
+
        i2c_set_clientdata(i2c, uda1380);
-       uda1380->control_data = i2c;
+       uda1380->i2c = i2c;
 
        ret =  snd_soc_register_codec(&i2c->dev,
                        &soc_codec_dev_uda1380, uda1380_dai, ARRAY_SIZE(uda1380_dai));
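Editor's note: the uda1380 changes drop the legacy codec->hw_write/control_data indirection and keep both the i2c_client and a devm-allocated copy of the default register table in the driver's private data. The resulting write path is the plain "register byte followed by a big-endian 16-bit value" I2C transfer, roughly as sketched here; the helper name is illustrative, the byte packing mirrors the data[] layout in the hunk above.

#include <linux/i2c.h>

static int example_uda1380_hw_write(struct i2c_client *i2c, u8 reg, u16 value)
{
        u8 data[3];

        data[0] = reg;                          /* register address */
        data[1] = (value & 0xff00) >> 8;        /* MSB first */
        data[2] = value & 0x00ff;

        return i2c_master_send(i2c, data, 3) == 3 ? 0 : -EIO;
}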
index 4f5f571..0147d2f 100644
@@ -655,11 +655,8 @@ static int wm0010_boot(struct snd_soc_codec *codec)
                ret = -ENOMEM;
                len = pll_rec.length + 8;
                out = kzalloc(len, GFP_KERNEL | GFP_DMA);
-               if (!out) {
-                       dev_err(codec->dev,
-                               "Failed to allocate RX buffer\n");
+               if (!out)
                        goto abort;
-               }
 
                img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
                if (!img_swap)
index 23cde3a..abfa052 100644
@@ -13,7 +13,7 @@
  * 'wm2000_anc.bin' by default (overridable via platform data) at
  * runtime and is expected to be in flat binary format.  This is
  * generated by Wolfson configuration tools and includes
- * system-specific callibration information.  If supplied as a
+ * system-specific calibration information.  If supplied as a
  * sequence of ASCII-encoded hexidecimal bytes this can be converted
  * into a flat binary with a command such as this on the command line:
  *
@@ -826,8 +826,7 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
        int reg;
        u16 id;
 
-       wm2000 = devm_kzalloc(&i2c->dev, sizeof(struct wm2000_priv),
-                             GFP_KERNEL);
+       wm2000 = devm_kzalloc(&i2c->dev, sizeof(*wm2000), GFP_KERNEL);
        if (!wm2000)
                return -ENOMEM;
 
@@ -902,7 +901,6 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
                                            wm2000->anc_download_size,
                                            GFP_KERNEL);
        if (wm2000->anc_download == NULL) {
-               dev_err(&i2c->dev, "Out of memory\n");
                ret = -ENOMEM;
                goto err_supplies;
        }
index d83dab5..5c2f572 100644
@@ -98,6 +98,8 @@ struct wm2200_priv {
 
        int rev;
        int sysclk;
+
+       unsigned int symmetric_rates:1;
 };
 
 #define WM2200_DSP_RANGE_BASE (WM2200_MAX_REGISTER + 1)
@@ -1550,7 +1552,7 @@ static const struct snd_soc_dapm_route wm2200_dapm_routes[] = {
 
 static int wm2200_probe(struct snd_soc_codec *codec)
 {
-       struct wm2200_priv *wm2200 = dev_get_drvdata(codec->dev);
+       struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
        int ret;
 
        wm2200->codec = codec;
@@ -1758,7 +1760,7 @@ static int wm2200_hw_params(struct snd_pcm_substream *substream,
        lrclk = bclk_rates[bclk] / params_rate(params);
        dev_dbg(codec->dev, "Setting %dHz LRCLK\n", bclk_rates[bclk] / lrclk);
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
-           dai->symmetric_rates)
+           wm2200->symmetric_rates)
                snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_7,
                                    WM2200_AIF1RX_BCPF_MASK, lrclk);
        else
@@ -2059,13 +2061,14 @@ static int wm2200_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
 static int wm2200_dai_probe(struct snd_soc_dai *dai)
 {
        struct snd_soc_codec *codec = dai->codec;
+       struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
        unsigned int val = 0;
        int ret;
 
        ret = snd_soc_read(codec, WM2200_GPIO_CTRL_1);
        if (ret >= 0) {
                if ((ret & WM2200_GP1_FN_MASK) != 0) {
-                       dai->symmetric_rates = true;
+                       wm2200->symmetric_rates = true;
                        val = WM2200_AIF1TX_LRCLK_SRC;
                }
        } else {
index 4f0481d..fc066ca 100644
@@ -1935,8 +1935,11 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
        struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
        struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
        struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+       struct arizona *arizona = priv->core.arizona;
        int ret;
 
+       snd_soc_codec_init_regmap(codec, arizona->regmap);
+
        ret = wm_adsp2_codec_probe(&priv->core.adsp[0], codec);
        if (ret)
                return ret;
@@ -1989,17 +1992,9 @@ static unsigned int wm5102_digital_vu[] = {
        ARIZONA_DAC_DIGITAL_VOLUME_5R,
 };
 
-static struct regmap *wm5102_get_regmap(struct device *dev)
-{
-       struct wm5102_priv *priv = dev_get_drvdata(dev);
-
-       return priv->core.arizona->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm5102 = {
        .probe = wm5102_codec_probe,
        .remove = wm5102_codec_remove,
-       .get_regmap = wm5102_get_regmap,
 
        .idle_bias_off = true,
 
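Editor's note: this wm5102 hunk is the first of several in the section (wm5110, wm8350, wm8400, wm8994, wm8997 and wm8998 follow the same shape) that delete a per-driver .get_regmap callback and instead hand the MFD-owned regmap to the ASoC core from the codec probe via snd_soc_codec_init_regmap(). Reduced to its essentials, the new pattern looks like this sketch; the names are placeholders, not taken from any of the drivers below.

#include <sound/soc.h>

struct example_priv {
        struct regmap *regmap;  /* regmap owned by the parent MFD */
};

static int example_codec_probe(struct snd_soc_codec *codec)
{
        struct example_priv *priv = snd_soc_codec_get_drvdata(codec);

        /* Register the parent's regmap with the ASoC core instead of
         * exposing it through a .get_regmap callback.
         */
        snd_soc_codec_init_regmap(codec, priv->regmap);

        return 0;
}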
index 6ed1e1f..fb0cf9c 100644
@@ -2280,9 +2280,11 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
        struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
        struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
        struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec);
+       struct arizona *arizona = priv->core.arizona;
        int i, ret;
 
-       priv->core.arizona->dapm = dapm;
+       arizona->dapm = dapm;
+       snd_soc_codec_init_regmap(codec, arizona->regmap);
 
        ret = arizona_init_spk(codec);
        if (ret < 0)
@@ -2344,17 +2346,9 @@ static unsigned int wm5110_digital_vu[] = {
        ARIZONA_DAC_DIGITAL_VOLUME_6R,
 };
 
-static struct regmap *wm5110_get_regmap(struct device *dev)
-{
-       struct wm5110_priv *priv = dev_get_drvdata(dev);
-
-       return priv->core.arizona->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm5110 = {
        .probe = wm5110_codec_probe,
        .remove = wm5110_codec_remove,
-       .get_regmap = wm5110_get_regmap,
 
        .idle_bias_off = true,
 
index 2efc5b4..fc79c67 100644
@@ -1472,6 +1472,8 @@ static  int wm8350_codec_probe(struct snd_soc_codec *codec)
                            GFP_KERNEL);
        if (priv == NULL)
                return -ENOMEM;
+
+       snd_soc_codec_init_regmap(codec, wm8350->regmap);
        snd_soc_codec_set_drvdata(codec, priv);
 
        priv->wm8350 = wm8350;
@@ -1580,17 +1582,9 @@ static int  wm8350_codec_remove(struct snd_soc_codec *codec)
        return 0;
 }
 
-static struct regmap *wm8350_get_regmap(struct device *dev)
-{
-       struct wm8350 *wm8350 = dev_get_platdata(dev);
-
-       return wm8350->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm8350 = {
        .probe =        wm8350_codec_probe,
        .remove =       wm8350_codec_remove,
-       .get_regmap =   wm8350_get_regmap,
        .set_bias_level = wm8350_set_bias_level,
        .suspend_bias_off = true,
 
index 6c59fb9..a36adf8 100644
@@ -1285,6 +1285,7 @@ static int wm8400_codec_probe(struct snd_soc_codec *codec)
        if (priv == NULL)
                return -ENOMEM;
 
+       snd_soc_codec_init_regmap(codec, wm8400->regmap);
        snd_soc_codec_set_drvdata(codec, priv);
        priv->wm8400 = wm8400;
 
@@ -1325,17 +1326,9 @@ static int  wm8400_codec_remove(struct snd_soc_codec *codec)
        return 0;
 }
 
-static struct regmap *wm8400_get_regmap(struct device *dev)
-{
-       struct wm8400 *wm8400 = dev_get_platdata(dev);
-
-       return wm8400->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm8400 = {
        .probe =        wm8400_codec_probe,
        .remove =       wm8400_codec_remove,
-       .get_regmap =   wm8400_get_regmap,
        .set_bias_level = wm8400_set_bias_level,
        .suspend_bias_off = true,
 
index 237eeb9..cba90f2 100644
@@ -1995,8 +1995,7 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
        unsigned int val, irq_pol;
        int ret, i;
 
-       wm8903 = devm_kzalloc(&i2c->dev,  sizeof(struct wm8903_priv),
-                             GFP_KERNEL);
+       wm8903 = devm_kzalloc(&i2c->dev, sizeof(*wm8903), GFP_KERNEL);
        if (wm8903 == NULL)
                return -ENOMEM;
 
@@ -2017,13 +2016,10 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
        if (pdata) {
                wm8903->pdata = pdata;
        } else {
-               wm8903->pdata = devm_kzalloc(&i2c->dev,
-                                       sizeof(struct wm8903_platform_data),
-                                       GFP_KERNEL);
-               if (wm8903->pdata == NULL) {
-                       dev_err(&i2c->dev, "Failed to allocate pdata\n");
+               wm8903->pdata = devm_kzalloc(&i2c->dev, sizeof(*wm8903->pdata),
+                                            GFP_KERNEL);
+               if (!wm8903->pdata)
                        return -ENOMEM;
-               }
 
                if (i2c->irq) {
                        ret = wm8903_set_pdata_irq_trigger(i2c, wm8903->pdata);
index f91b49e..21ffd64 100644
@@ -3993,6 +3993,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
        unsigned int reg;
        int ret, i;
 
+       snd_soc_codec_init_regmap(codec, control->regmap);
+
        wm8994->hubs.codec = codec;
 
        mutex_init(&wm8994->accdet_lock);
@@ -4434,19 +4436,11 @@ static int wm8994_codec_remove(struct snd_soc_codec *codec)
        return 0;
 }
 
-static struct regmap *wm8994_get_regmap(struct device *dev)
-{
-       struct wm8994 *control = dev_get_drvdata(dev->parent);
-
-       return control->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm8994 = {
        .probe =        wm8994_codec_probe,
        .remove =       wm8994_codec_remove,
        .suspend =      wm8994_codec_suspend,
        .resume =       wm8994_codec_resume,
-       .get_regmap =   wm8994_get_regmap,
        .set_bias_level = wm8994_set_bias_level,
 };
 
index 77f5127..cac9b3e 100644
@@ -1062,8 +1062,11 @@ static int wm8997_codec_probe(struct snd_soc_codec *codec)
        struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
        struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
        struct wm8997_priv *priv = snd_soc_codec_get_drvdata(codec);
+       struct arizona *arizona = priv->core.arizona;
        int ret;
 
+       snd_soc_codec_init_regmap(codec, arizona->regmap);
+
        ret = arizona_init_spk(codec);
        if (ret < 0)
                return ret;
@@ -1095,17 +1098,9 @@ static unsigned int wm8997_digital_vu[] = {
        ARIZONA_DAC_DIGITAL_VOLUME_5R,
 };
 
-static struct regmap *wm8997_get_regmap(struct device *dev)
-{
-       struct wm8997_priv *priv = dev_get_drvdata(dev);
-
-       return priv->core.arizona->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm8997 = {
        .probe = wm8997_codec_probe,
        .remove = wm8997_codec_remove,
-       .get_regmap =   wm8997_get_regmap,
 
        .idle_bias_off = true,
 
index 2d211db..1288e1f 100644 (file)
@@ -1275,9 +1275,11 @@ static int wm8998_codec_probe(struct snd_soc_codec *codec)
        struct wm8998_priv *priv = snd_soc_codec_get_drvdata(codec);
        struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
        struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
+       struct arizona *arizona = priv->core.arizona;
        int ret;
 
-       priv->core.arizona->dapm = dapm;
+       arizona->dapm = dapm;
+       snd_soc_codec_init_regmap(codec, arizona->regmap);
 
        ret = arizona_init_spk(codec);
        if (ret < 0)
@@ -1313,17 +1315,9 @@ static unsigned int wm8998_digital_vu[] = {
        ARIZONA_DAC_DIGITAL_VOLUME_5R,
 };
 
-static struct regmap *wm8998_get_regmap(struct device *dev)
-{
-       struct wm8998_priv *priv = dev_get_drvdata(dev);
-
-       return priv->core.arizona->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm8998 = {
        .probe = wm8998_codec_probe,
        .remove = wm8998_codec_remove,
-       .get_regmap = wm8998_get_regmap,
 
        .idle_bias_off = true,
 
index 65c059b..66e32f5 100644 (file)
@@ -1733,7 +1733,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
                 le64_to_cpu(footer->timestamp));
 
        while (pos < firmware->size &&
-              pos - firmware->size > sizeof(*region)) {
+              sizeof(*region) < firmware->size - pos) {
                region = (void *)&(firmware->data[pos]);
                region_name = "Unknown";
                reg = 0;
@@ -1782,8 +1782,8 @@ static int wm_adsp_load(struct wm_adsp *dsp)
                         regions, le32_to_cpu(region->len), offset,
                         region_name);
 
-               if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
-                   firmware->size) {
+               if (le32_to_cpu(region->len) >
+                   firmware->size - pos - sizeof(*region)) {
                        adsp_err(dsp,
                                 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
                                 file, regions, region_name,
@@ -2253,7 +2253,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
 
        blocks = 0;
        while (pos < firmware->size &&
-              pos - firmware->size > sizeof(*blk)) {
+              sizeof(*blk) < firmware->size - pos) {
                blk = (void *)(&firmware->data[pos]);
 
                type = le16_to_cpu(blk->type);
@@ -2327,8 +2327,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
                }
 
                if (reg) {
-                       if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
-                           firmware->size) {
+                       if (le32_to_cpu(blk->len) >
+                           firmware->size - pos - sizeof(*blk)) {
                                adsp_err(dsp,
                                         "%s.%d: %s region len %d bytes exceeds file length %zu\n",
                                         file, blocks, region_name,
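
Note: the four wm_adsp hunks above rework the firmware bounds checks. The old loop guard
"pos - firmware->size > sizeof(*region)" underflows whenever pos < size, so it was always
true and never actually bounded the loop; the rewritten "sizeof(*region) < firmware->size
- pos" guarantees a full header is still available. The old length check "pos + len +
sizeof(*region) > firmware->size" could wrap around for a huge len read from the file;
keeping len alone on the left avoids that, and the subtraction on the right cannot
underflow because the loop guard already holds. A simplified, hedged illustration with
generic names (not the driver's actual variables):

        size_t pos, size, hdr_size;     /* offset, total file size (pos < size), header size */
        u32 len;                        /* length field read from the (untrusted) firmware image */

        /* Safe form: every term stays small; size - pos - hdr_size cannot
         * underflow because the loop guard ensured size - pos > hdr_size. */
        if (len > size - pos - hdr_size)
                return -EINVAL;         /* region/block does not fit in the remaining bytes */
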
index 804c6f2..03ba218 100644 (file)
@@ -1242,6 +1242,20 @@ static int davinci_mcasp_hw_rule_format(struct snd_pcm_hw_params *params,
        return snd_mask_refine(fmt, &nfmt);
 }
 
+static int davinci_mcasp_hw_rule_min_periodsize(
+               struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
+{
+       struct snd_interval *period_size = hw_param_interval(params,
+                                               SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+       struct snd_interval frames;
+
+       snd_interval_any(&frames);
+       frames.min = 64;
+       frames.integer = 1;
+
+       return snd_interval_refine(period_size, &frames);
+}
+
 static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
                                 struct snd_soc_dai *cpu_dai)
 {
@@ -1333,6 +1347,11 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
                        return ret;
        }
 
+       snd_pcm_hw_rule_add(substream->runtime, 0,
+                           SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+                           davinci_mcasp_hw_rule_min_periodsize, NULL,
+                           SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+
        return 0;
 }
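
Note: in the davinci-mcasp hunks above, the rule's constrained variable and its only
dependency are both SNDRV_PCM_HW_PARAM_PERIOD_SIZE (the trailing -1 terminates the
dependency list), and the callback intersects the current period-size interval with
"integer frame counts of at least 64". As a hedged aside, not part of this patch, a
constant lower bound like this could also be expressed with the stock helper:

        snd_pcm_hw_constraint_minmax(substream->runtime,
                                     SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 64, UINT_MAX);
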
 
index 84ef638..191426a 100644 (file)
@@ -29,7 +29,6 @@
 
 #include "../codecs/tlv320aic23.h"
 #include "imx-ssi.h"
-#include "fsl_ssi.h"
 #include "imx-audmux.h"
 
 #define CODEC_CLOCK 12000000
index 1225e03..989be51 100644 (file)
@@ -442,8 +442,8 @@ static int fsl_asoc_card_late_probe(struct snd_soc_card *card)
 
        if (fsl_asoc_card_is_ac97(priv)) {
 #if IS_ENABLED(CONFIG_SND_AC97_CODEC)
-               struct snd_soc_codec *codec = rtd->codec;
-               struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+               struct snd_soc_component *component = rtd->codec_dai->component;
+               struct snd_ac97 *ac97 = snd_soc_component_get_drvdata(component);
 
                /*
                 * Use slots 3/4 for S/PDIF so SSI won't try to enable
index 0f163ab..2c5856a 100644 (file)
@@ -57,7 +57,7 @@
 #define REG_ASRDOC                     0x74
 #define REG_ASRDI(i)                   (REG_ASRDIA + (i << 3))
 #define REG_ASRDO(i)                   (REG_ASRDOA + (i << 3))
-#define REG_ASRDx(x, i)                        (x == IN ? REG_ASRDI(i) : REG_ASRDO(i))
+#define REG_ASRDx(x, i)                        ((x) == IN ? REG_ASRDI(i) : REG_ASRDO(i))
 
 #define REG_ASRIDRHA                   0x80
 #define REG_ASRIDRLA                   0x84
 #define ASRFSTi_OUTPUT_FIFO_SHIFT      12
 #define ASRFSTi_OUTPUT_FIFO_MASK       (((1 << ASRFSTi_OUTPUT_FIFO_WIDTH) - 1) << ASRFSTi_OUTPUT_FIFO_SHIFT)
 #define ASRFSTi_IAEi_SHIFT             11
-#define ASRFSTi_IAEi_MASK              (1 << ASRFSTi_OAFi_SHIFT)
-#define ASRFSTi_IAEi                   (1 << ASRFSTi_OAFi_SHIFT)
+#define ASRFSTi_IAEi_MASK              (1 << ASRFSTi_IAEi_SHIFT)
+#define ASRFSTi_IAEi                   (1 << ASRFSTi_IAEi_SHIFT)
 #define ASRFSTi_INPUT_FIFO_WIDTH       7
 #define ASRFSTi_INPUT_FIFO_SHIFT       0
 #define ASRFSTi_INPUT_FIFO_MASK                ((1 << ASRFSTi_INPUT_FIFO_WIDTH) - 1)
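
Note: the two fsl_asrc.h fixes above are (1) parenthesizing the macro parameter in
REG_ASRDx() and (2) a copy-paste fix so the IAEi mask/bit use ASRFSTi_IAEi_SHIFT rather
than the OAFi shift. The parentheses matter because == binds tighter than &, so an
expression argument would otherwise be grouped the wrong way. A hedged illustration with
made-up macro names:

        #define BAD(x, i)       (x == IN ? REG_ASRDI(i) : REG_ASRDO(i))
        #define GOOD(x, i)      ((x) == IN ? REG_ASRDI(i) : REG_ASRDO(i))

        /* With an expression argument such as "dir & 1":
         *   BAD(dir & 1, i)  expands to  (dir & 1 == IN ? ...), i.e. dir & (1 == IN)  (wrong)
         *   GOOD(dir & 1, i) expands to  ((dir & 1) == IN ? ...)                      (intended)
         */
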
index 0c11f43..8c2981b 100644 (file)
@@ -913,8 +913,8 @@ static int fsl_soc_dma_probe(struct platform_device *pdev)
        dma->dai.pcm_free = fsl_dma_free_dma_buffers;
 
        /* Store the SSI-specific information that we need */
-       dma->ssi_stx_phys = res.start + CCSR_SSI_STX0;
-       dma->ssi_srx_phys = res.start + CCSR_SSI_SRX0;
+       dma->ssi_stx_phys = res.start + REG_SSI_STX0;
+       dma->ssi_srx_phys = res.start + REG_SSI_SRX0;
 
        iprop = of_get_property(ssi_np, "fsl,fifo-depth", NULL);
        if (iprop)
index f2f51e0..aecd00f 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/ctype.h>
 #include <linux/device.h>
 #include <linux/delay.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/of.h>
  * samples will be written to STX properly.
  */
 #ifdef __BIG_ENDIAN
-#define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \
-        SNDRV_PCM_FMTBIT_S18_3BE | SNDRV_PCM_FMTBIT_S20_3BE | \
-        SNDRV_PCM_FMTBIT_S24_3BE | SNDRV_PCM_FMTBIT_S24_BE)
+#define FSLSSI_I2S_FORMATS \
+       (SNDRV_PCM_FMTBIT_S8 | \
+        SNDRV_PCM_FMTBIT_S16_BE | \
+        SNDRV_PCM_FMTBIT_S18_3BE | \
+        SNDRV_PCM_FMTBIT_S20_3BE | \
+        SNDRV_PCM_FMTBIT_S24_3BE | \
+        SNDRV_PCM_FMTBIT_S24_BE)
 #else
-#define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \
-        SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S20_3LE | \
-        SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+#define FSLSSI_I2S_FORMATS \
+       (SNDRV_PCM_FMTBIT_S8 | \
+        SNDRV_PCM_FMTBIT_S16_LE | \
+        SNDRV_PCM_FMTBIT_S18_3LE | \
+        SNDRV_PCM_FMTBIT_S20_3LE | \
+        SNDRV_PCM_FMTBIT_S24_3LE | \
+        SNDRV_PCM_FMTBIT_S24_LE)
 #endif
 
-#define FSLSSI_SIER_DBG_RX_FLAGS (CCSR_SSI_SIER_RFF0_EN | \
-               CCSR_SSI_SIER_RLS_EN | CCSR_SSI_SIER_RFS_EN | \
-               CCSR_SSI_SIER_ROE0_EN | CCSR_SSI_SIER_RFRC_EN)
-#define FSLSSI_SIER_DBG_TX_FLAGS (CCSR_SSI_SIER_TFE0_EN | \
-               CCSR_SSI_SIER_TLS_EN | CCSR_SSI_SIER_TFS_EN | \
-               CCSR_SSI_SIER_TUE0_EN | CCSR_SSI_SIER_TFRC_EN)
+#define FSLSSI_SIER_DBG_RX_FLAGS \
+       (SSI_SIER_RFF0_EN | \
+        SSI_SIER_RLS_EN | \
+        SSI_SIER_RFS_EN | \
+        SSI_SIER_ROE0_EN | \
+        SSI_SIER_RFRC_EN)
+#define FSLSSI_SIER_DBG_TX_FLAGS \
+       (SSI_SIER_TFE0_EN | \
+        SSI_SIER_TLS_EN | \
+        SSI_SIER_TFS_EN | \
+        SSI_SIER_TUE0_EN | \
+        SSI_SIER_TFRC_EN)
 
 enum fsl_ssi_type {
        FSL_SSI_MCP8610,
@@ -91,23 +106,18 @@ enum fsl_ssi_type {
        FSL_SSI_MX51,
 };
 
-struct fsl_ssi_reg_val {
+struct fsl_ssi_regvals {
        u32 sier;
        u32 srcr;
        u32 stcr;
        u32 scr;
 };
 
-struct fsl_ssi_rxtx_reg_val {
-       struct fsl_ssi_reg_val rx;
-       struct fsl_ssi_reg_val tx;
-};
-
 static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case CCSR_SSI_SACCEN:
-       case CCSR_SSI_SACCDIS:
+       case REG_SSI_SACCEN:
+       case REG_SSI_SACCDIS:
                return false;
        default:
                return true;
@@ -117,18 +127,18 @@ static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
 static bool fsl_ssi_volatile_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case CCSR_SSI_STX0:
-       case CCSR_SSI_STX1:
-       case CCSR_SSI_SRX0:
-       case CCSR_SSI_SRX1:
-       case CCSR_SSI_SISR:
-       case CCSR_SSI_SFCSR:
-       case CCSR_SSI_SACNT:
-       case CCSR_SSI_SACADD:
-       case CCSR_SSI_SACDAT:
-       case CCSR_SSI_SATAG:
-       case CCSR_SSI_SACCST:
-       case CCSR_SSI_SOR:
+       case REG_SSI_STX0:
+       case REG_SSI_STX1:
+       case REG_SSI_SRX0:
+       case REG_SSI_SRX1:
+       case REG_SSI_SISR:
+       case REG_SSI_SFCSR:
+       case REG_SSI_SACNT:
+       case REG_SSI_SACADD:
+       case REG_SSI_SACDAT:
+       case REG_SSI_SATAG:
+       case REG_SSI_SACCST:
+       case REG_SSI_SOR:
                return true;
        default:
                return false;
@@ -138,12 +148,12 @@ static bool fsl_ssi_volatile_reg(struct device *dev, unsigned int reg)
 static bool fsl_ssi_precious_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case CCSR_SSI_SRX0:
-       case CCSR_SSI_SRX1:
-       case CCSR_SSI_SISR:
-       case CCSR_SSI_SACADD:
-       case CCSR_SSI_SACDAT:
-       case CCSR_SSI_SATAG:
+       case REG_SSI_SRX0:
+       case REG_SSI_SRX1:
+       case REG_SSI_SISR:
+       case REG_SSI_SACADD:
+       case REG_SSI_SACDAT:
+       case REG_SSI_SATAG:
                return true;
        default:
                return false;
@@ -153,9 +163,9 @@ static bool fsl_ssi_precious_reg(struct device *dev, unsigned int reg)
 static bool fsl_ssi_writeable_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case CCSR_SSI_SRX0:
-       case CCSR_SSI_SRX1:
-       case CCSR_SSI_SACCST:
+       case REG_SSI_SRX0:
+       case REG_SSI_SRX1:
+       case REG_SSI_SACCST:
                return false;
        default:
                return true;
@@ -163,12 +173,12 @@ static bool fsl_ssi_writeable_reg(struct device *dev, unsigned int reg)
 }
 
 static const struct regmap_config fsl_ssi_regconfig = {
-       .max_register = CCSR_SSI_SACCDIS,
+       .max_register = REG_SSI_SACCDIS,
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
        .val_format_endian = REGMAP_ENDIAN_NATIVE,
-       .num_reg_defaults_raw = CCSR_SSI_SACCDIS / sizeof(uint32_t) + 1,
+       .num_reg_defaults_raw = REG_SSI_SACCDIS / sizeof(uint32_t) + 1,
        .readable_reg = fsl_ssi_readable_reg,
        .volatile_reg = fsl_ssi_volatile_reg,
        .precious_reg = fsl_ssi_precious_reg,
@@ -184,78 +194,79 @@ struct fsl_ssi_soc_data {
 };
 
 /**
- * fsl_ssi_private: per-SSI private data
+ * fsl_ssi: per-SSI private data
  *
- * @reg: Pointer to the regmap registers
+ * @regs: Pointer to the regmap registers
  * @irq: IRQ of this SSI
  * @cpu_dai_drv: CPU DAI driver for this device
  *
  * @dai_fmt: DAI configuration this device is currently used with
- * @i2s_mode: i2s and network mode configuration of the device. Is used to
- * switch between normal and i2s/network mode
- * mode depending on the number of channels
+ * @i2s_net: I2S and Network mode configurations of SCR register
  * @use_dma: DMA is used or FIQ with stream filter
- * @use_dual_fifo: DMA with support for both FIFOs used
- * @fifo_deph: Depth of the SSI FIFOs
- * @slot_width: width of each DAI slot
- * @slots: number of slots
- * @rxtx_reg_val: Specific register settings for receive/transmit configuration
+ * @use_dual_fifo: DMA with support for dual FIFO mode
+ * @has_ipg_clk_name: If "ipg" is in the clock name list of device tree
+ * @fifo_depth: Depth of the SSI FIFOs
+ * @slot_width: Width of each DAI slot
+ * @slots: Number of slots
+ * @regvals: Specific RX/TX register settings
  *
- * @clk: SSI clock
- * @baudclk: SSI baud clock for master mode
+ * @clk: Clock source to access register
+ * @baudclk: Clock source to generate bit and frame-sync clocks
  * @baudclk_streams: Active streams that are using baudclk
  *
+ * @regcache_sfcsr: Cache sfcsr register value during suspend and resume
+ * @regcache_sacnt: Cache sacnt register value during suspend and resume
+ *
  * @dma_params_tx: DMA transmit parameters
  * @dma_params_rx: DMA receive parameters
  * @ssi_phys: physical address of the SSI registers
  *
  * @fiq_params: FIQ stream filtering parameters
  *
- * @pdev: Pointer to pdev used for deprecated fsl-ssi sound card
+ * @pdev: Pointer to pdev when using fsl-ssi as sound card (ppc only)
+ *        TODO: Should be replaced with simple-sound-card
  *
  * @dbg_stats: Debugging statistics
  *
  * @soc: SoC specific data
+ * @dev: Pointer to &pdev->dev
+ *
+ * @fifo_watermark: The FIFO watermark setting. Notifies DMA when there are
+ *                  @fifo_watermark or fewer words in TX fifo or
+ *                  @fifo_watermark or more empty words in RX fifo.
+ * @dma_maxburst: Max number of words to transfer in one go. So far,
+ *                this is always the same as fifo_watermark.
  *
- * @fifo_watermark: the FIFO watermark setting.  Notifies DMA when
- *             there are @fifo_watermark or fewer words in TX fifo or
- *             @fifo_watermark or more empty words in RX fifo.
- * @dma_maxburst: max number of words to transfer in one go.  So far,
- *             this is always the same as fifo_watermark.
+ * @ac97_reg_lock: Mutex lock to serialize AC97 register access operations
  */
-struct fsl_ssi_private {
+struct fsl_ssi {
        struct regmap *regs;
        int irq;
        struct snd_soc_dai_driver cpu_dai_drv;
 
        unsigned int dai_fmt;
-       u8 i2s_mode;
+       u8 i2s_net;
        bool use_dma;
        bool use_dual_fifo;
        bool has_ipg_clk_name;
        unsigned int fifo_depth;
        unsigned int slot_width;
        unsigned int slots;
-       struct fsl_ssi_rxtx_reg_val rxtx_reg_val;
+       struct fsl_ssi_regvals regvals[2];
 
        struct clk *clk;
        struct clk *baudclk;
        unsigned int baudclk_streams;
 
-       /* regcache for volatile regs */
        u32 regcache_sfcsr;
        u32 regcache_sacnt;
 
-       /* DMA params */
        struct snd_dmaengine_dai_dma_data dma_params_tx;
        struct snd_dmaengine_dai_dma_data dma_params_rx;
        dma_addr_t ssi_phys;
 
-       /* params for non-dma FIQ stream filtered mode */
        struct imx_pcm_fiq_params fiq_params;
 
-       /* Used when using fsl-ssi as sound-card. This is only used by ppc and
-        * should be replaced with simple-sound-card. */
        struct platform_device *pdev;
 
        struct fsl_ssi_dbg dbg_stats;
@@ -265,30 +276,32 @@ struct fsl_ssi_private {
 
        u32 fifo_watermark;
        u32 dma_maxburst;
+
+       struct mutex ac97_reg_lock;
 };
 
 /*
- * imx51 and later SoCs have a slightly different IP that allows the
- * SSI configuration while the SSI unit is running.
- *
- * More important, it is necessary on those SoCs to configure the
- * sperate TX/RX DMA bits just before starting the stream
- * (fsl_ssi_trigger). The SDMA unit has to be configured before fsl_ssi
- * sends any DMA requests to the SDMA unit, otherwise it is not defined
- * how the SDMA unit handles the DMA request.
+ * SoC specific data
  *
- * SDMA units are present on devices starting at imx35 but the imx35
- * reference manual states that the DMA bits should not be changed
- * while the SSI unit is running (SSIEN). So we support the necessary
- * online configuration of fsl-ssi starting at imx51.
+ * Notes:
+ * 1) SSI in earlier SoCs has critical bits in control registers that
+ *    cannot be changed after SSI starts running -- a software reset
+ *    (set SSIEN to 0) is required to change their values. So adding
+ *    an offline_config flag for these SoCs.
+ * 2) SDMA is available since imx35. However, imx35 does not support
+ *    DMA bits changing when SSI is running, so set offline_config.
+ * 3) imx51 and later versions support register configurations when
+ *    SSI is running (SSIEN); For these versions, DMA needs to be
+ *    configured before SSI sends DMA request to avoid an undefined
+ *    DMA request on the SDMA side.
  */
 
 static struct fsl_ssi_soc_data fsl_ssi_mpc8610 = {
        .imx = false,
        .offline_config = true,
-       .sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC |
-                       CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
-                       CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+       .sisr_write_mask = SSI_SISR_RFRC | SSI_SISR_TFRC |
+                          SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+                          SSI_SISR_TUE0 | SSI_SISR_TUE1,
 };
 
 static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
@@ -301,16 +314,16 @@ static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
 static struct fsl_ssi_soc_data fsl_ssi_imx35 = {
        .imx = true,
        .offline_config = true,
-       .sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC |
-                       CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
-                       CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+       .sisr_write_mask = SSI_SISR_RFRC | SSI_SISR_TFRC |
+                          SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+                          SSI_SISR_TUE0 | SSI_SISR_TUE1,
 };
 
 static struct fsl_ssi_soc_data fsl_ssi_imx51 = {
        .imx = true,
        .offline_config = false,
-       .sisr_write_mask = CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
-               CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+       .sisr_write_mask = SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+                          SSI_SISR_TUE0 | SSI_SISR_TUE1,
 };
 
 static const struct of_device_id fsl_ssi_ids[] = {
@@ -322,108 +335,86 @@ static const struct of_device_id fsl_ssi_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, fsl_ssi_ids);
 
-static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_ac97(struct fsl_ssi *ssi)
 {
-       return (ssi_private->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
+       return (ssi->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
                SND_SOC_DAIFMT_AC97;
 }
 
-static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_i2s_master(struct fsl_ssi *ssi)
 {
-       return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+       return (ssi->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
                SND_SOC_DAIFMT_CBS_CFS;
 }
 
-static bool fsl_ssi_is_i2s_cbm_cfs(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_i2s_cbm_cfs(struct fsl_ssi *ssi)
 {
-       return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+       return (ssi->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
                SND_SOC_DAIFMT_CBM_CFS;
 }
+
 /**
- * fsl_ssi_isr: SSI interrupt handler
- *
- * Although it's possible to use the interrupt handler to send and receive
- * data to/from the SSI, we use the DMA instead.  Programming is more
- * complicated, but the performance is much better.
- *
- * This interrupt handler is used only to gather statistics.
- *
- * @irq: IRQ of the SSI device
- * @dev_id: pointer to the ssi_private structure for this SSI device
+ * Interrupt handler to gather states
  */
 static irqreturn_t fsl_ssi_isr(int irq, void *dev_id)
 {
-       struct fsl_ssi_private *ssi_private = dev_id;
-       struct regmap *regs = ssi_private->regs;
+       struct fsl_ssi *ssi = dev_id;
+       struct regmap *regs = ssi->regs;
        __be32 sisr;
        __be32 sisr2;
 
-       /* We got an interrupt, so read the status register to see what we
-          were interrupted for.  We mask it with the Interrupt Enable register
-          so that we only check for events that we're interested in.
-        */
-       regmap_read(regs, CCSR_SSI_SISR, &sisr);
+       regmap_read(regs, REG_SSI_SISR, &sisr);
 
-       sisr2 = sisr & ssi_private->soc->sisr_write_mask;
+       sisr2 = sisr & ssi->soc->sisr_write_mask;
        /* Clear the bits that we set */
        if (sisr2)
-               regmap_write(regs, CCSR_SSI_SISR, sisr2);
+               regmap_write(regs, REG_SSI_SISR, sisr2);
 
-       fsl_ssi_dbg_isr(&ssi_private->dbg_stats, sisr);
+       fsl_ssi_dbg_isr(&ssi->dbg_stats, sisr);
 
        return IRQ_HANDLED;
 }
 
-/*
- * Enable/Disable all rx/tx config flags at once.
+/**
+ * Enable or disable all rx/tx config flags at once
  */
-static void fsl_ssi_rxtx_config(struct fsl_ssi_private *ssi_private,
-               bool enable)
+static void fsl_ssi_rxtx_config(struct fsl_ssi *ssi, bool enable)
 {
-       struct regmap *regs = ssi_private->regs;
-       struct fsl_ssi_rxtx_reg_val *vals = &ssi_private->rxtx_reg_val;
+       struct regmap *regs = ssi->regs;
+       struct fsl_ssi_regvals *vals = ssi->regvals;
 
        if (enable) {
-               regmap_update_bits(regs, CCSR_SSI_SIER,
-                               vals->rx.sier | vals->tx.sier,
-                               vals->rx.sier | vals->tx.sier);
-               regmap_update_bits(regs, CCSR_SSI_SRCR,
-                               vals->rx.srcr | vals->tx.srcr,
-                               vals->rx.srcr | vals->tx.srcr);
-               regmap_update_bits(regs, CCSR_SSI_STCR,
-                               vals->rx.stcr | vals->tx.stcr,
-                               vals->rx.stcr | vals->tx.stcr);
+               regmap_update_bits(regs, REG_SSI_SIER,
+                                  vals[RX].sier | vals[TX].sier,
+                                  vals[RX].sier | vals[TX].sier);
+               regmap_update_bits(regs, REG_SSI_SRCR,
+                                  vals[RX].srcr | vals[TX].srcr,
+                                  vals[RX].srcr | vals[TX].srcr);
+               regmap_update_bits(regs, REG_SSI_STCR,
+                                  vals[RX].stcr | vals[TX].stcr,
+                                  vals[RX].stcr | vals[TX].stcr);
        } else {
-               regmap_update_bits(regs, CCSR_SSI_SRCR,
-                               vals->rx.srcr | vals->tx.srcr, 0);
-               regmap_update_bits(regs, CCSR_SSI_STCR,
-                               vals->rx.stcr | vals->tx.stcr, 0);
-               regmap_update_bits(regs, CCSR_SSI_SIER,
-                               vals->rx.sier | vals->tx.sier, 0);
+               regmap_update_bits(regs, REG_SSI_SRCR,
+                                  vals[RX].srcr | vals[TX].srcr, 0);
+               regmap_update_bits(regs, REG_SSI_STCR,
+                                  vals[RX].stcr | vals[TX].stcr, 0);
+               regmap_update_bits(regs, REG_SSI_SIER,
+                                  vals[RX].sier | vals[TX].sier, 0);
        }
 }
 
-/*
- * Clear RX or TX FIFO to remove samples from the previous
- * stream session which may be still present in the FIFO and
- * may introduce bad samples and/or channel slipping.
- *
- * Note: The SOR is not documented in recent IMX datasheet, but
- * is described in IMX51 reference manual at section 56.3.3.15.
+/**
+ * Clear remaining data in the FIFO to avoid dirty data or channel slipping
  */
-static void fsl_ssi_fifo_clear(struct fsl_ssi_private *ssi_private,
-               bool is_rx)
+static void fsl_ssi_fifo_clear(struct fsl_ssi *ssi, bool is_rx)
 {
-       if (is_rx) {
-               regmap_update_bits(ssi_private->regs, CCSR_SSI_SOR,
-                       CCSR_SSI_SOR_RX_CLR, CCSR_SSI_SOR_RX_CLR);
-       } else {
-               regmap_update_bits(ssi_private->regs, CCSR_SSI_SOR,
-                       CCSR_SSI_SOR_TX_CLR, CCSR_SSI_SOR_TX_CLR);
-       }
+       bool tx = !is_rx;
+
+       regmap_update_bits(ssi->regs, REG_SSI_SOR,
+                          SSI_SOR_xX_CLR(tx), SSI_SOR_xX_CLR(tx));
 }
 
-/*
+/**
  * Calculate the bits that have to be disabled for the current stream that is
  * getting disabled. This keeps the bits enabled that are necessary for the
  * second stream to work if 'stream_active' is true.
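
Note: the fsl_ssi_disable_val() macro described just above (its body appears as context
at the start of the next hunk) clears only those bits of the stopping stream that the
other stream does not also need. A hedged worked example with made-up bit values:

        /* vals_disable = 0b0110 (stream being stopped)
         * vals_stream  = 0b0011 (the other direction), shared bit: 0b0010
         *
         * other stream still active (stream_active = 1):
         *   vals_stream * 1        = 0b0011
         *   vals_disable ^ 0b0011  = 0b0101   (bits not shared with the other stream)
         *   vals_disable & 0b0101  = 0b0100   -> only the non-shared bit is cleared
         *
         * other stream idle (stream_active = 0):
         *   vals_stream * 0        = 0b0000
         *   vals_disable ^ 0b0000  = 0b0110
         *   vals_disable & 0b0110  = 0b0110   -> everything may be cleared
         */
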
@@ -443,261 +434,239 @@ static void fsl_ssi_fifo_clear(struct fsl_ssi_private *ssi_private,
        ((vals_disable) & \
         ((vals_disable) ^ ((vals_stream) * (u32)!!(stream_active))))
 
-/*
- * Enable/Disable a ssi configuration. You have to pass either
- * ssi_private->rxtx_reg_val.rx or tx as vals parameter.
+/**
+ * Enable or disable SSI configuration.
  */
-static void fsl_ssi_config(struct fsl_ssi_private *ssi_private, bool enable,
-               struct fsl_ssi_reg_val *vals)
+static void fsl_ssi_config(struct fsl_ssi *ssi, bool enable,
+                          struct fsl_ssi_regvals *vals)
 {
-       struct regmap *regs = ssi_private->regs;
-       struct fsl_ssi_reg_val *avals;
+       struct regmap *regs = ssi->regs;
+       struct fsl_ssi_regvals *avals;
        int nr_active_streams;
-       u32 scr_val;
+       u32 scr;
        int keep_active;
 
-       regmap_read(regs, CCSR_SSI_SCR, &scr_val);
+       regmap_read(regs, REG_SSI_SCR, &scr);
 
-       nr_active_streams = !!(scr_val & CCSR_SSI_SCR_TE) +
-                               !!(scr_val & CCSR_SSI_SCR_RE);
+       nr_active_streams = !!(scr & SSI_SCR_TE) + !!(scr & SSI_SCR_RE);
 
        if (nr_active_streams - 1 > 0)
                keep_active = 1;
        else
                keep_active = 0;
 
-       /* Find the other direction values rx or tx which we do not want to
-        * modify */
-       if (&ssi_private->rxtx_reg_val.rx == vals)
-               avals = &ssi_private->rxtx_reg_val.tx;
+       /* Get the opposite direction to keep its values untouched */
+       if (&ssi->regvals[RX] == vals)
+               avals = &ssi->regvals[TX];
        else
-               avals = &ssi_private->rxtx_reg_val.rx;
+               avals = &ssi->regvals[RX];
 
-       /* If vals should be disabled, start with disabling the unit */
        if (!enable) {
+               /*
+                * To keep the other stream safe, exclude shared bits between
+                * both streams, and get safe bits to disable current stream
+                */
                u32 scr = fsl_ssi_disable_val(vals->scr, avals->scr,
-                               keep_active);
-               regmap_update_bits(regs, CCSR_SSI_SCR, scr, 0);
+                                             keep_active);
+               /* Safely disable SCR register for the stream */
+               regmap_update_bits(regs, REG_SSI_SCR, scr, 0);
        }
 
        /*
-        * We are running on a SoC which does not support online SSI
-        * reconfiguration, so we have to enable all necessary flags at once
-        * even if we do not use them later (capture and playback configuration)
+        * For cases where online configuration is not supported,
+        * 1) Enable all necessary bits of both streams when 1st stream starts
+        *    even if the opposite stream will not start
+        * 2) Disable all remaining bits of both streams when last stream ends
         */
-       if (ssi_private->soc->offline_config) {
-               if ((enable && !nr_active_streams) ||
-                               (!enable && !keep_active))
-                       fsl_ssi_rxtx_config(ssi_private, enable);
+       if (ssi->soc->offline_config) {
+               if ((enable && !nr_active_streams) || (!enable && !keep_active))
+                       fsl_ssi_rxtx_config(ssi, enable);
 
                goto config_done;
        }
 
-       /*
-        * Configure single direction units while the SSI unit is running
-        * (online configuration)
-        */
+       /* Online configure single direction while SSI is running */
        if (enable) {
-               fsl_ssi_fifo_clear(ssi_private, vals->scr & CCSR_SSI_SCR_RE);
+               fsl_ssi_fifo_clear(ssi, vals->scr & SSI_SCR_RE);
 
-               regmap_update_bits(regs, CCSR_SSI_SRCR, vals->srcr, vals->srcr);
-               regmap_update_bits(regs, CCSR_SSI_STCR, vals->stcr, vals->stcr);
-               regmap_update_bits(regs, CCSR_SSI_SIER, vals->sier, vals->sier);
+               regmap_update_bits(regs, REG_SSI_SRCR, vals->srcr, vals->srcr);
+               regmap_update_bits(regs, REG_SSI_STCR, vals->stcr, vals->stcr);
+               regmap_update_bits(regs, REG_SSI_SIER, vals->sier, vals->sier);
        } else {
                u32 sier;
                u32 srcr;
                u32 stcr;
 
                /*
-                * Disabling the necessary flags for one of rx/tx while the
-                * other stream is active is a little bit more difficult. We
-                * have to disable only those flags that differ between both
-                * streams (rx XOR tx) and that are set in the stream that is
-                * disabled now. Otherwise we could alter flags of the other
-                * stream
+                * To keep the other stream safe, exclude shared bits between
+                * both streams, and get safe bits to disable current stream
                 */
-
-               /* These assignments are simply vals without bits set in avals*/
                sier = fsl_ssi_disable_val(vals->sier, avals->sier,
-                               keep_active);
+                                          keep_active);
                srcr = fsl_ssi_disable_val(vals->srcr, avals->srcr,
-                               keep_active);
+                                          keep_active);
                stcr = fsl_ssi_disable_val(vals->stcr, avals->stcr,
-                               keep_active);
+                                          keep_active);
 
-               regmap_update_bits(regs, CCSR_SSI_SRCR, srcr, 0);
-               regmap_update_bits(regs, CCSR_SSI_STCR, stcr, 0);
-               regmap_update_bits(regs, CCSR_SSI_SIER, sier, 0);
+               /* Safely disable other control registers for the stream */
+               regmap_update_bits(regs, REG_SSI_SRCR, srcr, 0);
+               regmap_update_bits(regs, REG_SSI_STCR, stcr, 0);
+               regmap_update_bits(regs, REG_SSI_SIER, sier, 0);
        }
 
 config_done:
        /* Enabling of subunits is done after configuration */
        if (enable) {
-               if (ssi_private->use_dma && (vals->scr & CCSR_SSI_SCR_TE)) {
-                       /*
-                        * Be sure the Tx FIFO is filled when TE is set.
-                        * Otherwise, there are some chances to start the
-                        * playback with some void samples inserted first,
-                        * generating a channel slip.
-                        *
-                        * First, SSIEN must be set, to let the FIFO be filled.
-                        *
-                        * Notes:
-                        * - Limit this fix to the DMA case until FIQ cases can
-                        *   be tested.
-                        * - Limit the length of the busy loop to not lock the
-                        *   system too long, even if 1-2 loops are sufficient
-                        *   in general.
-                        */
+               /*
+                * Start DMA before setting TE to avoid FIFO underrun
+                * which may cause a channel slip or a channel swap
+                *
+                * TODO: FIQ cases might also need this upon testing
+                */
+               if (ssi->use_dma && (vals->scr & SSI_SCR_TE)) {
                        int i;
                        int max_loop = 100;
-                       regmap_update_bits(regs, CCSR_SSI_SCR,
-                                       CCSR_SSI_SCR_SSIEN, CCSR_SSI_SCR_SSIEN);
+
+                       /* Enable SSI first to send TX DMA request */
+                       regmap_update_bits(regs, REG_SSI_SCR,
+                                          SSI_SCR_SSIEN, SSI_SCR_SSIEN);
+
+                       /* Busy wait until TX FIFO not empty -- DMA working */
                        for (i = 0; i < max_loop; i++) {
                                u32 sfcsr;
-                               regmap_read(regs, CCSR_SSI_SFCSR, &sfcsr);
-                               if (CCSR_SSI_SFCSR_TFCNT0(sfcsr))
+                               regmap_read(regs, REG_SSI_SFCSR, &sfcsr);
+                               if (SSI_SFCSR_TFCNT0(sfcsr))
                                        break;
                        }
                        if (i == max_loop) {
-                               dev_err(ssi_private->dev,
+                               dev_err(ssi->dev,
                                        "Timeout waiting TX FIFO filling\n");
                        }
                }
-               regmap_update_bits(regs, CCSR_SSI_SCR, vals->scr, vals->scr);
+               /* Enable all remaining bits */
+               regmap_update_bits(regs, REG_SSI_SCR, vals->scr, vals->scr);
        }
 }
 
+static void fsl_ssi_rx_config(struct fsl_ssi *ssi, bool enable)
+{
+       fsl_ssi_config(ssi, enable, &ssi->regvals[RX]);
+}
 
-static void fsl_ssi_rx_config(struct fsl_ssi_private *ssi_private, bool enable)
+static void fsl_ssi_tx_ac97_saccst_setup(struct fsl_ssi *ssi)
 {
-       fsl_ssi_config(ssi_private, enable, &ssi_private->rxtx_reg_val.rx);
+       struct regmap *regs = ssi->regs;
+
+       /* no SACC{ST,EN,DIS} regs on imx21-class SSI */
+       if (!ssi->soc->imx21regs) {
+               /* Disable all channel slots */
+               regmap_write(regs, REG_SSI_SACCDIS, 0xff);
+               /* Enable slots 3 & 4 -- PCM Playback Left & Right channels */
+               regmap_write(regs, REG_SSI_SACCEN, 0x300);
+       }
 }
 
-static void fsl_ssi_tx_config(struct fsl_ssi_private *ssi_private, bool enable)
+static void fsl_ssi_tx_config(struct fsl_ssi *ssi, bool enable)
 {
-       fsl_ssi_config(ssi_private, enable, &ssi_private->rxtx_reg_val.tx);
+       /*
+        * SACCST might be modified via AC Link by a CODEC if it sends
+        * extra bits in their SLOTREQ requests, which'll accidentally
+        * send valid data to slots other than normal playback slots.
+        *
+        * To be safe, configure SACCST right before TX starts.
+        */
+       if (enable && fsl_ssi_is_ac97(ssi))
+               fsl_ssi_tx_ac97_saccst_setup(ssi);
+
+       fsl_ssi_config(ssi, enable, &ssi->regvals[TX]);
 }
 
-/*
- * Setup rx/tx register values used to enable/disable the streams. These will
- * be used later in fsl_ssi_config to setup the streams without the need to
- * check for all different SSI modes.
+/**
+ * Cache critical bits of SIER, SRCR, STCR and SCR to later set them safely
  */
-static void fsl_ssi_setup_reg_vals(struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_setup_regvals(struct fsl_ssi *ssi)
 {
-       struct fsl_ssi_rxtx_reg_val *reg = &ssi_private->rxtx_reg_val;
-
-       reg->rx.sier = CCSR_SSI_SIER_RFF0_EN;
-       reg->rx.srcr = CCSR_SSI_SRCR_RFEN0;
-       reg->rx.scr = 0;
-       reg->tx.sier = CCSR_SSI_SIER_TFE0_EN;
-       reg->tx.stcr = CCSR_SSI_STCR_TFEN0;
-       reg->tx.scr = 0;
-
-       if (!fsl_ssi_is_ac97(ssi_private)) {
-               reg->rx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_RE;
-               reg->rx.sier |= CCSR_SSI_SIER_RFF0_EN;
-               reg->tx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE;
-               reg->tx.sier |= CCSR_SSI_SIER_TFE0_EN;
+       struct fsl_ssi_regvals *vals = ssi->regvals;
+
+       vals[RX].sier = SSI_SIER_RFF0_EN;
+       vals[RX].srcr = SSI_SRCR_RFEN0;
+       vals[RX].scr = 0;
+       vals[TX].sier = SSI_SIER_TFE0_EN;
+       vals[TX].stcr = SSI_STCR_TFEN0;
+       vals[TX].scr = 0;
+
+       /* AC97 has already enabled SSIEN, RE and TE, so ignore them */
+       if (!fsl_ssi_is_ac97(ssi)) {
+               vals[RX].scr = SSI_SCR_SSIEN | SSI_SCR_RE;
+               vals[TX].scr = SSI_SCR_SSIEN | SSI_SCR_TE;
        }
 
-       if (ssi_private->use_dma) {
-               reg->rx.sier |= CCSR_SSI_SIER_RDMAE;
-               reg->tx.sier |= CCSR_SSI_SIER_TDMAE;
+       if (ssi->use_dma) {
+               vals[RX].sier |= SSI_SIER_RDMAE;
+               vals[TX].sier |= SSI_SIER_TDMAE;
        } else {
-               reg->rx.sier |= CCSR_SSI_SIER_RIE;
-               reg->tx.sier |= CCSR_SSI_SIER_TIE;
+               vals[RX].sier |= SSI_SIER_RIE;
+               vals[TX].sier |= SSI_SIER_TIE;
        }
 
-       reg->rx.sier |= FSLSSI_SIER_DBG_RX_FLAGS;
-       reg->tx.sier |= FSLSSI_SIER_DBG_TX_FLAGS;
+       vals[RX].sier |= FSLSSI_SIER_DBG_RX_FLAGS;
+       vals[TX].sier |= FSLSSI_SIER_DBG_TX_FLAGS;
 }
 
-static void fsl_ssi_setup_ac97(struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_setup_ac97(struct fsl_ssi *ssi)
 {
-       struct regmap *regs = ssi_private->regs;
+       struct regmap *regs = ssi->regs;
 
-       /*
-        * Setup the clock control register
-        */
-       regmap_write(regs, CCSR_SSI_STCCR,
-                       CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13));
-       regmap_write(regs, CCSR_SSI_SRCCR,
-                       CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13));
+       /* Setup the clock control register */
+       regmap_write(regs, REG_SSI_STCCR, SSI_SxCCR_WL(17) | SSI_SxCCR_DC(13));
+       regmap_write(regs, REG_SSI_SRCCR, SSI_SxCCR_WL(17) | SSI_SxCCR_DC(13));
 
-       /*
-        * Enable AC97 mode and startup the SSI
-        */
-       regmap_write(regs, CCSR_SSI_SACNT,
-                       CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV);
-
-       /* no SACC{ST,EN,DIS} regs on imx21-class SSI */
-       if (!ssi_private->soc->imx21regs) {
-               regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
-               regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
-       }
+       /* Enable AC97 mode and startup the SSI */
+       regmap_write(regs, REG_SSI_SACNT, SSI_SACNT_AC97EN | SSI_SACNT_FV);
 
-       /*
-        * Enable SSI, Transmit and Receive. AC97 has to communicate with the
-        * codec before a stream is started.
-        */
-       regmap_update_bits(regs, CCSR_SSI_SCR,
-                       CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE,
-                       CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE);
+       /* AC97 has to communicate with codec before starting a stream */
+       regmap_update_bits(regs, REG_SSI_SCR,
+                          SSI_SCR_SSIEN | SSI_SCR_TE | SSI_SCR_RE,
+                          SSI_SCR_SSIEN | SSI_SCR_TE | SSI_SCR_RE);
 
-       regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_WAIT(3));
+       regmap_write(regs, REG_SSI_SOR, SSI_SOR_WAIT(3));
 }
 
-/**
- * fsl_ssi_startup: create a new substream
- *
- * This is the first function called when a stream is opened.
- *
- * If this is the first stream open, then grab the IRQ and program most of
- * the SSI registers.
- */
 static int fsl_ssi_startup(struct snd_pcm_substream *substream,
                           struct snd_soc_dai *dai)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct fsl_ssi_private *ssi_private =
-               snd_soc_dai_get_drvdata(rtd->cpu_dai);
+       struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
        int ret;
 
-       ret = clk_prepare_enable(ssi_private->clk);
+       ret = clk_prepare_enable(ssi->clk);
        if (ret)
                return ret;
 
-       /* When using dual fifo mode, it is safer to ensure an even period
+       /*
+        * When using dual fifo mode, it is safer to ensure an even period
         * size. If appearing to an odd number while DMA always starts its
         * task from fifo0, fifo1 would be neglected at the end of each
         * period. But SSI would still access fifo1 with an invalid data.
         */
-       if (ssi_private->use_dual_fifo)
+       if (ssi->use_dual_fifo)
                snd_pcm_hw_constraint_step(substream->runtime, 0,
-                               SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
+                                          SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
 
        return 0;
 }
 
-/**
- * fsl_ssi_shutdown: shutdown the SSI
- *
- */
 static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
-                               struct snd_soc_dai *dai)
+                            struct snd_soc_dai *dai)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct fsl_ssi_private *ssi_private =
-               snd_soc_dai_get_drvdata(rtd->cpu_dai);
-
-       clk_disable_unprepare(ssi_private->clk);
+       struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
 
+       clk_disable_unprepare(ssi->clk);
 }
 
 /**
- * fsl_ssi_set_bclk - configure Digital Audio Interface bit clock
+ * Configure Digital Audio Interface bit clock
  *
  * Note: This function can be only called when using SSI as DAI master
  *
@@ -706,12 +675,13 @@ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
  *       (In 2-channel I2S Master mode, slot_width is fixed 32)
  */
 static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
-               struct snd_soc_dai *cpu_dai,
-               struct snd_pcm_hw_params *hw_params)
+                           struct snd_soc_dai *dai,
+                           struct snd_pcm_hw_params *hw_params)
 {
-       struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
-       struct regmap *regs = ssi_private->regs;
-       int synchronous = ssi_private->cpu_dai_drv.symmetric_rates, ret;
+       bool tx2, tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+       struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+       struct regmap *regs = ssi->regs;
+       int synchronous = ssi->cpu_dai_drv.symmetric_rates, ret;
        u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
        unsigned long clkrate, baudrate, tmprate;
        unsigned int slots = params_channels(hw_params);
@@ -721,29 +691,29 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
        bool baudclk_is_used;
 
        /* Override slots and slot_width if being specifically set... */
-       if (ssi_private->slots)
-               slots = ssi_private->slots;
+       if (ssi->slots)
+               slots = ssi->slots;
        /* ...but keep 32 bits if slots is 2 -- I2S Master mode */
-       if (ssi_private->slot_width && slots != 2)
-               slot_width = ssi_private->slot_width;
+       if (ssi->slot_width && slots != 2)
+               slot_width = ssi->slot_width;
 
        /* Generate bit clock based on the slot number and slot width */
        freq = slots * slot_width * params_rate(hw_params);
 
        /* Don't apply it to any non-baudclk circumstance */
-       if (IS_ERR(ssi_private->baudclk))
+       if (IS_ERR(ssi->baudclk))
                return -EINVAL;
 
        /*
         * Hardware limitation: The bclk rate must be
         * never greater than 1/5 IPG clock rate
         */
-       if (freq * 5 > clk_get_rate(ssi_private->clk)) {
-               dev_err(cpu_dai->dev, "bitclk > ipgclk/5\n");
+       if (freq * 5 > clk_get_rate(ssi->clk)) {
+               dev_err(dai->dev, "bitclk > ipgclk / 5\n");
                return -EINVAL;
        }
 
-       baudclk_is_used = ssi_private->baudclk_streams & ~(BIT(substream->stream));
+       baudclk_is_used = ssi->baudclk_streams & ~(BIT(substream->stream));
 
        /* It should be already enough to divide clock by setting pm alone */
        psr = 0;
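
Note: the bit clock requested above is freq = slots * slot_width * rate, and the hunk
enforces the hardware limit that freq * 5 may not exceed the ipg clock rate. A quick
worked example with illustrative numbers (not from this patch):

        /* 2 slots x 32-bit slot width x 48000 Hz:
         *   freq = 2 * 32 * 48000 = 3,072,000 Hz (3.072 MHz bit clock)
         * Limit check from the code above:
         *   freq * 5 = 15,360,000 Hz, so clk_get_rate(ssi->clk) must be at least
         *   15.36 MHz, otherwise the driver rejects it with "bitclk > ipgclk / 5".
         */
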
@@ -755,9 +725,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
                tmprate = freq * factor * (i + 1);
 
                if (baudclk_is_used)
-                       clkrate = clk_get_rate(ssi_private->baudclk);
+                       clkrate = clk_get_rate(ssi->baudclk);
                else
-                       clkrate = clk_round_rate(ssi_private->baudclk, tmprate);
+                       clkrate = clk_round_rate(ssi->baudclk, tmprate);
 
                clkrate /= factor;
                afreq = clkrate / (i + 1);
@@ -788,24 +758,22 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
 
        /* No proper pm found if it is still remaining the initial value */
        if (pm == 999) {
-               dev_err(cpu_dai->dev, "failed to handle the required sysclk\n");
+               dev_err(dai->dev, "failed to handle the required sysclk\n");
                return -EINVAL;
        }
 
-       stccr = CCSR_SSI_SxCCR_PM(pm + 1) | (div2 ? CCSR_SSI_SxCCR_DIV2 : 0) |
-               (psr ? CCSR_SSI_SxCCR_PSR : 0);
-       mask = CCSR_SSI_SxCCR_PM_MASK | CCSR_SSI_SxCCR_DIV2 |
-               CCSR_SSI_SxCCR_PSR;
+       stccr = SSI_SxCCR_PM(pm + 1) | (div2 ? SSI_SxCCR_DIV2 : 0) |
+               (psr ? SSI_SxCCR_PSR : 0);
+       mask = SSI_SxCCR_PM_MASK | SSI_SxCCR_DIV2 | SSI_SxCCR_PSR;
 
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || synchronous)
-               regmap_update_bits(regs, CCSR_SSI_STCCR, mask, stccr);
-       else
-               regmap_update_bits(regs, CCSR_SSI_SRCCR, mask, stccr);
+       /* STCCR is used for RX in synchronous mode */
+       tx2 = tx || synchronous;
+       regmap_update_bits(regs, REG_SSI_SxCCR(tx2), mask, stccr);
 
        if (!baudclk_is_used) {
-               ret = clk_set_rate(ssi_private->baudclk, baudrate);
+               ret = clk_set_rate(ssi->baudclk, baudrate);
                if (ret) {
-                       dev_err(cpu_dai->dev, "failed to set baudclk rate\n");
+                       dev_err(dai->dev, "failed to set baudclk rate\n");
                        return -EINVAL;
                }
        }
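
Note: the "tx2 = tx || synchronous" line above collapses the removed if/else on STCCR vs
SRCCR. REG_SSI_SxCCR() is defined in fsl_ssi.h (not shown in this excerpt) and presumably
selects STCCR when its argument is true and SRCCR otherwise, so capture in synchronous
mode keeps programming STCCR exactly as the old branch did. A hedged reading of the idiom:

        tx2 = tx || synchronous;                        /* playback, or capture in synchronous mode */
        regmap_update_bits(regs, REG_SSI_SxCCR(tx2),    /* STCCR if tx2 is set, else SRCCR */
                           mask, stccr);
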
@@ -814,185 +782,165 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
 }
 
 /**
- * fsl_ssi_hw_params - program the sample size
- *
- * Most of the SSI registers have been programmed in the startup function,
- * but the word length must be programmed here.  Unfortunately, programming
- * the SxCCR.WL bits requires the SSI to be temporarily disabled.  This can
- * cause a problem with supporting simultaneous playback and capture.  If
- * the SSI is already playing a stream, then that stream may be temporarily
- * stopped when you start capture.
+ * Configure SSI based on PCM hardware parameters
  *
- * Note: The SxCCR.DC and SxCCR.PM bits are only used if the SSI is the
- * clock master.
+ * Notes:
+ * 1) SxCCR.WL bits are critical bits that require SSI to be temporarily
+ *    disabled on offline_config SoCs. Even for online configurable SoCs
+ *    running in synchronous mode (both TX and RX use STCCR), it is not
+ *    safe to re-configure them when both streams start running.
+ * 2) SxCCR.PM, SxCCR.DIV2 and SxCCR.PSR bits will be configured in the
+ *    fsl_ssi_set_bclk() if SSI is the DAI clock master.
  */
 static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
-       struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *cpu_dai)
+                            struct snd_pcm_hw_params *hw_params,
+                            struct snd_soc_dai *dai)
 {
-       struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
-       struct regmap *regs = ssi_private->regs;
+       bool tx2, tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+       struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+       struct regmap *regs = ssi->regs;
        unsigned int channels = params_channels(hw_params);
        unsigned int sample_size = params_width(hw_params);
-       u32 wl = CCSR_SSI_SxCCR_WL(sample_size);
+       u32 wl = SSI_SxCCR_WL(sample_size);
        int ret;
-       u32 scr_val;
+       u32 scr;
        int enabled;
 
-       regmap_read(regs, CCSR_SSI_SCR, &scr_val);
-       enabled = scr_val & CCSR_SSI_SCR_SSIEN;
+       regmap_read(regs, REG_SSI_SCR, &scr);
+       enabled = scr & SSI_SCR_SSIEN;
 
        /*
-        * If we're in synchronous mode, and the SSI is already enabled,
-        * then STCCR is already set properly.
+        * SSI is properly configured if it is enabled and running in
+        * the synchronous mode; Note that AC97 mode is an exception
+        * that should set separate configurations for STCCR and SRCCR
+        * despite running in the synchronous mode.
         */
-       if (enabled && ssi_private->cpu_dai_drv.symmetric_rates)
+       if (enabled && ssi->cpu_dai_drv.symmetric_rates)
                return 0;
 
-       if (fsl_ssi_is_i2s_master(ssi_private)) {
-               ret = fsl_ssi_set_bclk(substream, cpu_dai, hw_params);
+       if (fsl_ssi_is_i2s_master(ssi)) {
+               ret = fsl_ssi_set_bclk(substream, dai, hw_params);
                if (ret)
                        return ret;
 
                /* Do not enable the clock if it is already enabled */
-               if (!(ssi_private->baudclk_streams & BIT(substream->stream))) {
-                       ret = clk_prepare_enable(ssi_private->baudclk);
+               if (!(ssi->baudclk_streams & BIT(substream->stream))) {
+                       ret = clk_prepare_enable(ssi->baudclk);
                        if (ret)
                                return ret;
 
-                       ssi_private->baudclk_streams |= BIT(substream->stream);
+                       ssi->baudclk_streams |= BIT(substream->stream);
                }
        }
 
-       if (!fsl_ssi_is_ac97(ssi_private)) {
-               u8 i2smode;
-               /*
-                * Switch to normal net mode in order to have a frame sync
-                * signal every 32 bits instead of 16 bits
-                */
-               if (fsl_ssi_is_i2s_cbm_cfs(ssi_private) && sample_size == 16)
-                       i2smode = CCSR_SSI_SCR_I2S_MODE_NORMAL |
-                               CCSR_SSI_SCR_NET;
+       if (!fsl_ssi_is_ac97(ssi)) {
+               u8 i2s_net;
+               /* Normal + Network mode to send 16-bit data in 32-bit frames */
+               if (fsl_ssi_is_i2s_cbm_cfs(ssi) && sample_size == 16)
+                       i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET;
                else
-                       i2smode = ssi_private->i2s_mode;
+                       i2s_net = ssi->i2s_net;
 
-               regmap_update_bits(regs, CCSR_SSI_SCR,
-                               CCSR_SSI_SCR_NET | CCSR_SSI_SCR_I2S_MODE_MASK,
-                               channels == 1 ? 0 : i2smode);
+               regmap_update_bits(regs, REG_SSI_SCR,
+                                  SSI_SCR_I2S_NET_MASK,
+                                  channels == 1 ? 0 : i2s_net);
        }
 
-       /*
-        * FIXME: The documentation says that SxCCR[WL] should not be
-        * modified while the SSI is enabled.  The only time this can
-        * happen is if we're trying to do simultaneous playback and
-        * capture in asynchronous mode.  Unfortunately, I have been enable
-        * to get that to work at all on the P1022DS.  Therefore, we don't
-        * bother to disable/enable the SSI when setting SxCCR[WL], because
-        * the SSI will stop anyway.  Maybe one day, this will get fixed.
-        */
-
        /* In synchronous mode, the SSI uses STCCR for capture */
-       if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ||
-           ssi_private->cpu_dai_drv.symmetric_rates)
-               regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_WL_MASK,
-                               wl);
-       else
-               regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_WL_MASK,
-                               wl);
+       tx2 = tx || ssi->cpu_dai_drv.symmetric_rates;
+       regmap_update_bits(regs, REG_SSI_SxCCR(tx2), SSI_SxCCR_WL_MASK, wl);
 
        return 0;
 }
 
 static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
-               struct snd_soc_dai *cpu_dai)
+                          struct snd_soc_dai *dai)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct fsl_ssi_private *ssi_private =
-               snd_soc_dai_get_drvdata(rtd->cpu_dai);
+       struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
 
-       if (fsl_ssi_is_i2s_master(ssi_private) &&
-                       ssi_private->baudclk_streams & BIT(substream->stream)) {
-               clk_disable_unprepare(ssi_private->baudclk);
-               ssi_private->baudclk_streams &= ~BIT(substream->stream);
+       if (fsl_ssi_is_i2s_master(ssi) &&
+           ssi->baudclk_streams & BIT(substream->stream)) {
+               clk_disable_unprepare(ssi->baudclk);
+               ssi->baudclk_streams &= ~BIT(substream->stream);
        }
 
        return 0;
 }
 
 static int _fsl_ssi_set_dai_fmt(struct device *dev,
-                               struct fsl_ssi_private *ssi_private,
-                               unsigned int fmt)
+                               struct fsl_ssi *ssi, unsigned int fmt)
 {
-       struct regmap *regs = ssi_private->regs;
+       struct regmap *regs = ssi->regs;
        u32 strcr = 0, stcr, srcr, scr, mask;
        u8 wm;
 
-       ssi_private->dai_fmt = fmt;
+       ssi->dai_fmt = fmt;
 
-       if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) {
-               dev_err(dev, "baudclk is missing which is necessary for master mode\n");
+       if (fsl_ssi_is_i2s_master(ssi) && IS_ERR(ssi->baudclk)) {
+               dev_err(dev, "missing baudclk for master mode\n");
                return -EINVAL;
        }
 
-       fsl_ssi_setup_reg_vals(ssi_private);
+       fsl_ssi_setup_regvals(ssi);
 
-       regmap_read(regs, CCSR_SSI_SCR, &scr);
-       scr &= ~(CCSR_SSI_SCR_SYN | CCSR_SSI_SCR_I2S_MODE_MASK);
-       scr |= CCSR_SSI_SCR_SYNC_TX_FS;
+       regmap_read(regs, REG_SSI_SCR, &scr);
+       scr &= ~(SSI_SCR_SYN | SSI_SCR_I2S_MODE_MASK);
+       /* Synchronize frame sync clock for TE to avoid data slipping */
+       scr |= SSI_SCR_SYNC_TX_FS;
 
-       mask = CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TFDIR | CCSR_SSI_STCR_TXDIR |
-               CCSR_SSI_STCR_TSCKP | CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TFSL |
-               CCSR_SSI_STCR_TEFS;
-       regmap_read(regs, CCSR_SSI_STCR, &stcr);
-       regmap_read(regs, CCSR_SSI_SRCR, &srcr);
+       mask = SSI_STCR_TXBIT0 | SSI_STCR_TFDIR | SSI_STCR_TXDIR |
+              SSI_STCR_TSCKP | SSI_STCR_TFSI | SSI_STCR_TFSL | SSI_STCR_TEFS;
+       regmap_read(regs, REG_SSI_STCR, &stcr);
+       regmap_read(regs, REG_SSI_SRCR, &srcr);
        stcr &= ~mask;
        srcr &= ~mask;
 
-       ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
+       /* Use Network mode as default */
+       ssi->i2s_net = SSI_SCR_NET;
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
-               regmap_update_bits(regs, CCSR_SSI_STCCR,
-                                  CCSR_SSI_SxCCR_DC_MASK,
-                                  CCSR_SSI_SxCCR_DC(2));
-               regmap_update_bits(regs, CCSR_SSI_SRCCR,
-                                  CCSR_SSI_SxCCR_DC_MASK,
-                                  CCSR_SSI_SxCCR_DC(2));
+               regmap_update_bits(regs, REG_SSI_STCCR,
+                                  SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
+               regmap_update_bits(regs, REG_SSI_SRCCR,
+                                  SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
                switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
                case SND_SOC_DAIFMT_CBM_CFS:
                case SND_SOC_DAIFMT_CBS_CFS:
-                       ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
+                       ssi->i2s_net |= SSI_SCR_I2S_MODE_MASTER;
                        break;
                case SND_SOC_DAIFMT_CBM_CFM:
-                       ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
+                       ssi->i2s_net |= SSI_SCR_I2S_MODE_SLAVE;
                        break;
                default:
                        return -EINVAL;
                }
 
                /* Data on rising edge of bclk, frame low, 1clk before data */
-               strcr |= CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TSCKP |
-                       CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TEFS;
+               strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP |
+                        SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
                break;
        case SND_SOC_DAIFMT_LEFT_J:
                /* Data on rising edge of bclk, frame high */
-               strcr |= CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TSCKP;
+               strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP;
                break;
        case SND_SOC_DAIFMT_DSP_A:
                /* Data on rising edge of bclk, frame high, 1clk before data */
-               strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP |
-                       CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TEFS;
+               strcr |= SSI_STCR_TFSL | SSI_STCR_TSCKP |
+                        SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
                break;
        case SND_SOC_DAIFMT_DSP_B:
                /* Data on rising edge of bclk, frame high */
-               strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP |
-                       CCSR_SSI_STCR_TXBIT0;
+               strcr |= SSI_STCR_TFSL | SSI_STCR_TSCKP | SSI_STCR_TXBIT0;
                break;
        case SND_SOC_DAIFMT_AC97:
-               ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_NORMAL;
+               /* Data on falling edge of bclk, frame high, 1clk before data */
+               ssi->i2s_net |= SSI_SCR_I2S_MODE_NORMAL;
                break;
        default:
                return -EINVAL;
        }
-       scr |= ssi_private->i2s_mode;
+       scr |= ssi->i2s_net;
 
        /* DAI clock inversion */
        switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -1001,16 +949,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
                break;
        case SND_SOC_DAIFMT_IB_NF:
                /* Invert bit clock */
-               strcr ^= CCSR_SSI_STCR_TSCKP;
+               strcr ^= SSI_STCR_TSCKP;
                break;
        case SND_SOC_DAIFMT_NB_IF:
                /* Invert frame clock */
-               strcr ^= CCSR_SSI_STCR_TFSI;
+               strcr ^= SSI_STCR_TFSI;
                break;
        case SND_SOC_DAIFMT_IB_IF:
                /* Invert both clocks */
-               strcr ^= CCSR_SSI_STCR_TSCKP;
-               strcr ^= CCSR_SSI_STCR_TFSI;
+               strcr ^= SSI_STCR_TSCKP;
+               strcr ^= SSI_STCR_TFSI;
                break;
        default:
                return -EINVAL;
@@ -1019,123 +967,122 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
        /* DAI clock master masks */
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
        case SND_SOC_DAIFMT_CBS_CFS:
-               strcr |= CCSR_SSI_STCR_TFDIR | CCSR_SSI_STCR_TXDIR;
-               scr |= CCSR_SSI_SCR_SYS_CLK_EN;
+               /* Output bit and frame sync clocks */
+               strcr |= SSI_STCR_TFDIR | SSI_STCR_TXDIR;
+               scr |= SSI_SCR_SYS_CLK_EN;
                break;
        case SND_SOC_DAIFMT_CBM_CFM:
-               scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+               /* Input bit and frame sync clocks */
+               scr &= ~SSI_SCR_SYS_CLK_EN;
                break;
        case SND_SOC_DAIFMT_CBM_CFS:
-               strcr &= ~CCSR_SSI_STCR_TXDIR;
-               strcr |= CCSR_SSI_STCR_TFDIR;
-               scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+               /* Input bit clock but output frame sync clock */
+               strcr &= ~SSI_STCR_TXDIR;
+               strcr |= SSI_STCR_TFDIR;
+               scr &= ~SSI_SCR_SYS_CLK_EN;
                break;
        default:
-               if (!fsl_ssi_is_ac97(ssi_private))
+               if (!fsl_ssi_is_ac97(ssi))
                        return -EINVAL;
        }
 
        stcr |= strcr;
        srcr |= strcr;
 
-       if (ssi_private->cpu_dai_drv.symmetric_rates
-                       || fsl_ssi_is_ac97(ssi_private)) {
-               /* Need to clear RXDIR when using SYNC or AC97 mode */
-               srcr &= ~CCSR_SSI_SRCR_RXDIR;
-               scr |= CCSR_SSI_SCR_SYN;
+       /* Set SYN mode and clear RXDIR bit when using SYN or AC97 mode */
+       if (ssi->cpu_dai_drv.symmetric_rates || fsl_ssi_is_ac97(ssi)) {
+               srcr &= ~SSI_SRCR_RXDIR;
+               scr |= SSI_SCR_SYN;
        }
 
-       regmap_write(regs, CCSR_SSI_STCR, stcr);
-       regmap_write(regs, CCSR_SSI_SRCR, srcr);
-       regmap_write(regs, CCSR_SSI_SCR, scr);
+       regmap_write(regs, REG_SSI_STCR, stcr);
+       regmap_write(regs, REG_SSI_SRCR, srcr);
+       regmap_write(regs, REG_SSI_SCR, scr);
 
-       wm = ssi_private->fifo_watermark;
+       wm = ssi->fifo_watermark;
 
-       regmap_write(regs, CCSR_SSI_SFCSR,
-                       CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
-                       CCSR_SSI_SFCSR_TFWM1(wm) | CCSR_SSI_SFCSR_RFWM1(wm));
+       regmap_write(regs, REG_SSI_SFCSR,
+                    SSI_SFCSR_TFWM0(wm) | SSI_SFCSR_RFWM0(wm) |
+                    SSI_SFCSR_TFWM1(wm) | SSI_SFCSR_RFWM1(wm));
 
-       if (ssi_private->use_dual_fifo) {
-               regmap_update_bits(regs, CCSR_SSI_SRCR, CCSR_SSI_SRCR_RFEN1,
-                               CCSR_SSI_SRCR_RFEN1);
-               regmap_update_bits(regs, CCSR_SSI_STCR, CCSR_SSI_STCR_TFEN1,
-                               CCSR_SSI_STCR_TFEN1);
-               regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_TCH_EN,
-                               CCSR_SSI_SCR_TCH_EN);
+       if (ssi->use_dual_fifo) {
+               regmap_update_bits(regs, REG_SSI_SRCR,
+                                  SSI_SRCR_RFEN1, SSI_SRCR_RFEN1);
+               regmap_update_bits(regs, REG_SSI_STCR,
+                                  SSI_STCR_TFEN1, SSI_STCR_TFEN1);
+               regmap_update_bits(regs, REG_SSI_SCR,
+                                  SSI_SCR_TCH_EN, SSI_SCR_TCH_EN);
        }
 
        if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_AC97)
-               fsl_ssi_setup_ac97(ssi_private);
+               fsl_ssi_setup_ac97(ssi);
 
        return 0;
-
 }
 
 /**
- * fsl_ssi_set_dai_fmt - configure Digital Audio Interface Format.
+ * Configure Digital Audio Interface (DAI) Format
  */
-static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 {
-       struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
+       struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+
+       /* The DAI format for AC97 was already configured in probe() */
+       if (fsl_ssi_is_ac97(ssi))
+               return 0;
 
-       return _fsl_ssi_set_dai_fmt(cpu_dai->dev, ssi_private, fmt);
+       return _fsl_ssi_set_dai_fmt(dai->dev, ssi, fmt);
 }
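
[Editorial illustration, not part of this patch] For context, the fmt word handled by this callback is normally composed by an ASoC machine driver. The sketch below is hypothetical (example_set_cpu_dai_fmt() does not exist in the tree); it only shows a flag combination that exercises the master-mode path checked in _fsl_ssi_set_dai_fmt().

#include <sound/soc.h>	/* snd_soc_dai_set_fmt(), SND_SOC_DAIFMT_* */

/*
 * Illustrative only: hand a DAI format to the SSI CPU DAI. CBS_CFS makes
 * the SSI the bit/frame clock master, which is the case that requires
 * baudclk in _fsl_ssi_set_dai_fmt().
 */
static int example_set_cpu_dai_fmt(struct snd_soc_dai *cpu_dai)
{
	unsigned int fmt = SND_SOC_DAIFMT_I2S |
			   SND_SOC_DAIFMT_NB_NF |
			   SND_SOC_DAIFMT_CBS_CFS;

	/* Reaches fsl_ssi_set_dai_fmt() through the snd_soc_dai_ops table */
	return snd_soc_dai_set_fmt(cpu_dai, fmt);
}
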
 
 /**
- * fsl_ssi_set_dai_tdm_slot - set TDM slot number
- *
- * Note: This function can be only called when using SSI as DAI master
+ * Set TDM slot number and slot width
  */
-static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
-                               u32 rx_mask, int slots, int slot_width)
+static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
+                                   u32 rx_mask, int slots, int slot_width)
 {
-       struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
-       struct regmap *regs = ssi_private->regs;
+       struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+       struct regmap *regs = ssi->regs;
        u32 val;
 
        /* The word length should be 8, 10, 12, 16, 18, 20, 22 or 24 */
        if (slot_width & 1 || slot_width < 8 || slot_width > 24) {
-               dev_err(cpu_dai->dev, "invalid slot width: %d\n", slot_width);
+               dev_err(dai->dev, "invalid slot width: %d\n", slot_width);
                return -EINVAL;
        }
 
        /* The slot number should be >= 2 if using Network mode or I2S mode */
-       regmap_read(regs, CCSR_SSI_SCR, &val);
-       val &= CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_NET;
+       regmap_read(regs, REG_SSI_SCR, &val);
+       val &= SSI_SCR_I2S_MODE_MASK | SSI_SCR_NET;
        if (val && slots < 2) {
-               dev_err(cpu_dai->dev, "slot number should be >= 2 in I2S or NET\n");
+               dev_err(dai->dev, "slot number should be >= 2 in I2S or NET\n");
                return -EINVAL;
        }
 
-       regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_DC_MASK,
-                       CCSR_SSI_SxCCR_DC(slots));
-       regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_DC_MASK,
-                       CCSR_SSI_SxCCR_DC(slots));
+       regmap_update_bits(regs, REG_SSI_STCCR,
+                          SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
+       regmap_update_bits(regs, REG_SSI_SRCCR,
+                          SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
 
-       /* The register SxMSKs needs SSI to provide essential clock due to
-        * hardware design. So we here temporarily enable SSI to set them.
-        */
-       regmap_read(regs, CCSR_SSI_SCR, &val);
-       val &= CCSR_SSI_SCR_SSIEN;
-       regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN,
-                       CCSR_SSI_SCR_SSIEN);
+       /* Save SSIEN bit of the SCR register */
+       regmap_read(regs, REG_SSI_SCR, &val);
+       val &= SSI_SCR_SSIEN;
+       /* Temporarily enable SSI to allow SxMSKs to be configurable */
+       regmap_update_bits(regs, REG_SSI_SCR, SSI_SCR_SSIEN, SSI_SCR_SSIEN);
 
-       regmap_write(regs, CCSR_SSI_STMSK, ~tx_mask);
-       regmap_write(regs, CCSR_SSI_SRMSK, ~rx_mask);
+       regmap_write(regs, REG_SSI_STMSK, ~tx_mask);
+       regmap_write(regs, REG_SSI_SRMSK, ~rx_mask);
 
-       regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN, val);
+       /* Restore the value of SSIEN bit */
+       regmap_update_bits(regs, REG_SSI_SCR, SSI_SCR_SSIEN, val);
 
-       ssi_private->slot_width = slot_width;
-       ssi_private->slots = slots;
+       ssi->slot_width = slot_width;
+       ssi->slots = slots;
 
        return 0;
 }
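
[Editorial illustration, not part of this patch] One detail worth spelling out in the function above: ALSA's tx_mask/rx_mask mark active slots, while the SxMSK registers mark muted slots, hence the bitwise NOT before the writes. The helper name below is hypothetical.

/*
 * Illustrative only: convert an ALSA slot mask (active slots) into the
 * value programmed into REG_SSI_STMSK/REG_SSI_SRMSK (muted slots).
 * e.g. tx_mask = 0x03 (slots 0 and 1 active) -> STMSK = 0xfffffffc.
 */
static u32 example_slot_mask_to_sxmsk(u32 alsa_mask)
{
	return ~alsa_mask;
}
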
 
 /**
- * fsl_ssi_trigger: start and stop the DMA transfer.
- *
- * This function is called by ALSA to start, stop, pause, and resume the DMA
- * transfer of data.
+ * Start or stop SSI and corresponding DMA transaction.
  *
  * The DMA channel is in external master start and pause mode, which
  * means the SSI completely controls the flow of data.
@@ -1144,37 +1091,38 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
                           struct snd_soc_dai *dai)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai);
-       struct regmap *regs = ssi_private->regs;
+       struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+       struct regmap *regs = ssi->regs;
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-                       fsl_ssi_tx_config(ssi_private, true);
+                       fsl_ssi_tx_config(ssi, true);
                else
-                       fsl_ssi_rx_config(ssi_private, true);
+                       fsl_ssi_rx_config(ssi, true);
                break;
 
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-                       fsl_ssi_tx_config(ssi_private, false);
+                       fsl_ssi_tx_config(ssi, false);
                else
-                       fsl_ssi_rx_config(ssi_private, false);
+                       fsl_ssi_rx_config(ssi, false);
                break;
 
        default:
                return -EINVAL;
        }
 
-       if (fsl_ssi_is_ac97(ssi_private)) {
+       /* Clear corresponding FIFO */
+       if (fsl_ssi_is_ac97(ssi)) {
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-                       regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_TX_CLR);
+                       regmap_write(regs, REG_SSI_SOR, SSI_SOR_TX_CLR);
                else
-                       regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_RX_CLR);
+                       regmap_write(regs, REG_SSI_SOR, SSI_SOR_RX_CLR);
        }
 
        return 0;
@@ -1182,27 +1130,26 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
 
 static int fsl_ssi_dai_probe(struct snd_soc_dai *dai)
 {
-       struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(dai);
+       struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
 
-       if (ssi_private->soc->imx && ssi_private->use_dma) {
-               dai->playback_dma_data = &ssi_private->dma_params_tx;
-               dai->capture_dma_data = &ssi_private->dma_params_rx;
+       if (ssi->soc->imx && ssi->use_dma) {
+               dai->playback_dma_data = &ssi->dma_params_tx;
+               dai->capture_dma_data = &ssi->dma_params_rx;
        }
 
        return 0;
 }
 
 static const struct snd_soc_dai_ops fsl_ssi_dai_ops = {
-       .startup        = fsl_ssi_startup,
-       .shutdown       = fsl_ssi_shutdown,
-       .hw_params      = fsl_ssi_hw_params,
-       .hw_free        = fsl_ssi_hw_free,
-       .set_fmt        = fsl_ssi_set_dai_fmt,
-       .set_tdm_slot   = fsl_ssi_set_dai_tdm_slot,
-       .trigger        = fsl_ssi_trigger,
+       .startup = fsl_ssi_startup,
+       .shutdown = fsl_ssi_shutdown,
+       .hw_params = fsl_ssi_hw_params,
+       .hw_free = fsl_ssi_hw_free,
+       .set_fmt = fsl_ssi_set_dai_fmt,
+       .set_tdm_slot = fsl_ssi_set_dai_tdm_slot,
+       .trigger = fsl_ssi_trigger,
 };
 
-/* Template for the CPU dai driver structure */
 static struct snd_soc_dai_driver fsl_ssi_dai_template = {
        .probe = fsl_ssi_dai_probe,
        .playback = {
@@ -1223,7 +1170,7 @@ static struct snd_soc_dai_driver fsl_ssi_dai_template = {
 };
 
 static const struct snd_soc_component_driver fsl_ssi_component = {
-       .name           = "fsl-ssi",
+       .name = "fsl-ssi",
 };
 
 static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
@@ -1234,23 +1181,23 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
                .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_8000_48000,
-               .formats = SNDRV_PCM_FMTBIT_S16_LE,
+               .formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
        },
        .capture = {
                .stream_name = "AC97 Capture",
                .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_48000,
-               .formats = SNDRV_PCM_FMTBIT_S16_LE,
+               /* 16-bit capture is broken (errata ERR003778) */
+               .formats = SNDRV_PCM_FMTBIT_S20,
        },
        .ops = &fsl_ssi_dai_ops,
 };
 
-
-static struct fsl_ssi_private *fsl_ac97_data;
+static struct fsl_ssi *fsl_ac97_data;
 
 static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
-               unsigned short val)
+                              unsigned short val)
 {
        struct regmap *regs = fsl_ac97_data->regs;
        unsigned int lreg;
@@ -1260,61 +1207,68 @@ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
        if (reg > 0x7f)
                return;
 
+       mutex_lock(&fsl_ac97_data->ac97_reg_lock);
+
        ret = clk_prepare_enable(fsl_ac97_data->clk);
        if (ret) {
                pr_err("ac97 write clk_prepare_enable failed: %d\n",
                        ret);
-               return;
+               goto ret_unlock;
        }
 
        lreg = reg <<  12;
-       regmap_write(regs, CCSR_SSI_SACADD, lreg);
+       regmap_write(regs, REG_SSI_SACADD, lreg);
 
        lval = val << 4;
-       regmap_write(regs, CCSR_SSI_SACDAT, lval);
+       regmap_write(regs, REG_SSI_SACDAT, lval);
 
-       regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
-                       CCSR_SSI_SACNT_WR);
+       regmap_update_bits(regs, REG_SSI_SACNT,
+                          SSI_SACNT_RDWR_MASK, SSI_SACNT_WR);
        udelay(100);
 
        clk_disable_unprepare(fsl_ac97_data->clk);
+
+ret_unlock:
+       mutex_unlock(&fsl_ac97_data->ac97_reg_lock);
 }
 
 static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
-               unsigned short reg)
+                                       unsigned short reg)
 {
        struct regmap *regs = fsl_ac97_data->regs;
-
-       unsigned short val = -1;
+       unsigned short val = 0;
        u32 reg_val;
        unsigned int lreg;
        int ret;
 
+       mutex_lock(&fsl_ac97_data->ac97_reg_lock);
+
        ret = clk_prepare_enable(fsl_ac97_data->clk);
        if (ret) {
-               pr_err("ac97 read clk_prepare_enable failed: %d\n",
-                       ret);
-               return -1;
+               pr_err("ac97 read clk_prepare_enable failed: %d\n", ret);
+               goto ret_unlock;
        }
 
        lreg = (reg & 0x7f) <<  12;
-       regmap_write(regs, CCSR_SSI_SACADD, lreg);
-       regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
-                       CCSR_SSI_SACNT_RD);
+       regmap_write(regs, REG_SSI_SACADD, lreg);
+       regmap_update_bits(regs, REG_SSI_SACNT,
+                          SSI_SACNT_RDWR_MASK, SSI_SACNT_RD);
 
        udelay(100);
 
-       regmap_read(regs, CCSR_SSI_SACDAT, &reg_val);
+       regmap_read(regs, REG_SSI_SACDAT, &reg_val);
        val = (reg_val >> 4) & 0xffff;
 
        clk_disable_unprepare(fsl_ac97_data->clk);
 
+ret_unlock:
+       mutex_unlock(&fsl_ac97_data->ac97_reg_lock);
        return val;
 }
 
 static struct snd_ac97_bus_ops fsl_ssi_ac97_ops = {
-       .read           = fsl_ssi_ac97_read,
-       .write          = fsl_ssi_ac97_write,
+       .read = fsl_ssi_ac97_read,
+       .write = fsl_ssi_ac97_write,
 };
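
[Editorial illustration, not part of this patch] The shifts in the two register-access hooks above define how a 16-bit AC'97 data word is packed into SACDAT and recovered again on the read path. A small worked sketch (the example_ name and values are hypothetical):

/*
 * Illustrative only: mirror the packing used by fsl_ssi_ac97_write() and
 * fsl_ssi_ac97_read(). A value written as (val << 4) into SACDAT comes
 * back from the read path as (reg_val >> 4) & 0xffff.
 */
static u16 example_ac97_data_roundtrip(u16 val)
{
	u32 sacdat = (u32)val << 4;	/* e.g. 0x8000 -> 0x00080000 */

	return (sacdat >> 4) & 0xffff;	/* recovers 0x8000 */
}
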
 
 /**
@@ -1329,70 +1283,67 @@ static void make_lowercase(char *s)
 }
 
 static int fsl_ssi_imx_probe(struct platform_device *pdev,
-               struct fsl_ssi_private *ssi_private, void __iomem *iomem)
+                            struct fsl_ssi *ssi, void __iomem *iomem)
 {
        struct device_node *np = pdev->dev.of_node;
+       struct device *dev = &pdev->dev;
        u32 dmas[4];
        int ret;
 
-       if (ssi_private->has_ipg_clk_name)
-               ssi_private->clk = devm_clk_get(&pdev->dev, "ipg");
+       /* Backward compatible with a DT that has no ipg clock name assigned */
+       if (ssi->has_ipg_clk_name)
+               ssi->clk = devm_clk_get(dev, "ipg");
        else
-               ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(ssi_private->clk)) {
-               ret = PTR_ERR(ssi_private->clk);
-               dev_err(&pdev->dev, "could not get clock: %d\n", ret);
+               ssi->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(ssi->clk)) {
+               ret = PTR_ERR(ssi->clk);
+               dev_err(dev, "failed to get clock: %d\n", ret);
                return ret;
        }
 
-       if (!ssi_private->has_ipg_clk_name) {
-               ret = clk_prepare_enable(ssi_private->clk);
+       /* Enable the clock since regmap will not handle it in this case */
+       if (!ssi->has_ipg_clk_name) {
+               ret = clk_prepare_enable(ssi->clk);
                if (ret) {
-                       dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+                       dev_err(dev, "clk_prepare_enable failed: %d\n", ret);
                        return ret;
                }
        }
 
-       /* For those SLAVE implementations, we ignore non-baudclk cases
-        * and, instead, abandon MASTER mode that needs baud clock.
-        */
-       ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
-       if (IS_ERR(ssi_private->baudclk))
-               dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
-                        PTR_ERR(ssi_private->baudclk));
+       /* Do not error out for slave cases that live without a baud clock */
+       ssi->baudclk = devm_clk_get(dev, "baud");
+       if (IS_ERR(ssi->baudclk))
+               dev_dbg(dev, "failed to get baud clock: %ld\n",
+                        PTR_ERR(ssi->baudclk));
 
-       ssi_private->dma_params_tx.maxburst = ssi_private->dma_maxburst;
-       ssi_private->dma_params_rx.maxburst = ssi_private->dma_maxburst;
-       ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
-       ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
+       ssi->dma_params_tx.maxburst = ssi->dma_maxburst;
+       ssi->dma_params_rx.maxburst = ssi->dma_maxburst;
+       ssi->dma_params_tx.addr = ssi->ssi_phys + REG_SSI_STX0;
+       ssi->dma_params_rx.addr = ssi->ssi_phys + REG_SSI_SRX0;
 
+       /* Set to dual FIFO mode according to the SDMA script */
        ret = of_property_read_u32_array(np, "dmas", dmas, 4);
-       if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
-               ssi_private->use_dual_fifo = true;
-               /* When using dual fifo mode, we need to keep watermark
-                * as even numbers due to dma script limitation.
+       if (ssi->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
+               ssi->use_dual_fifo = true;
+               /*
+                * Use even numbers to avoid channel swap due to SDMA
+                * script design
                 */
-               ssi_private->dma_params_tx.maxburst &= ~0x1;
-               ssi_private->dma_params_rx.maxburst &= ~0x1;
+               ssi->dma_params_tx.maxburst &= ~0x1;
+               ssi->dma_params_rx.maxburst &= ~0x1;
        }
 
-       if (!ssi_private->use_dma) {
-
+       if (!ssi->use_dma) {
                /*
-                * Some boards use an incompatible codec. To get it
-                * working, we are using imx-fiq-pcm-audio, that
-                * can handle those codecs. DMA is not possible in this
-                * situation.
+                * Some boards use an incompatible codec. Use imx-fiq-pcm-audio
+                * to get it working, as DMA is not possible in this situation.
                 */
+               ssi->fiq_params.irq = ssi->irq;
+               ssi->fiq_params.base = iomem;
+               ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
+               ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
 
-               ssi_private->fiq_params.irq = ssi_private->irq;
-               ssi_private->fiq_params.base = iomem;
-               ssi_private->fiq_params.dma_params_rx =
-                       &ssi_private->dma_params_rx;
-               ssi_private->fiq_params.dma_params_tx =
-                       &ssi_private->dma_params_tx;
-
-               ret = imx_pcm_fiq_init(pdev, &ssi_private->fiq_params);
+               ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
                if (ret)
                        goto error_pcm;
        } else {
@@ -1404,26 +1355,26 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
        return 0;
 
 error_pcm:
+       if (!ssi->has_ipg_clk_name)
+               clk_disable_unprepare(ssi->clk);
 
-       if (!ssi_private->has_ipg_clk_name)
-               clk_disable_unprepare(ssi_private->clk);
        return ret;
 }
 
-static void fsl_ssi_imx_clean(struct platform_device *pdev,
-               struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_imx_clean(struct platform_device *pdev, struct fsl_ssi *ssi)
 {
-       if (!ssi_private->use_dma)
+       if (!ssi->use_dma)
                imx_pcm_fiq_exit(pdev);
-       if (!ssi_private->has_ipg_clk_name)
-               clk_disable_unprepare(ssi_private->clk);
+       if (!ssi->has_ipg_clk_name)
+               clk_disable_unprepare(ssi->clk);
 }
 
 static int fsl_ssi_probe(struct platform_device *pdev)
 {
-       struct fsl_ssi_private *ssi_private;
+       struct fsl_ssi *ssi;
        int ret = 0;
        struct device_node *np = pdev->dev.of_node;
+       struct device *dev = &pdev->dev;
        const struct of_device_id *of_id;
        const char *p, *sprop;
        const uint32_t *iprop;
@@ -1432,182 +1383,159 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        char name[64];
        struct regmap_config regconfig = fsl_ssi_regconfig;
 
-       of_id = of_match_device(fsl_ssi_ids, &pdev->dev);
+       of_id = of_match_device(fsl_ssi_ids, dev);
        if (!of_id || !of_id->data)
                return -EINVAL;
 
-       ssi_private = devm_kzalloc(&pdev->dev, sizeof(*ssi_private),
-                       GFP_KERNEL);
-       if (!ssi_private)
+       ssi = devm_kzalloc(dev, sizeof(*ssi), GFP_KERNEL);
+       if (!ssi)
                return -ENOMEM;
 
-       ssi_private->soc = of_id->data;
-       ssi_private->dev = &pdev->dev;
+       ssi->soc = of_id->data;
+       ssi->dev = dev;
 
+       /* Check if being used in AC97 mode */
        sprop = of_get_property(np, "fsl,mode", NULL);
        if (sprop) {
                if (!strcmp(sprop, "ac97-slave"))
-                       ssi_private->dai_fmt = SND_SOC_DAIFMT_AC97;
+                       ssi->dai_fmt = SND_SOC_DAIFMT_AC97;
        }
 
-       ssi_private->use_dma = !of_property_read_bool(np,
-                       "fsl,fiq-stream-filter");
-
-       if (fsl_ssi_is_ac97(ssi_private)) {
-               memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_ac97_dai,
-                               sizeof(fsl_ssi_ac97_dai));
-
-               fsl_ac97_data = ssi_private;
+       /* Select DMA or FIQ */
+       ssi->use_dma = !of_property_read_bool(np, "fsl,fiq-stream-filter");
 
-               ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
-               if (ret) {
-                       dev_err(&pdev->dev, "could not set AC'97 ops\n");
-                       return ret;
-               }
+       if (fsl_ssi_is_ac97(ssi)) {
+               memcpy(&ssi->cpu_dai_drv, &fsl_ssi_ac97_dai,
+                      sizeof(fsl_ssi_ac97_dai));
+               fsl_ac97_data = ssi;
        } else {
-               /* Initialize this copy of the CPU DAI driver structure */
-               memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
+               memcpy(&ssi->cpu_dai_drv, &fsl_ssi_dai_template,
                       sizeof(fsl_ssi_dai_template));
        }
-       ssi_private->cpu_dai_drv.name = dev_name(&pdev->dev);
+       ssi->cpu_dai_drv.name = dev_name(dev);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       iomem = devm_ioremap_resource(&pdev->dev, res);
+       iomem = devm_ioremap_resource(dev, res);
        if (IS_ERR(iomem))
                return PTR_ERR(iomem);
-       ssi_private->ssi_phys = res->start;
+       ssi->ssi_phys = res->start;
 
-       if (ssi_private->soc->imx21regs) {
-               /*
-                * According to datasheet imx21-class SSI
-                * don't have SACC{ST,EN,DIS} regs.
-                */
-               regconfig.max_register = CCSR_SSI_SRMSK;
+       if (ssi->soc->imx21regs) {
+               /* No SACC{ST,EN,DIS} regs in imx21-class SSI */
+               regconfig.max_register = REG_SSI_SRMSK;
                regconfig.num_reg_defaults_raw =
-                       CCSR_SSI_SRMSK / sizeof(uint32_t) + 1;
+                       REG_SSI_SRMSK / sizeof(uint32_t) + 1;
        }
 
        ret = of_property_match_string(np, "clock-names", "ipg");
        if (ret < 0) {
-               ssi_private->has_ipg_clk_name = false;
-               ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
-                       &regconfig);
+               ssi->has_ipg_clk_name = false;
+               ssi->regs = devm_regmap_init_mmio(dev, iomem, &regconfig);
        } else {
-               ssi_private->has_ipg_clk_name = true;
-               ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev,
-                       "ipg", iomem, &regconfig);
+               ssi->has_ipg_clk_name = true;
+               ssi->regs = devm_regmap_init_mmio_clk(dev, "ipg", iomem,
+                                                     &regconfig);
        }
-       if (IS_ERR(ssi_private->regs)) {
-               dev_err(&pdev->dev, "Failed to init register map\n");
-               return PTR_ERR(ssi_private->regs);
+       if (IS_ERR(ssi->regs)) {
+               dev_err(dev, "failed to init register map\n");
+               return PTR_ERR(ssi->regs);
        }
 
-       ssi_private->irq = platform_get_irq(pdev, 0);
-       if (ssi_private->irq < 0) {
-               dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
-               return ssi_private->irq;
+       ssi->irq = platform_get_irq(pdev, 0);
+       if (ssi->irq < 0) {
+               dev_err(dev, "no irq for node %s\n", pdev->name);
+               return ssi->irq;
        }
 
-       /* Are the RX and the TX clocks locked? */
+       /* Set software limitations for synchronous mode */
        if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) {
-               if (!fsl_ssi_is_ac97(ssi_private))
-                       ssi_private->cpu_dai_drv.symmetric_rates = 1;
+               if (!fsl_ssi_is_ac97(ssi)) {
+                       ssi->cpu_dai_drv.symmetric_rates = 1;
+                       ssi->cpu_dai_drv.symmetric_samplebits = 1;
+               }
 
-               ssi_private->cpu_dai_drv.symmetric_channels = 1;
-               ssi_private->cpu_dai_drv.symmetric_samplebits = 1;
+               ssi->cpu_dai_drv.symmetric_channels = 1;
        }
 
-       /* Determine the FIFO depth. */
+       /* Fetch FIFO depth; Set to 8 for older DT without this property */
        iprop = of_get_property(np, "fsl,fifo-depth", NULL);
        if (iprop)
-               ssi_private->fifo_depth = be32_to_cpup(iprop);
+               ssi->fifo_depth = be32_to_cpup(iprop);
        else
-                /* Older 8610 DTs didn't have the fifo-depth property */
-               ssi_private->fifo_depth = 8;
+               ssi->fifo_depth = 8;
 
        /*
-        * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
-        * use FIFO 1 but set the watermark appropriately nontheless.
-        * We program the transmit water to signal a DMA transfer
-        * if there are N elements left in the FIFO. For chips with 15-deep
-        * FIFOs, set watermark to 8.  This allows the SSI to operate at a
-        * high data rate without channel slipping. Behavior is unchanged
-        * for the older chips with a fifo depth of only 8.  A value of 4
-        * might be appropriate for the older chips, but is left at
-        * fifo_depth-2 until sombody has a chance to test.
+        * Configure TX and RX DMA watermarks -- when to send a DMA request
         *
-        * We set the watermark on the same level as the DMA burstsize.  For
-        * fiq it is probably better to use the biggest possible watermark
-        * size.
+        * Values should be tested to avoid FIFO under/overrun. Set maxburst
+        * to fifo_watermark to maximize DMA transactions and reduce overhead.
         */
-       switch (ssi_private->fifo_depth) {
+       switch (ssi->fifo_depth) {
        case 15:
                /*
-                * 2 samples is not enough when running at high data
-                * rates (like 48kHz @ 16 bits/channel, 16 channels)
-                * 8 seems to split things evenly and leave enough time
-                * for the DMA to fill the FIFO before it's over/under
-                * run.
+                * Set to 8 as a balanced configuration -- when the TX FIFO has
+                * 8 empty slots, send a DMA request to fill them. The remaining
+                * 7 slots should give the DMA enough time to complete before
+                * the TX FIFO underruns; the same applies to RX.
+                *
+                * Tested with cases running at 48kHz @ 16 bits x 16 channels
                 */
-               ssi_private->fifo_watermark = 8;
-               ssi_private->dma_maxburst = 8;
+               ssi->fifo_watermark = 8;
+               ssi->dma_maxburst = 8;
                break;
        case 8:
        default:
-               /*
-                * maintain old behavior for older chips.
-                * Keeping it the same because I don't have an older
-                * board to test with.
-                * I suspect this could be changed to be something to
-                * leave some more space in the fifo.
-                */
-               ssi_private->fifo_watermark = ssi_private->fifo_depth - 2;
-               ssi_private->dma_maxburst = ssi_private->fifo_depth - 2;
+               /* Safely use old watermark configurations for older chips */
+               ssi->fifo_watermark = ssi->fifo_depth - 2;
+               ssi->dma_maxburst = ssi->fifo_depth - 2;
                break;
        }
 
-       dev_set_drvdata(&pdev->dev, ssi_private);
+       dev_set_drvdata(dev, ssi);
 
-       if (ssi_private->soc->imx) {
-               ret = fsl_ssi_imx_probe(pdev, ssi_private, iomem);
+       if (ssi->soc->imx) {
+               ret = fsl_ssi_imx_probe(pdev, ssi, iomem);
                if (ret)
                        return ret;
        }
 
-       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
-                                             &ssi_private->cpu_dai_drv, 1);
+       if (fsl_ssi_is_ac97(ssi)) {
+               mutex_init(&ssi->ac97_reg_lock);
+               ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+               if (ret) {
+                       dev_err(dev, "failed to set AC'97 ops\n");
+                       goto error_ac97_ops;
+               }
+       }
+
+       ret = devm_snd_soc_register_component(dev, &fsl_ssi_component,
+                                             &ssi->cpu_dai_drv, 1);
        if (ret) {
-               dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
+               dev_err(dev, "failed to register DAI: %d\n", ret);
                goto error_asoc_register;
        }
 
-       if (ssi_private->use_dma) {
-               ret = devm_request_irq(&pdev->dev, ssi_private->irq,
-                                       fsl_ssi_isr, 0, dev_name(&pdev->dev),
-                                       ssi_private);
+       if (ssi->use_dma) {
+               ret = devm_request_irq(dev, ssi->irq, fsl_ssi_isr, 0,
+                                      dev_name(dev), ssi);
                if (ret < 0) {
-                       dev_err(&pdev->dev, "could not claim irq %u\n",
-                                       ssi_private->irq);
+                       dev_err(dev, "failed to claim irq %u\n", ssi->irq);
                        goto error_asoc_register;
                }
        }
 
-       ret = fsl_ssi_debugfs_create(&ssi_private->dbg_stats, &pdev->dev);
+       ret = fsl_ssi_debugfs_create(&ssi->dbg_stats, dev);
        if (ret)
                goto error_asoc_register;
 
-       /*
-        * If codec-handle property is missing from SSI node, we assume
-        * that the machine driver uses new binding which does not require
-        * SSI driver to trigger machine driver's probe.
-        */
+       /* Bypass it if using newer DT bindings of ASoC machine drivers */
        if (!of_get_property(np, "codec-handle", NULL))
                goto done;
 
-       /* Trigger the machine driver's probe function.  The platform driver
-        * name of the machine driver is taken from /compatible property of the
-        * device tree.  We also pass the address of the CPU DAI driver
-        * structure.
+       /*
+        * Backward compatible with older bindings: manually trigger the
+        * machine driver's probe(), using the /compatible property of the
+        * device tree root node to derive the machine driver's name.
         */
        sprop = of_get_property(of_find_node_by_path("/"), "compatible", NULL);
        /* Sometimes the compatible name has a "fsl," prefix, so we strip it. */
@@ -1617,34 +1545,31 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        snprintf(name, sizeof(name), "snd-soc-%s", sprop);
        make_lowercase(name);
 
-       ssi_private->pdev =
-               platform_device_register_data(&pdev->dev, name, 0, NULL, 0);
-       if (IS_ERR(ssi_private->pdev)) {
-               ret = PTR_ERR(ssi_private->pdev);
-               dev_err(&pdev->dev, "failed to register platform: %d\n", ret);
+       ssi->pdev = platform_device_register_data(dev, name, 0, NULL, 0);
+       if (IS_ERR(ssi->pdev)) {
+               ret = PTR_ERR(ssi->pdev);
+               dev_err(dev, "failed to register platform: %d\n", ret);
                goto error_sound_card;
        }
 
 done:
-       if (ssi_private->dai_fmt)
-               _fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
-                                    ssi_private->dai_fmt);
+       if (ssi->dai_fmt)
+               _fsl_ssi_set_dai_fmt(dev, ssi, ssi->dai_fmt);
 
-       if (fsl_ssi_is_ac97(ssi_private)) {
+       if (fsl_ssi_is_ac97(ssi)) {
                u32 ssi_idx;
 
                ret = of_property_read_u32(np, "cell-index", &ssi_idx);
                if (ret) {
-                       dev_err(&pdev->dev, "cannot get SSI index property\n");
+                       dev_err(dev, "failed to get SSI index property\n");
                        goto error_sound_card;
                }
 
-               ssi_private->pdev =
-                       platform_device_register_data(NULL,
-                                       "ac97-codec", ssi_idx, NULL, 0);
-               if (IS_ERR(ssi_private->pdev)) {
-                       ret = PTR_ERR(ssi_private->pdev);
-                       dev_err(&pdev->dev,
+               ssi->pdev = platform_device_register_data(NULL, "ac97-codec",
+                                                         ssi_idx, NULL, 0);
+               if (IS_ERR(ssi->pdev)) {
+                       ret = PTR_ERR(ssi->pdev);
+                       dev_err(dev,
                                "failed to register AC97 codec platform: %d\n",
                                ret);
                        goto error_sound_card;
@@ -1654,29 +1579,36 @@ done:
        return 0;
 
 error_sound_card:
-       fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
-
+       fsl_ssi_debugfs_remove(&ssi->dbg_stats);
 error_asoc_register:
-       if (ssi_private->soc->imx)
-               fsl_ssi_imx_clean(pdev, ssi_private);
+       if (fsl_ssi_is_ac97(ssi))
+               snd_soc_set_ac97_ops(NULL);
+error_ac97_ops:
+       if (fsl_ssi_is_ac97(ssi))
+               mutex_destroy(&ssi->ac97_reg_lock);
+
+       if (ssi->soc->imx)
+               fsl_ssi_imx_clean(pdev, ssi);
 
        return ret;
 }
 
 static int fsl_ssi_remove(struct platform_device *pdev)
 {
-       struct fsl_ssi_private *ssi_private = dev_get_drvdata(&pdev->dev);
+       struct fsl_ssi *ssi = dev_get_drvdata(&pdev->dev);
 
-       fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
+       fsl_ssi_debugfs_remove(&ssi->dbg_stats);
 
-       if (ssi_private->pdev)
-               platform_device_unregister(ssi_private->pdev);
+       if (ssi->pdev)
+               platform_device_unregister(ssi->pdev);
 
-       if (ssi_private->soc->imx)
-               fsl_ssi_imx_clean(pdev, ssi_private);
+       if (ssi->soc->imx)
+               fsl_ssi_imx_clean(pdev, ssi);
 
-       if (fsl_ssi_is_ac97(ssi_private))
+       if (fsl_ssi_is_ac97(ssi)) {
                snd_soc_set_ac97_ops(NULL);
+               mutex_destroy(&ssi->ac97_reg_lock);
+       }
 
        return 0;
 }
@@ -1684,13 +1616,11 @@ static int fsl_ssi_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int fsl_ssi_suspend(struct device *dev)
 {
-       struct fsl_ssi_private *ssi_private = dev_get_drvdata(dev);
-       struct regmap *regs = ssi_private->regs;
+       struct fsl_ssi *ssi = dev_get_drvdata(dev);
+       struct regmap *regs = ssi->regs;
 
-       regmap_read(regs, CCSR_SSI_SFCSR,
-                       &ssi_private->regcache_sfcsr);
-       regmap_read(regs, CCSR_SSI_SACNT,
-                       &ssi_private->regcache_sacnt);
+       regmap_read(regs, REG_SSI_SFCSR, &ssi->regcache_sfcsr);
+       regmap_read(regs, REG_SSI_SACNT, &ssi->regcache_sacnt);
 
        regcache_cache_only(regs, true);
        regcache_mark_dirty(regs);
@@ -1700,17 +1630,16 @@ static int fsl_ssi_suspend(struct device *dev)
 
 static int fsl_ssi_resume(struct device *dev)
 {
-       struct fsl_ssi_private *ssi_private = dev_get_drvdata(dev);
-       struct regmap *regs = ssi_private->regs;
+       struct fsl_ssi *ssi = dev_get_drvdata(dev);
+       struct regmap *regs = ssi->regs;
 
        regcache_cache_only(regs, false);
 
-       regmap_update_bits(regs, CCSR_SSI_SFCSR,
-                       CCSR_SSI_SFCSR_RFWM1_MASK | CCSR_SSI_SFCSR_TFWM1_MASK |
-                       CCSR_SSI_SFCSR_RFWM0_MASK | CCSR_SSI_SFCSR_TFWM0_MASK,
-                       ssi_private->regcache_sfcsr);
-       regmap_write(regs, CCSR_SSI_SACNT,
-                       ssi_private->regcache_sacnt);
+       regmap_update_bits(regs, REG_SSI_SFCSR,
+                          SSI_SFCSR_RFWM1_MASK | SSI_SFCSR_TFWM1_MASK |
+                          SSI_SFCSR_RFWM0_MASK | SSI_SFCSR_TFWM0_MASK,
+                          ssi->regcache_sfcsr);
+       regmap_write(regs, REG_SSI_SACNT, ssi->regcache_sacnt);
 
        return regcache_sync(regs);
 }
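
[Editorial illustration, not part of this patch] The suspend/resume callbacks above are normally tied into the platform driver through dev_pm_ops; that wiring is outside this hunk, so the sketch below is hedged and the symbol name is hypothetical.

/*
 * Illustrative only: typical wiring of the callbacks above.
 * SIMPLE_DEV_PM_OPS() leaves the sleep ops unset when CONFIG_PM_SLEEP
 * is disabled, matching the #ifdef guard around the two functions.
 */
static SIMPLE_DEV_PM_OPS(example_fsl_ssi_pm_ops,
			 fsl_ssi_suspend, fsl_ssi_resume);
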
index 5065105..de2fdc5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * fsl_ssi.h - ALSA SSI interface for the Freescale MPC8610 SoC
+ * fsl_ssi.h - ALSA SSI interface for the Freescale MPC8610 and i.MX SoC
  *
  * Author: Timur Tabi <timur@freescale.com>
  *
 #ifndef _MPC8610_I2S_H
 #define _MPC8610_I2S_H
 
-/* SSI registers */
-#define CCSR_SSI_STX0                  0x00
-#define CCSR_SSI_STX1                  0x04
-#define CCSR_SSI_SRX0                  0x08
-#define CCSR_SSI_SRX1                  0x0c
-#define CCSR_SSI_SCR                   0x10
-#define CCSR_SSI_SISR                  0x14
-#define CCSR_SSI_SIER                  0x18
-#define CCSR_SSI_STCR                  0x1c
-#define CCSR_SSI_SRCR                  0x20
-#define CCSR_SSI_STCCR                 0x24
-#define CCSR_SSI_SRCCR                 0x28
-#define CCSR_SSI_SFCSR                 0x2c
-#define CCSR_SSI_STR                   0x30
-#define CCSR_SSI_SOR                   0x34
-#define CCSR_SSI_SACNT                 0x38
-#define CCSR_SSI_SACADD                        0x3c
-#define CCSR_SSI_SACDAT                        0x40
-#define CCSR_SSI_SATAG                 0x44
-#define CCSR_SSI_STMSK                 0x48
-#define CCSR_SSI_SRMSK                 0x4c
-#define CCSR_SSI_SACCST                        0x50
-#define CCSR_SSI_SACCEN                        0x54
-#define CCSR_SSI_SACCDIS               0x58
+#define RX 0
+#define TX 1
 
-#define CCSR_SSI_SCR_SYNC_TX_FS                0x00001000
-#define CCSR_SSI_SCR_RFR_CLK_DIS       0x00000800
-#define CCSR_SSI_SCR_TFR_CLK_DIS       0x00000400
-#define CCSR_SSI_SCR_TCH_EN            0x00000100
-#define CCSR_SSI_SCR_SYS_CLK_EN                0x00000080
-#define CCSR_SSI_SCR_I2S_MODE_MASK     0x00000060
-#define CCSR_SSI_SCR_I2S_MODE_NORMAL   0x00000000
-#define CCSR_SSI_SCR_I2S_MODE_MASTER   0x00000020
-#define CCSR_SSI_SCR_I2S_MODE_SLAVE    0x00000040
-#define CCSR_SSI_SCR_SYN               0x00000010
-#define CCSR_SSI_SCR_NET               0x00000008
-#define CCSR_SSI_SCR_RE                        0x00000004
-#define CCSR_SSI_SCR_TE                        0x00000002
-#define CCSR_SSI_SCR_SSIEN             0x00000001
+/* -- SSI Register Map -- */
 
-#define CCSR_SSI_SISR_RFRC             0x01000000
-#define CCSR_SSI_SISR_TFRC             0x00800000
-#define CCSR_SSI_SISR_CMDAU            0x00040000
-#define CCSR_SSI_SISR_CMDDU            0x00020000
-#define CCSR_SSI_SISR_RXT              0x00010000
-#define CCSR_SSI_SISR_RDR1             0x00008000
-#define CCSR_SSI_SISR_RDR0             0x00004000
-#define CCSR_SSI_SISR_TDE1             0x00002000
-#define CCSR_SSI_SISR_TDE0             0x00001000
-#define CCSR_SSI_SISR_ROE1             0x00000800
-#define CCSR_SSI_SISR_ROE0             0x00000400
-#define CCSR_SSI_SISR_TUE1             0x00000200
-#define CCSR_SSI_SISR_TUE0             0x00000100
-#define CCSR_SSI_SISR_TFS              0x00000080
-#define CCSR_SSI_SISR_RFS              0x00000040
-#define CCSR_SSI_SISR_TLS              0x00000020
-#define CCSR_SSI_SISR_RLS              0x00000010
-#define CCSR_SSI_SISR_RFF1             0x00000008
-#define CCSR_SSI_SISR_RFF0             0x00000004
-#define CCSR_SSI_SISR_TFE1             0x00000002
-#define CCSR_SSI_SISR_TFE0             0x00000001
+/* SSI Transmit Data Register 0 */
+#define REG_SSI_STX0                   0x00
+/* SSI Transmit Data Register 1 */
+#define REG_SSI_STX1                   0x04
+/* SSI Receive Data Register 0 */
+#define REG_SSI_SRX0                   0x08
+/* SSI Receive Data Register 1 */
+#define REG_SSI_SRX1                   0x0c
+/* SSI Control Register */
+#define REG_SSI_SCR                    0x10
+/* SSI Interrupt Status Register */
+#define REG_SSI_SISR                   0x14
+/* SSI Interrupt Enable Register */
+#define REG_SSI_SIER                   0x18
+/* SSI Transmit Configuration Register */
+#define REG_SSI_STCR                   0x1c
+/* SSI Receive Configuration Register */
+#define REG_SSI_SRCR                   0x20
+#define REG_SSI_SxCR(tx)               ((tx) ? REG_SSI_STCR : REG_SSI_SRCR)
+/* SSI Transmit Clock Control Register */
+#define REG_SSI_STCCR                  0x24
+/* SSI Receive Clock Control Register */
+#define REG_SSI_SRCCR                  0x28
+#define REG_SSI_SxCCR(tx)              ((tx) ? REG_SSI_STCCR : REG_SSI_SRCCR)
+/* SSI FIFO Control/Status Register */
+#define REG_SSI_SFCSR                  0x2c
+/*
+ * SSI Test Register (Intended for debugging purposes only)
+ *
+ * Note: STR is not documented in recent IMX datasheets, but
+ * is described in the IMX51 reference manual, section 56.3.3.14
+ */
+#define REG_SSI_STR                    0x30
+/*
+ * SSI Option Register (Intended for internal use only)
+ *
+ * Note: SOR is not documented in recent IMX datasheets, but
+ * is described in the IMX51 reference manual, section 56.3.3.15
+ */
+#define REG_SSI_SOR                    0x34
+/* SSI AC97 Control Register */
+#define REG_SSI_SACNT                  0x38
+/* SSI AC97 Command Address Register */
+#define REG_SSI_SACADD                 0x3c
+/* SSI AC97 Command Data Register */
+#define REG_SSI_SACDAT                 0x40
+/* SSI AC97 Tag Register */
+#define REG_SSI_SATAG                  0x44
+/* SSI Transmit Time Slot Mask Register */
+#define REG_SSI_STMSK                  0x48
+/* SSI Receive Time Slot Mask Register */
+#define REG_SSI_SRMSK                  0x4c
+#define REG_SSI_SxMSK(tx)              ((tx) ? REG_SSI_STMSK : REG_SSI_SRMSK)
+/*
+ * SSI AC97 Channel Status Register
+ *
+ * The status could be changed by:
+ * 1) Writing a '1' bit at some position in SACCEN sets relevant bit in SACCST
+ * 2) Writing a '1' bit at some position in SACCDIS unsets the relevant bit
+ * 3) Receiving a '1' in SLOTREQ bit from external CODEC via AC Link
+ */
+#define REG_SSI_SACCST                 0x50
+/* SSI AC97 Channel Enable Register -- Set bits in SACCST */
+#define REG_SSI_SACCEN                 0x54
+/* SSI AC97 Channel Disable Register -- Clear bits in SACCST */
+#define REG_SSI_SACCDIS                        0x58
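
[Editorial illustration, not part of this patch] The new RX/TX constants and the REG_SSI_SxCR()/SxCCR()/SxMSK() helpers above let callers pick the per-direction register without open-coding a ternary each time. A minimal usage sketch, using the STCR/SRCR bit definitions from the field maps below; the helper name is hypothetical and <linux/regmap.h> is assumed.

/*
 * Illustrative only: REG_SSI_SxCR(TX) expands to REG_SSI_STCR (0x1c) and
 * REG_SSI_SxCR(RX) to REG_SSI_SRCR (0x20), so one helper can serve both
 * directions.
 */
static void example_clear_fifo1_enable(struct regmap *regs, bool tx)
{
	u32 mask = tx ? SSI_STCR_TFEN1 : SSI_SRCR_RFEN1;

	regmap_update_bits(regs, REG_SSI_SxCR(tx), mask, 0);
}
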
+
+/* -- SSI Register Field Maps -- */
 
-#define CCSR_SSI_SIER_RFRC_EN          0x01000000
-#define CCSR_SSI_SIER_TFRC_EN          0x00800000
-#define CCSR_SSI_SIER_RDMAE            0x00400000
-#define CCSR_SSI_SIER_RIE              0x00200000
-#define CCSR_SSI_SIER_TDMAE            0x00100000
-#define CCSR_SSI_SIER_TIE              0x00080000
-#define CCSR_SSI_SIER_CMDAU_EN         0x00040000
-#define CCSR_SSI_SIER_CMDDU_EN         0x00020000
-#define CCSR_SSI_SIER_RXT_EN           0x00010000
-#define CCSR_SSI_SIER_RDR1_EN          0x00008000
-#define CCSR_SSI_SIER_RDR0_EN          0x00004000
-#define CCSR_SSI_SIER_TDE1_EN          0x00002000
-#define CCSR_SSI_SIER_TDE0_EN          0x00001000
-#define CCSR_SSI_SIER_ROE1_EN          0x00000800
-#define CCSR_SSI_SIER_ROE0_EN          0x00000400
-#define CCSR_SSI_SIER_TUE1_EN          0x00000200
-#define CCSR_SSI_SIER_TUE0_EN          0x00000100
-#define CCSR_SSI_SIER_TFS_EN           0x00000080
-#define CCSR_SSI_SIER_RFS_EN           0x00000040
-#define CCSR_SSI_SIER_TLS_EN           0x00000020
-#define CCSR_SSI_SIER_RLS_EN           0x00000010
-#define CCSR_SSI_SIER_RFF1_EN          0x00000008
-#define CCSR_SSI_SIER_RFF0_EN          0x00000004
-#define CCSR_SSI_SIER_TFE1_EN          0x00000002
-#define CCSR_SSI_SIER_TFE0_EN          0x00000001
+/* SSI Control Register -- REG_SSI_SCR 0x10 */
+#define SSI_SCR_SYNC_TX_FS             0x00001000
+#define SSI_SCR_RFR_CLK_DIS            0x00000800
+#define SSI_SCR_TFR_CLK_DIS            0x00000400
+#define SSI_SCR_TCH_EN                 0x00000100
+#define SSI_SCR_SYS_CLK_EN             0x00000080
+#define SSI_SCR_I2S_MODE_MASK          0x00000060
+#define SSI_SCR_I2S_MODE_NORMAL                0x00000000
+#define SSI_SCR_I2S_MODE_MASTER                0x00000020
+#define SSI_SCR_I2S_MODE_SLAVE         0x00000040
+#define SSI_SCR_SYN                    0x00000010
+#define SSI_SCR_NET                    0x00000008
+#define SSI_SCR_I2S_NET_MASK           (SSI_SCR_NET | SSI_SCR_I2S_MODE_MASK)
+#define SSI_SCR_RE                     0x00000004
+#define SSI_SCR_TE                     0x00000002
+#define SSI_SCR_SSIEN                  0x00000001
 
-#define CCSR_SSI_STCR_TXBIT0           0x00000200
-#define CCSR_SSI_STCR_TFEN1            0x00000100
-#define CCSR_SSI_STCR_TFEN0            0x00000080
-#define CCSR_SSI_STCR_TFDIR            0x00000040
-#define CCSR_SSI_STCR_TXDIR            0x00000020
-#define CCSR_SSI_STCR_TSHFD            0x00000010
-#define CCSR_SSI_STCR_TSCKP            0x00000008
-#define CCSR_SSI_STCR_TFSI             0x00000004
-#define CCSR_SSI_STCR_TFSL             0x00000002
-#define CCSR_SSI_STCR_TEFS             0x00000001
+/* SSI Interrupt Status Register -- REG_SSI_SISR 0x14 */
+#define SSI_SISR_RFRC                  0x01000000
+#define SSI_SISR_TFRC                  0x00800000
+#define SSI_SISR_CMDAU                 0x00040000
+#define SSI_SISR_CMDDU                 0x00020000
+#define SSI_SISR_RXT                   0x00010000
+#define SSI_SISR_RDR1                  0x00008000
+#define SSI_SISR_RDR0                  0x00004000
+#define SSI_SISR_TDE1                  0x00002000
+#define SSI_SISR_TDE0                  0x00001000
+#define SSI_SISR_ROE1                  0x00000800
+#define SSI_SISR_ROE0                  0x00000400
+#define SSI_SISR_TUE1                  0x00000200
+#define SSI_SISR_TUE0                  0x00000100
+#define SSI_SISR_TFS                   0x00000080
+#define SSI_SISR_RFS                   0x00000040
+#define SSI_SISR_TLS                   0x00000020
+#define SSI_SISR_RLS                   0x00000010
+#define SSI_SISR_RFF1                  0x00000008
+#define SSI_SISR_RFF0                  0x00000004
+#define SSI_SISR_TFE1                  0x00000002
+#define SSI_SISR_TFE0                  0x00000001
 
-#define CCSR_SSI_SRCR_RXEXT            0x00000400
-#define CCSR_SSI_SRCR_RXBIT0           0x00000200
-#define CCSR_SSI_SRCR_RFEN1            0x00000100
-#define CCSR_SSI_SRCR_RFEN0            0x00000080
-#define CCSR_SSI_SRCR_RFDIR            0x00000040
-#define CCSR_SSI_SRCR_RXDIR            0x00000020
-#define CCSR_SSI_SRCR_RSHFD            0x00000010
-#define CCSR_SSI_SRCR_RSCKP            0x00000008
-#define CCSR_SSI_SRCR_RFSI             0x00000004
-#define CCSR_SSI_SRCR_RFSL             0x00000002
-#define CCSR_SSI_SRCR_REFS             0x00000001
+/* SSI Interrupt Enable Register -- REG_SSI_SIER 0x18 */
+#define SSI_SIER_RFRC_EN               0x01000000
+#define SSI_SIER_TFRC_EN               0x00800000
+#define SSI_SIER_RDMAE                 0x00400000
+#define SSI_SIER_RIE                   0x00200000
+#define SSI_SIER_TDMAE                 0x00100000
+#define SSI_SIER_TIE                   0x00080000
+#define SSI_SIER_CMDAU_EN              0x00040000
+#define SSI_SIER_CMDDU_EN              0x00020000
+#define SSI_SIER_RXT_EN                        0x00010000
+#define SSI_SIER_RDR1_EN               0x00008000
+#define SSI_SIER_RDR0_EN               0x00004000
+#define SSI_SIER_TDE1_EN               0x00002000
+#define SSI_SIER_TDE0_EN               0x00001000
+#define SSI_SIER_ROE1_EN               0x00000800
+#define SSI_SIER_ROE0_EN               0x00000400
+#define SSI_SIER_TUE1_EN               0x00000200
+#define SSI_SIER_TUE0_EN               0x00000100
+#define SSI_SIER_TFS_EN                        0x00000080
+#define SSI_SIER_RFS_EN                        0x00000040
+#define SSI_SIER_TLS_EN                        0x00000020
+#define SSI_SIER_RLS_EN                        0x00000010
+#define SSI_SIER_RFF1_EN               0x00000008
+#define SSI_SIER_RFF0_EN               0x00000004
+#define SSI_SIER_TFE1_EN               0x00000002
+#define SSI_SIER_TFE0_EN               0x00000001
 
-/* STCCR and SRCCR */
-#define CCSR_SSI_SxCCR_DIV2_SHIFT      18
-#define CCSR_SSI_SxCCR_DIV2            0x00040000
-#define CCSR_SSI_SxCCR_PSR_SHIFT       17
-#define CCSR_SSI_SxCCR_PSR             0x00020000
-#define CCSR_SSI_SxCCR_WL_SHIFT                13
-#define CCSR_SSI_SxCCR_WL_MASK         0x0001E000
-#define CCSR_SSI_SxCCR_WL(x) \
-       (((((x) / 2) - 1) << CCSR_SSI_SxCCR_WL_SHIFT) & CCSR_SSI_SxCCR_WL_MASK)
-#define CCSR_SSI_SxCCR_DC_SHIFT                8
-#define CCSR_SSI_SxCCR_DC_MASK         0x00001F00
-#define CCSR_SSI_SxCCR_DC(x) \
-       ((((x) - 1) << CCSR_SSI_SxCCR_DC_SHIFT) & CCSR_SSI_SxCCR_DC_MASK)
-#define CCSR_SSI_SxCCR_PM_SHIFT                0
-#define CCSR_SSI_SxCCR_PM_MASK         0x000000FF
-#define CCSR_SSI_SxCCR_PM(x) \
-       ((((x) - 1) << CCSR_SSI_SxCCR_PM_SHIFT) & CCSR_SSI_SxCCR_PM_MASK)
+/* SSI Transmit Configuration Register -- REG_SSI_STCR 0x1C */
+#define SSI_STCR_TXBIT0                        0x00000200
+#define SSI_STCR_TFEN1                 0x00000100
+#define SSI_STCR_TFEN0                 0x00000080
+#define SSI_STCR_TFDIR                 0x00000040
+#define SSI_STCR_TXDIR                 0x00000020
+#define SSI_STCR_TSHFD                 0x00000010
+#define SSI_STCR_TSCKP                 0x00000008
+#define SSI_STCR_TFSI                  0x00000004
+#define SSI_STCR_TFSL                  0x00000002
+#define SSI_STCR_TEFS                  0x00000001
+
+/* SSI Receive Configuration Register -- REG_SSI_SRCR 0x20 */
+#define SSI_SRCR_RXEXT                 0x00000400
+#define SSI_SRCR_RXBIT0                        0x00000200
+#define SSI_SRCR_RFEN1                 0x00000100
+#define SSI_SRCR_RFEN0                 0x00000080
+#define SSI_SRCR_RFDIR                 0x00000040
+#define SSI_SRCR_RXDIR                 0x00000020
+#define SSI_SRCR_RSHFD                 0x00000010
+#define SSI_SRCR_RSCKP                 0x00000008
+#define SSI_SRCR_RFSI                  0x00000004
+#define SSI_SRCR_RFSL                  0x00000002
+#define SSI_SRCR_REFS                  0x00000001
 
 /*
- * The xFCNT bits are read-only, and the xFWM bits are read/write.  Use the
- * CCSR_SSI_SFCSR_xFCNTy() macros to read the FIFO counters, and use the
- * CCSR_SSI_SFCSR_xFWMy() macros to set the watermarks.
+ * SSI Transmit Clock Control Register -- REG_SSI_STCCR 0x24
+ * SSI Receive Clock Control Register -- REG_SSI_SRCCR 0x28
+ */
+#define SSI_SxCCR_DIV2_SHIFT           18
+#define SSI_SxCCR_DIV2                 0x00040000
+#define SSI_SxCCR_PSR_SHIFT            17
+#define SSI_SxCCR_PSR                  0x00020000
+#define SSI_SxCCR_WL_SHIFT             13
+#define SSI_SxCCR_WL_MASK              0x0001E000
+#define SSI_SxCCR_WL(x) \
+       (((((x) / 2) - 1) << SSI_SxCCR_WL_SHIFT) & SSI_SxCCR_WL_MASK)
+#define SSI_SxCCR_DC_SHIFT             8
+#define SSI_SxCCR_DC_MASK              0x00001F00
+#define SSI_SxCCR_DC(x) \
+       ((((x) - 1) << SSI_SxCCR_DC_SHIFT) & SSI_SxCCR_DC_MASK)
+#define SSI_SxCCR_PM_SHIFT             0
+#define SSI_SxCCR_PM_MASK              0x000000FF
+#define SSI_SxCCR_PM(x) \
+       ((((x) - 1) << SSI_SxCCR_PM_SHIFT) & SSI_SxCCR_PM_MASK)
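
[Editorial illustration, not part of this patch] A worked example of the encodings above, pure macro arithmetic; the EXAMPLE_ macro is hypothetical and only shows how the fields combine.

/*
 * Illustrative only: SSI_SxCCR_WL(16) = 0x0000e000, SSI_SxCCR_DC(2) =
 * 0x00000100 (the I2S default programmed by _fsl_ssi_set_dai_fmt()) and
 * SSI_SxCCR_PM(4) = 0x00000003, giving a combined value of 0x0000e103.
 */
#define EXAMPLE_SSI_SxCCR_VAL \
	(SSI_SxCCR_WL(16) | SSI_SxCCR_DC(2) | SSI_SxCCR_PM(4))
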
+
+/*
+ * SSI FIFO Control/Status Register -- REG_SSI_SFCSR 0x2c
+ *
+ * Tx or Rx FIFO Counter -- SSI_SFCSR_xFCNTy Read-Only
+ * Tx or Rx FIFO Watermarks -- SSI_SFCSR_xFWMy Read/Write
  */
-#define CCSR_SSI_SFCSR_RFCNT1_SHIFT    28
-#define CCSR_SSI_SFCSR_RFCNT1_MASK     0xF0000000
-#define CCSR_SSI_SFCSR_RFCNT1(x) \
-       (((x) & CCSR_SSI_SFCSR_RFCNT1_MASK) >> CCSR_SSI_SFCSR_RFCNT1_SHIFT)
-#define CCSR_SSI_SFCSR_TFCNT1_SHIFT    24
-#define CCSR_SSI_SFCSR_TFCNT1_MASK     0x0F000000
-#define CCSR_SSI_SFCSR_TFCNT1(x) \
-       (((x) & CCSR_SSI_SFCSR_TFCNT1_MASK) >> CCSR_SSI_SFCSR_TFCNT1_SHIFT)
-#define CCSR_SSI_SFCSR_RFWM1_SHIFT     20
-#define CCSR_SSI_SFCSR_RFWM1_MASK      0x00F00000
-#define CCSR_SSI_SFCSR_RFWM1(x)        \
-       (((x) << CCSR_SSI_SFCSR_RFWM1_SHIFT) & CCSR_SSI_SFCSR_RFWM1_MASK)
-#define CCSR_SSI_SFCSR_TFWM1_SHIFT     16
-#define CCSR_SSI_SFCSR_TFWM1_MASK      0x000F0000
-#define CCSR_SSI_SFCSR_TFWM1(x)        \
-       (((x) << CCSR_SSI_SFCSR_TFWM1_SHIFT) & CCSR_SSI_SFCSR_TFWM1_MASK)
-#define CCSR_SSI_SFCSR_RFCNT0_SHIFT    12
-#define CCSR_SSI_SFCSR_RFCNT0_MASK     0x0000F000
-#define CCSR_SSI_SFCSR_RFCNT0(x) \
-       (((x) & CCSR_SSI_SFCSR_RFCNT0_MASK) >> CCSR_SSI_SFCSR_RFCNT0_SHIFT)
-#define CCSR_SSI_SFCSR_TFCNT0_SHIFT    8
-#define CCSR_SSI_SFCSR_TFCNT0_MASK     0x00000F00
-#define CCSR_SSI_SFCSR_TFCNT0(x) \
-       (((x) & CCSR_SSI_SFCSR_TFCNT0_MASK) >> CCSR_SSI_SFCSR_TFCNT0_SHIFT)
-#define CCSR_SSI_SFCSR_RFWM0_SHIFT     4
-#define CCSR_SSI_SFCSR_RFWM0_MASK      0x000000F0
-#define CCSR_SSI_SFCSR_RFWM0(x)        \
-       (((x) << CCSR_SSI_SFCSR_RFWM0_SHIFT) & CCSR_SSI_SFCSR_RFWM0_MASK)
-#define CCSR_SSI_SFCSR_TFWM0_SHIFT     0
-#define CCSR_SSI_SFCSR_TFWM0_MASK      0x0000000F
-#define CCSR_SSI_SFCSR_TFWM0(x)        \
-       (((x) << CCSR_SSI_SFCSR_TFWM0_SHIFT) & CCSR_SSI_SFCSR_TFWM0_MASK)
+#define SSI_SFCSR_RFCNT1_SHIFT         28
+#define SSI_SFCSR_RFCNT1_MASK          0xF0000000
+#define SSI_SFCSR_RFCNT1(x) \
+       (((x) & SSI_SFCSR_RFCNT1_MASK) >> SSI_SFCSR_RFCNT1_SHIFT)
+#define SSI_SFCSR_TFCNT1_SHIFT         24
+#define SSI_SFCSR_TFCNT1_MASK          0x0F000000
+#define SSI_SFCSR_TFCNT1(x) \
+       (((x) & SSI_SFCSR_TFCNT1_MASK) >> SSI_SFCSR_TFCNT1_SHIFT)
+#define SSI_SFCSR_RFWM1_SHIFT          20
+#define SSI_SFCSR_RFWM1_MASK           0x00F00000
+#define SSI_SFCSR_RFWM1(x)     \
+       (((x) << SSI_SFCSR_RFWM1_SHIFT) & SSI_SFCSR_RFWM1_MASK)
+#define SSI_SFCSR_TFWM1_SHIFT          16
+#define SSI_SFCSR_TFWM1_MASK           0x000F0000
+#define SSI_SFCSR_TFWM1(x)     \
+       (((x) << SSI_SFCSR_TFWM1_SHIFT) & SSI_SFCSR_TFWM1_MASK)
+#define SSI_SFCSR_RFCNT0_SHIFT         12
+#define SSI_SFCSR_RFCNT0_MASK          0x0000F000
+#define SSI_SFCSR_RFCNT0(x) \
+       (((x) & SSI_SFCSR_RFCNT0_MASK) >> SSI_SFCSR_RFCNT0_SHIFT)
+#define SSI_SFCSR_TFCNT0_SHIFT         8
+#define SSI_SFCSR_TFCNT0_MASK          0x00000F00
+#define SSI_SFCSR_TFCNT0(x) \
+       (((x) & SSI_SFCSR_TFCNT0_MASK) >> SSI_SFCSR_TFCNT0_SHIFT)
+#define SSI_SFCSR_RFWM0_SHIFT          4
+#define SSI_SFCSR_RFWM0_MASK           0x000000F0
+#define SSI_SFCSR_RFWM0(x)     \
+       (((x) << SSI_SFCSR_RFWM0_SHIFT) & SSI_SFCSR_RFWM0_MASK)
+#define SSI_SFCSR_TFWM0_SHIFT          0
+#define SSI_SFCSR_TFWM0_MASK           0x0000000F
+#define SSI_SFCSR_TFWM0(x)     \
+       (((x) << SSI_SFCSR_TFWM0_SHIFT) & SSI_SFCSR_TFWM0_MASK)
 
-#define CCSR_SSI_STR_TEST              0x00008000
-#define CCSR_SSI_STR_RCK2TCK           0x00004000
-#define CCSR_SSI_STR_RFS2TFS           0x00002000
-#define CCSR_SSI_STR_RXSTATE(x) (((x) >> 8) & 0x1F)
-#define CCSR_SSI_STR_TXD2RXD           0x00000080
-#define CCSR_SSI_STR_TCK2RCK           0x00000040
-#define CCSR_SSI_STR_TFS2RFS           0x00000020
-#define CCSR_SSI_STR_TXSTATE(x) ((x) & 0x1F)
+/* SSI Test Register -- REG_SSI_STR 0x30 */
+#define SSI_STR_TEST                   0x00008000
+#define SSI_STR_RCK2TCK                        0x00004000
+#define SSI_STR_RFS2TFS                        0x00002000
+#define SSI_STR_RXSTATE(x)             (((x) >> 8) & 0x1F)
+#define SSI_STR_TXD2RXD                        0x00000080
+#define SSI_STR_TCK2RCK                        0x00000040
+#define SSI_STR_TFS2RFS                        0x00000020
+#define SSI_STR_TXSTATE(x)             ((x) & 0x1F)
 
-#define CCSR_SSI_SOR_CLKOFF            0x00000040
-#define CCSR_SSI_SOR_RX_CLR            0x00000020
-#define CCSR_SSI_SOR_TX_CLR            0x00000010
-#define CCSR_SSI_SOR_INIT              0x00000008
-#define CCSR_SSI_SOR_WAIT_SHIFT                1
-#define CCSR_SSI_SOR_WAIT_MASK         0x00000006
-#define CCSR_SSI_SOR_WAIT(x) (((x) & 3) << CCSR_SSI_SOR_WAIT_SHIFT)
-#define CCSR_SSI_SOR_SYNRST            0x00000001
+/* SSI Option Register -- REG_SSI_SOR 0x34 */
+#define SSI_SOR_CLKOFF                 0x00000040
+#define SSI_SOR_RX_CLR                 0x00000020
+#define SSI_SOR_TX_CLR                 0x00000010
+#define SSI_SOR_xX_CLR(tx)             ((tx) ? SSI_SOR_TX_CLR : SSI_SOR_RX_CLR)
+#define SSI_SOR_INIT                   0x00000008
+#define SSI_SOR_WAIT_SHIFT             1
+#define SSI_SOR_WAIT_MASK              0x00000006
+#define SSI_SOR_WAIT(x)                        (((x) & 3) << SSI_SOR_WAIT_SHIFT)
+#define SSI_SOR_SYNRST                 0x00000001
 
-#define CCSR_SSI_SACNT_FRDIV(x)                (((x) & 0x3f) << 5)
-#define CCSR_SSI_SACNT_WR              0x00000010
-#define CCSR_SSI_SACNT_RD              0x00000008
-#define CCSR_SSI_SACNT_RDWR_MASK       0x00000018
-#define CCSR_SSI_SACNT_TIF             0x00000004
-#define CCSR_SSI_SACNT_FV              0x00000002
-#define CCSR_SSI_SACNT_AC97EN          0x00000001
+/* SSI AC97 Control Register -- REG_SSI_SACNT 0x38 */
+#define SSI_SACNT_FRDIV(x)             (((x) & 0x3f) << 5)
+#define SSI_SACNT_WR                   0x00000010
+#define SSI_SACNT_RD                   0x00000008
+#define SSI_SACNT_RDWR_MASK            0x00000018
+#define SSI_SACNT_TIF                  0x00000004
+#define SSI_SACNT_FV                   0x00000002
+#define SSI_SACNT_AC97EN               0x00000001
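
The renamed SxCCR and SFCSR helpers above are plain shift-and-mask macros, so a caller builds a register value by OR-ing them together. A minimal kernel-style sketch, assuming only the SSI_* macros defined above are in scope (the function names are illustrative, not part of the driver):

static u32 ssi_build_stccr(unsigned int word_len, unsigned int slots,
			   unsigned int prescaler)
{
	/* e.g. word_len = 16, slots = 2 for a typical two-slot I2S frame */
	return SSI_SxCCR_WL(word_len) | SSI_SxCCR_DC(slots) |
	       SSI_SxCCR_PM(prescaler);
}

static u32 ssi_build_watermarks(unsigned int tx_wm, unsigned int rx_wm)
{
	/* the xFWMy watermark fields are read/write */
	return SSI_SFCSR_TFWM0(tx_wm) | SSI_SFCSR_TFWM1(tx_wm) |
	       SSI_SFCSR_RFWM0(rx_wm) | SSI_SFCSR_RFWM1(rx_wm);
}

static unsigned int ssi_rx0_fifo_level(u32 sfcsr)
{
	/* the xFCNTy counter fields are read-only; extract RX FIFO 0 here */
	return SSI_SFCSR_RFCNT0(sfcsr);
}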
 
 
 struct device;
@@ -255,7 +318,7 @@ static inline void fsl_ssi_dbg_isr(struct fsl_ssi_dbg *stats, u32 sisr)
 }
 
 static inline int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg,
-               struct device *dev)
+                                        struct device *dev)
 {
        return 0;
 }
index 5469ffb..7aac63e 100644 (file)
 
 void fsl_ssi_dbg_isr(struct fsl_ssi_dbg *dbg, u32 sisr)
 {
-       if (sisr & CCSR_SSI_SISR_RFRC)
+       if (sisr & SSI_SISR_RFRC)
                dbg->stats.rfrc++;
 
-       if (sisr & CCSR_SSI_SISR_TFRC)
+       if (sisr & SSI_SISR_TFRC)
                dbg->stats.tfrc++;
 
-       if (sisr & CCSR_SSI_SISR_CMDAU)
+       if (sisr & SSI_SISR_CMDAU)
                dbg->stats.cmdau++;
 
-       if (sisr & CCSR_SSI_SISR_CMDDU)
+       if (sisr & SSI_SISR_CMDDU)
                dbg->stats.cmddu++;
 
-       if (sisr & CCSR_SSI_SISR_RXT)
+       if (sisr & SSI_SISR_RXT)
                dbg->stats.rxt++;
 
-       if (sisr & CCSR_SSI_SISR_RDR1)
+       if (sisr & SSI_SISR_RDR1)
                dbg->stats.rdr1++;
 
-       if (sisr & CCSR_SSI_SISR_RDR0)
+       if (sisr & SSI_SISR_RDR0)
                dbg->stats.rdr0++;
 
-       if (sisr & CCSR_SSI_SISR_TDE1)
+       if (sisr & SSI_SISR_TDE1)
                dbg->stats.tde1++;
 
-       if (sisr & CCSR_SSI_SISR_TDE0)
+       if (sisr & SSI_SISR_TDE0)
                dbg->stats.tde0++;
 
-       if (sisr & CCSR_SSI_SISR_ROE1)
+       if (sisr & SSI_SISR_ROE1)
                dbg->stats.roe1++;
 
-       if (sisr & CCSR_SSI_SISR_ROE0)
+       if (sisr & SSI_SISR_ROE0)
                dbg->stats.roe0++;
 
-       if (sisr & CCSR_SSI_SISR_TUE1)
+       if (sisr & SSI_SISR_TUE1)
                dbg->stats.tue1++;
 
-       if (sisr & CCSR_SSI_SISR_TUE0)
+       if (sisr & SSI_SISR_TUE0)
                dbg->stats.tue0++;
 
-       if (sisr & CCSR_SSI_SISR_TFS)
+       if (sisr & SSI_SISR_TFS)
                dbg->stats.tfs++;
 
-       if (sisr & CCSR_SSI_SISR_RFS)
+       if (sisr & SSI_SISR_RFS)
                dbg->stats.rfs++;
 
-       if (sisr & CCSR_SSI_SISR_TLS)
+       if (sisr & SSI_SISR_TLS)
                dbg->stats.tls++;
 
-       if (sisr & CCSR_SSI_SISR_RLS)
+       if (sisr & SSI_SISR_RLS)
                dbg->stats.rls++;
 
-       if (sisr & CCSR_SSI_SISR_RFF1)
+       if (sisr & SSI_SISR_RFF1)
                dbg->stats.rff1++;
 
-       if (sisr & CCSR_SSI_SISR_RFF0)
+       if (sisr & SSI_SISR_RFF0)
                dbg->stats.rff0++;
 
-       if (sisr & CCSR_SSI_SISR_TFE1)
+       if (sisr & SSI_SISR_TFE1)
                dbg->stats.tfe1++;
 
-       if (sisr & CCSR_SSI_SISR_TFE0)
+       if (sisr & SSI_SISR_TFE0)
                dbg->stats.tfe0++;
 }
 
-/* Show the statistics of a flag only if its interrupt is enabled.  The
- * compiler will optimze this code to a no-op if the interrupt is not
- * enabled.
+/**
+ * Show the statistics of a flag only if its interrupt is enabled
+ *
+ * Compilers will optimize it to a no-op if the interrupt is disabled
  */
 #define SIER_SHOW(flag, name) \
        do { \
-               if (CCSR_SSI_SIER_##flag) \
+               if (SSI_SIER_##flag) \
                        seq_printf(s, #name "=%u\n", ssi_dbg->stats.name); \
        } while (0)
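
SIER_SHOW only prints a counter when the corresponding SSI_SIER_* enable constant is non-zero, so disabled interrupts cost nothing at run time. A minimal sketch of how such a macro is typically invoked from the stats show routine, assuming the seq_file's private pointer carries the struct fsl_ssi_dbg (only two of the counters are shown):

static int fsl_ssi_stats_show_sketch(struct seq_file *s, void *unused)
{
	struct fsl_ssi_dbg *ssi_dbg = s->private;

	/* each line expands to: if (SSI_SIER_xxx_EN) seq_printf(s, ...); */
	SIER_SHOW(RFF0_EN, rff0);
	SIER_SHOW(TFE0_EN, tfe0);

	return 0;
}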
 
 
 /**
- * fsl_sysfs_ssi_show: display SSI statistics
+ * Display the statistics for the current SSI device
  *
- * Display the statistics for the current SSI device.  To avoid confusion,
- * we only show those counts that are enabled.
+ * To avoid confusion, only show those counts that are enabled
  */
 static int fsl_ssi_stats_show(struct seq_file *s, void *unused)
 {
@@ -147,7 +147,8 @@ int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg, struct device *dev)
                return -ENOMEM;
 
        ssi_dbg->dbg_stats = debugfs_create_file("stats", S_IRUGO,
-                       ssi_dbg->dbg_dir, ssi_dbg, &fsl_ssi_stats_ops);
+                                                ssi_dbg->dbg_dir, ssi_dbg,
+                                                &fsl_ssi_stats_ops);
        if (!ssi_dbg->dbg_stats) {
                debugfs_remove(ssi_dbg->dbg_dir);
                return -ENOMEM;
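
The error handling kept by this hunk pairs the two debugfs calls: if the "stats" file cannot be created, the directory created earlier in the same function is removed so a failed probe leaves nothing behind. A minimal sketch of that pairing, reusing the fsl_ssi_dbg fields and fsl_ssi_stats_ops referenced above (the directory name is illustrative):

static int fsl_ssi_debugfs_create_sketch(struct fsl_ssi_dbg *ssi_dbg,
					 struct device *dev)
{
	ssi_dbg->dbg_dir = debugfs_create_dir(dev_name(dev), NULL);
	if (!ssi_dbg->dbg_dir)
		return -ENOMEM;

	ssi_dbg->dbg_stats = debugfs_create_file("stats", S_IRUGO,
						 ssi_dbg->dbg_dir, ssi_dbg,
						 &fsl_ssi_stats_ops);
	if (!ssi_dbg->dbg_stats) {
		/* undo the directory so nothing is leaked on failure */
		debugfs_remove(ssi_dbg->dbg_dir);
		return -ENOMEM;
	}

	return 0;
}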
index 0c8f86d..07a5720 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/of_irq.h>
 #include <linux/mfd/syscon.h>
 #include <linux/reset-controller.h>
-#include <linux/clk.h>
 
 #include "hi6210-i2s.h"
 
index 7b49d04..f2c9e8c 100644 (file)
+config SND_SOC_INTEL_SST_TOPLEVEL
+       bool "Intel ASoC SST drivers"
+       default y
+       depends on X86 || COMPILE_TEST
+       select SND_SOC_INTEL_MACH
+       help
+         Intel ASoC SST Platform Drivers. If you have an Intel machine that
+         has an audio controller with a DSP and I2S or DMIC port, then
+         enable this option by saying Y
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about Intel SST drivers.
+
+if SND_SOC_INTEL_SST_TOPLEVEL
+
 config SND_SST_IPC
        tristate
+       # This option controls the IPC core for HiFi2 platforms
 
 config SND_SST_IPC_PCI
        tristate
        select SND_SST_IPC
+       # This option controls the PCI-based IPC for HiFi2 platforms
+       #  (Medfield, Merrifield).
 
 config SND_SST_IPC_ACPI
        tristate
        select SND_SST_IPC
-       select SND_SOC_INTEL_SST
-       select IOSF_MBI
+       # This option controls the ACPI-based IPC for HiFi2 platforms
+       # (Baytrail, Cherrytrail)
 
-config SND_SOC_INTEL_COMMON
+config SND_SOC_INTEL_SST_ACPI
        tristate
+       # This option controls ACPI-based probing on
+       # Haswell/Broadwell/Baytrail legacy and will be set
+       # when these platforms are enabled
 
 config SND_SOC_INTEL_SST
        tristate
-       select SND_SOC_INTEL_SST_ACPI if ACPI
 
 config SND_SOC_INTEL_SST_FIRMWARE
        tristate
        select DW_DMAC_CORE
-
-config SND_SOC_INTEL_SST_ACPI
-       tristate
-
-config SND_SOC_ACPI_INTEL_MATCH
-       tristate
-       select SND_SOC_ACPI if ACPI
-
-config SND_SOC_INTEL_SST_TOPLEVEL
-       tristate "Intel ASoC SST drivers"
-       depends on X86 || COMPILE_TEST
-       select SND_SOC_INTEL_MACH
-       select SND_SOC_INTEL_COMMON
-       help
-          Intel ASoC Audio Drivers. If you have a Intel machine that
-          has audio controller with a DSP and I2S or DMIC port, then
-          enable this option by saying Y or M
-          If unsure select "N".
+       # This option controls firmware download on
+       # Haswell/Broadwell/Baytrail legacy and will be set
+       # when these platforms are enabled
 
 config SND_SOC_INTEL_HASWELL
-       tristate "Intel ASoC SST driver for Haswell/Broadwell"
-       depends on SND_SOC_INTEL_SST_TOPLEVEL && SND_DMA_SGBUF
-       depends on DMADEVICES
+       tristate "Haswell/Broadwell Platforms"
+       depends on SND_DMA_SGBUF
+       depends on DMADEVICES && ACPI
        select SND_SOC_INTEL_SST
+       select SND_SOC_INTEL_SST_ACPI
        select SND_SOC_INTEL_SST_FIRMWARE
+       select SND_SOC_ACPI_INTEL_MATCH
+       help
+         If you have an Intel Haswell or Broadwell platform connected to
+         an I2S codec, then enable this option by saying Y or m. This is
+         typically used for Chromebooks. This is a recommended option.
 
 config SND_SOC_INTEL_BAYTRAIL
-       tristate "Intel ASoC SST driver for Baytrail (legacy)"
-       depends on SND_SOC_INTEL_SST_TOPLEVEL
-       depends on DMADEVICES
+       tristate "Baytrail (legacy) Platforms"
+       depends on DMADEVICES && ACPI
        select SND_SOC_INTEL_SST
+       select SND_SOC_INTEL_SST_ACPI
        select SND_SOC_INTEL_SST_FIRMWARE
+       select SND_SOC_ACPI_INTEL_MATCH
+       help
+         If you have an Intel Baytrail platform connected to an I2S codec,
+         then enable this option by saying Y or m. This was typically used
+         for Baytrail Chromebooks, but this option is now deprecated and
+         not recommended; use SND_SST_ATOM_HIFI2_PLATFORM instead.
+
+config SND_SST_ATOM_HIFI2_PLATFORM_PCI
+       tristate "PCI HiFi2 (Medfield, Merrifield) Platforms"
+       depends on X86 && PCI
+       select SND_SST_IPC_PCI
+       select SND_SOC_COMPRESS
+       help
+         If you have an Intel Medfield or Merrifield/Edison platform, then
+         enable this option by saying Y or m. Distros will typically not
+         enable this option: Medfield devices are not available to
+         developers, and while Merrifield/Edison can run a mainline kernel
+         with limited functionality, it requires a firmware file which is
+         not in the standard firmware tree.
 
 config SND_SST_ATOM_HIFI2_PLATFORM
-       tristate "Intel ASoC SST driver for HiFi2 platforms (*field, *trail)"
-       depends on SND_SOC_INTEL_SST_TOPLEVEL && X86
+       tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms"
+       depends on X86 && ACPI
+       select SND_SST_IPC_ACPI
        select SND_SOC_COMPRESS
+       select SND_SOC_ACPI_INTEL_MATCH
+       select IOSF_MBI
+       help
+         If you have an Intel Baytrail or Cherrytrail platform with an I2S
+         codec, then enable this option by saying Y or m. This is a
+         recommended option.
 
 config SND_SOC_INTEL_SKYLAKE
-       tristate "Intel ASoC SST driver for SKL/BXT/KBL/GLK/CNL"
-       depends on SND_SOC_INTEL_SST_TOPLEVEL && PCI && ACPI
+       tristate "SKL/BXT/KBL/GLK/CNL... Platforms"
+       depends on PCI && ACPI
        select SND_HDA_EXT_CORE
        select SND_HDA_DSP_LOADER
        select SND_SOC_TOPOLOGY
        select SND_SOC_INTEL_SST
+       select SND_SOC_ACPI_INTEL_MATCH
+       help
+         If you have an Intel Skylake/Broxton/ApolloLake/KabyLake/
+         GeminiLake or CannonLake platform with the DSP enabled in the BIOS
+         then enable this option by saying Y or m.
+
+config SND_SOC_ACPI_INTEL_MATCH
+       tristate
+       select SND_SOC_ACPI if ACPI
+       # This option controls the compilation of ACPI matching tables and
+       # helpers and is not meant to be selected by the user.
+
+endif ## SND_SOC_INTEL_SST_TOPLEVEL
 
 # ASoC codec drivers
 source "sound/soc/intel/boards/Kconfig"
index b973d45..8160520 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Core support
-obj-$(CONFIG_SND_SOC_INTEL_COMMON) += common/
+obj-$(CONFIG_SND_SOC) += common/
 
 # Platform Support
 obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
index 32d6e02..6cd481b 100644 (file)
@@ -236,6 +236,9 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
        /* Find the IRQ */
        ctx->irq_num = platform_get_irq(pdev,
                                ctx->pdata->res_info->acpi_ipc_irq_index);
+       if (ctx->irq_num <= 0)
+               return ctx->irq_num < 0 ? ctx->irq_num : -EIO;
+
        return 0;
 }
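
The added check covers both failure modes of platform_get_irq(): a negative errno (for example -EPROBE_DEFER) is propagated as-is, while a zero return is not a usable interrupt and is mapped to -EIO. A minimal sketch of the same idiom with hypothetical names:

static int sketch_get_ipc_irq(struct platform_device *pdev, int index)
{
	int irq = platform_get_irq(pdev, index);

	if (irq < 0)
		return irq;	/* propagate the errno */
	if (irq == 0)
		return -EIO;	/* 0 is not a valid IRQ here */

	return irq;
}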
 
index 65e257b..7ee6aeb 100644 (file)
@@ -220,10 +220,10 @@ int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx,
                sst_free_block(sst_drv_ctx, block);
 out:
        test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id);
-       return 0;
+       return ret;
 }
 
-/*
+/**
  * sst_pause_stream - Send msg for a pausing stream
  * @str_id:     stream ID
  *
@@ -261,7 +261,7 @@ int sst_pause_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
                }
        } else {
                retval = -EBADRQC;
-               dev_dbg(sst_drv_ctx->dev, "SST DBG:BADRQC for stream\n ");
+               dev_dbg(sst_drv_ctx->dev, "SST DBG:BADRQC for stream\n");
        }
 
        return retval;
@@ -284,7 +284,7 @@ int sst_resume_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
        if (!str_info)
                return -EINVAL;
        if (str_info->status == STREAM_RUNNING)
-                       return 0;
+               return 0;
        if (str_info->status == STREAM_PAUSED) {
                retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id,
                                IPC_CMD, IPC_IA_RESUME_STREAM_MRFLD,
index 6f75470..d4e1036 100644 (file)
-config SND_SOC_INTEL_MACH
-       tristate "Intel Audio machine drivers"
+menuconfig SND_SOC_INTEL_MACH
+       bool "Intel Machine drivers"
        depends on SND_SOC_INTEL_SST_TOPLEVEL
-       select SND_SOC_ACPI_INTEL_MATCH if ACPI
+       help
+         Intel ASoC Machine Drivers. If you have an Intel machine that
+         has an audio controller with a DSP and I2S or DMIC port, then
+         enable this option by saying Y
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about Intel ASoC machine drivers.
 
 if SND_SOC_INTEL_MACH
 
-config SND_MFLD_MACHINE
-       tristate "SOC Machine Audio driver for Intel Medfield MID platform"
-       depends on INTEL_SCU_IPC
-       select SND_SOC_SN95031
-       depends on SND_SST_ATOM_HIFI2_PLATFORM
-       select SND_SST_IPC_PCI
-       help
-          This adds support for ASoC machine driver for Intel(R) MID Medfield platform
-          used as alsa device in audio substem in Intel(R) MID devices
-          Say Y if you have such a device.
-          If unsure select "N".
+if SND_SOC_INTEL_HASWELL
 
 config SND_SOC_INTEL_HASWELL_MACH
-       tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
+       tristate "Haswell Lynxpoint"
        depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
-       depends on SND_SOC_INTEL_HASWELL
        select SND_SOC_RT5640
        help
          This adds support for the Lynxpoint Audio DSP on Intel(R) Haswell
-         Ultrabook platforms.
-         Say Y if you have such a device.
+         Ultrabook platforms. This is a recommended option.
+         Say Y or m if you have such a device.
          If unsure select "N".
 
 config SND_SOC_INTEL_BDW_RT5677_MACH
-       tristate "ASoC Audio driver for Intel Broadwell with RT5677 codec"
-       depends on X86_INTEL_LPSS && GPIOLIB && I2C
-       depends on SND_SOC_INTEL_HASWELL
+       tristate "Broadwell with RT5677 codec"
+       depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM && GPIOLIB
        select SND_SOC_RT5677
        help
          This adds support for Intel Broadwell platform based boards with
-         the RT5677 audio codec.
+         the RT5677 audio codec. This is a recommended option.
+         Say Y or m if you have such a device.
+         If unsure select "N".
 
 config SND_SOC_INTEL_BROADWELL_MACH
-       tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
+       tristate "Broadwell Wildcatpoint"
        depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
-       depends on SND_SOC_INTEL_HASWELL
        select SND_SOC_RT286
        help
          This adds support for the Wildcatpoint Audio DSP on Intel(R) Broadwell
          Ultrabook platforms.
-         Say Y if you have such a device.
+         Say Y or m if you have such a device. This is a recommended option.
          If unsure select "N".
+endif ## SND_SOC_INTEL_HASWELL
+
+if SND_SOC_INTEL_BAYTRAIL
 
 config SND_SOC_INTEL_BYT_MAX98090_MACH
-       tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
+       tristate "Baytrail with MAX98090 codec"
        depends on X86_INTEL_LPSS && I2C
-       depends on SND_SST_IPC_ACPI = n
-       depends on SND_SOC_INTEL_BAYTRAIL
        select SND_SOC_MAX98090
        help
          This adds audio driver for Intel Baytrail platform based boards
-         with the MAX98090 audio codec.
+         with the MAX98090 audio codec. This driver is deprecated, use
+         SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH instead for better
+         functionality.
 
 config SND_SOC_INTEL_BYT_RT5640_MACH
-       tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
+       tristate "Baytrail with RT5640 codec"
        depends on X86_INTEL_LPSS && I2C
-       depends on SND_SST_IPC_ACPI = n
-       depends on SND_SOC_INTEL_BAYTRAIL
        select SND_SOC_RT5640
        help
          This adds audio driver for Intel Baytrail platform based boards
          with the RT5640 audio codec. This driver is deprecated, use
          SND_SOC_INTEL_BYTCR_RT5640_MACH instead for better functionality.
 
+endif ## SND_SOC_INTEL_BAYTRAIL
+
+if SND_SST_ATOM_HIFI2_PLATFORM
+
 config SND_SOC_INTEL_BYTCR_RT5640_MACH
-        tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5640 codec"
-       depends on X86 && I2C && ACPI
+       tristate "Baytrail and Baytrail-CR with RT5640 codec"
+       depends on X86_INTEL_LPSS && I2C && ACPI
+       select SND_SOC_ACPI
        select SND_SOC_RT5640
-       depends on SND_SST_ATOM_HIFI2_PLATFORM
-       select SND_SST_IPC_ACPI
        help
-          This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
-          platforms with RT5640 audio codec.
-          Say Y if you have such a device.
-          If unsure select "N".
+         This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
+         platforms with RT5640 audio codec.
+         Say Y or m if you have such a device. This is a recommended option.
+         If unsure select "N".
 
 config SND_SOC_INTEL_BYTCR_RT5651_MACH
-        tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5651 codec"
-       depends on X86 && I2C && ACPI
+       tristate "Baytrail and Baytrail-CR with RT5651 codec"
+       depends on X86_INTEL_LPSS && I2C && ACPI
+       select SND_SOC_ACPI
        select SND_SOC_RT5651
-       depends on SND_SST_ATOM_HIFI2_PLATFORM
-       select SND_SST_IPC_ACPI
        help
-          This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
-          platforms with RT5651 audio codec.
-          Say Y if you have such a device.
-          If unsure select "N".
+         This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
+         platforms with RT5651 audio codec.
+         Say Y or m if you have such a device. This is a recommended option.
+         If unsure select "N".
 
 config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
-        tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
+       tristate "Cherrytrail & Braswell with RT5672 codec"
        depends on X86_INTEL_LPSS && I2C && ACPI
-        select SND_SOC_RT5670
-        depends on SND_SST_ATOM_HIFI2_PLATFORM
-        select SND_SST_IPC_ACPI
+       select SND_SOC_ACPI
+       select SND_SOC_RT5670
         help
           This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
           platforms with RT5672 audio codec.
-          Say Y if you have such a device.
+          Say Y or m if you have such a device. This is a recommended option.
           If unsure select "N".
 
 config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
-       tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5645/5650 codec"
+       tristate "Cherrytrail & Braswell with RT5645/5650 codec"
        depends on X86_INTEL_LPSS && I2C && ACPI
+       select SND_SOC_ACPI
        select SND_SOC_RT5645
-       depends on SND_SST_ATOM_HIFI2_PLATFORM
-       select SND_SST_IPC_ACPI
        help
          This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
          platforms with RT5645/5650 audio codec.
+         Say Y or m if you have such a device. This is a recommended option.
          If unsure select "N".
 
 config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
-       tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with MAX98090 & TI codec"
+       tristate "Cherrytrail & Braswell with MAX98090 & TI codec"
        depends on X86_INTEL_LPSS && I2C && ACPI
        select SND_SOC_MAX98090
        select SND_SOC_TS3A227E
-       depends on SND_SST_ATOM_HIFI2_PLATFORM
-       select SND_SST_IPC_ACPI
        help
          This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
          platforms with MAX98090 audio codec it also can support TI jack chip as aux device.
+         Say Y or m if you have such a device. This is a recommended option.
          If unsure select "N".
 
 config SND_SOC_INTEL_BYT_CHT_DA7213_MACH
-       tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with DA7212/7213 codec"
+       tristate "Baytrail & Cherrytrail with DA7212/7213 codec"
        depends on X86_INTEL_LPSS && I2C && ACPI
+       select SND_SOC_ACPI
        select SND_SOC_DA7213
-       depends on SND_SST_ATOM_HIFI2_PLATFORM
-       select SND_SST_IPC_ACPI
        help
          This adds support for ASoC machine driver for Intel(R) Baytrail & CherryTrail
          platforms with DA7212/7213 audio codec.
+         Say Y or m if you have such a device. This is a recommended option.
          If unsure select "N".
 
 config SND_SOC_INTEL_BYT_CHT_ES8316_MACH
-       tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with ES8316 codec"
+       tristate "Baytrail & Cherrytrail with ES8316 codec"
        depends on X86_INTEL_LPSS && I2C && ACPI
+       select SND_SOC_ACPI
        select SND_SOC_ES8316
-       depends on SND_SST_ATOM_HIFI2_PLATFORM
-       select SND_SST_IPC_ACPI
        help
          This adds support for ASoC machine driver for Intel(R) Baytrail &
          Cherrytrail platforms with ES8316 audio codec.
+         Say Y or m if you have such a device. This is a recommended option.
          If unsure select "N".
 
 config SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH
-       tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail platform with no codec (MinnowBoard MAX, Up)"
+       tristate "Baytrail & Cherrytrail platform with no codec (MinnowBoard MAX, Up)"
        depends on X86_INTEL_LPSS && I2C && ACPI
-       depends on SND_SST_ATOM_HIFI2_PLATFORM
-       select SND_SST_IPC_ACPI
        help
          This adds support for ASoC machine driver for the MinnowBoard Max or
          Up boards and provides access to I2S signals on the Low-Speed
-         connector
+         connector. This is not a recommended option outside of these cases.
+         It is not intended to be enabled by distros by default.
+         Say Y or m if you have such a device.
+
          If unsure select "N".
 
+endif ## SND_SST_ATOM_HIFI2_PLATFORM
+
+if SND_SOC_INTEL_SKYLAKE
+
 config SND_SOC_INTEL_SKL_RT286_MACH
-       tristate "ASoC Audio driver for SKL with RT286 I2S mode"
-       depends on X86 && ACPI && I2C
-       depends on SND_SOC_INTEL_SKYLAKE
+       tristate "SKL with RT286 I2S mode"
+       depends on MFD_INTEL_LPSS && I2C && ACPI
        select SND_SOC_RT286
        select SND_SOC_DMIC
        select SND_SOC_HDAC_HDMI
        help
           This adds support for ASoC machine driver for Skylake platforms
           with RT286 I2S audio codec.
-          Say Y if you have such a device.
+          Say Y or m if you have such a device.
           If unsure select "N".
 
 config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
-       tristate "ASoC Audio driver for SKL with NAU88L25 and SSM4567 in I2S Mode"
-       depends on X86_INTEL_LPSS && I2C
-       depends on SND_SOC_INTEL_SKYLAKE
+       tristate "SKL with NAU88L25 and SSM4567 in I2S Mode"
+       depends on MFD_INTEL_LPSS && I2C && ACPI
        select SND_SOC_NAU8825
        select SND_SOC_SSM4567
        select SND_SOC_DMIC
@@ -185,13 +185,12 @@ config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
        help
          This adds support for ASoC Onboard Codec I2S machine driver. This will
          create an alsa sound card for NAU88L25 + SSM4567.
-         Say Y if you have such a device.
+         Say Y or m if you have such a device. This is a recommended option.
          If unsure select "N".
 
 config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
-       tristate "ASoC Audio driver for SKL with NAU88L25 and MAX98357A in I2S Mode"
-       depends on X86_INTEL_LPSS && I2C
-       depends on SND_SOC_INTEL_SKYLAKE
+       tristate "SKL with NAU88L25 and MAX98357A in I2S Mode"
+       depends on MFD_INTEL_LPSS && I2C && ACPI
        select SND_SOC_NAU8825
        select SND_SOC_MAX98357A
        select SND_SOC_DMIC
@@ -199,13 +198,12 @@ config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
        help
          This adds support for ASoC Onboard Codec I2S machine driver. This will
          create an alsa sound card for NAU88L25 + MAX98357A.
-         Say Y if you have such a device.
+         Say Y or m if you have such a device. This is a recommended option.
          If unsure select "N".
 
 config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
-       tristate "ASoC Audio driver for Broxton with DA7219 and MAX98357A in I2S Mode"
-       depends on X86 && ACPI && I2C
-       depends on SND_SOC_INTEL_SKYLAKE
+       tristate "Broxton with DA7219 and MAX98357A in I2S Mode"
+       depends on MFD_INTEL_LPSS && I2C && ACPI
        select SND_SOC_DA7219
        select SND_SOC_MAX98357A
        select SND_SOC_DMIC
@@ -214,13 +212,12 @@ config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
        help
           This adds support for ASoC machine driver for Broxton-P platforms
           with DA7219 + MAX98357A I2S audio codec.
-          Say Y if you have such a device.
+          Say Y or m if you have such a device. This is a recommended option.
           If unsure select "N".
 
 config SND_SOC_INTEL_BXT_RT298_MACH
-       tristate "ASoC Audio driver for Broxton with RT298 I2S mode"
-       depends on X86 && ACPI && I2C
-       depends on SND_SOC_INTEL_SKYLAKE
+       tristate "Broxton with RT298 I2S mode"
+       depends on MFD_INTEL_LPSS && I2C && ACPI
        select SND_SOC_RT298
        select SND_SOC_DMIC
        select SND_SOC_HDAC_HDMI
@@ -228,14 +225,12 @@ config SND_SOC_INTEL_BXT_RT298_MACH
        help
           This adds support for ASoC machine driver for Broxton platforms
           with RT286 I2S audio codec.
-          Say Y if you have such a device.
+          Say Y or m if you have such a device. This is a recommended option.
           If unsure select "N".
 
 config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
-       tristate "ASoC Audio driver for KBL with RT5663 and MAX98927 in I2S Mode"
-       depends on X86_INTEL_LPSS && I2C
-       select SND_SOC_INTEL_SST
-       depends on SND_SOC_INTEL_SKYLAKE
+       tristate "KBL with RT5663 and MAX98927 in I2S Mode"
+       depends on MFD_INTEL_LPSS && I2C && ACPI
        select SND_SOC_RT5663
        select SND_SOC_MAX98927
        select SND_SOC_DMIC
@@ -243,14 +238,13 @@ config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
        help
          This adds support for ASoC Onboard Codec I2S machine driver. This will
          create an alsa sound card for RT5663 + MAX98927.
-         Say Y if you have such a device.
+         Say Y or m if you have such a device. This is a recommended option.
          If unsure select "N".
 
 config SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH
-        tristate "ASoC Audio driver for KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
-        depends on X86_INTEL_LPSS && I2C && SPI
-        select SND_SOC_INTEL_SST
-        depends on SND_SOC_INTEL_SKYLAKE
+        tristate "KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
+        depends on MFD_INTEL_LPSS && I2C && ACPI
+        depends on SPI
         select SND_SOC_RT5663
         select SND_SOC_RT5514
         select SND_SOC_RT5514_SPI
@@ -259,7 +253,8 @@ config SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH
         help
           This adds support for ASoC Onboard Codec I2S machine driver. This will
           create an alsa sound card for RT5663 + RT5514 + MAX98927.
-          Say Y if you have such a device.
+          Say Y or m if you have such a device. This is a recommended option.
           If unsure select "N".
+endif ## SND_SOC_INTEL_SKYLAKE
 
-endif
+endif ## SND_SOC_INTEL_MACH
index c4d82ad..2179ded 100644 (file)
@@ -219,7 +219,7 @@ static struct snd_soc_card bytcht_da7213_card = {
        .num_dapm_routes = ARRAY_SIZE(audio_map),
 };
 
-static char codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char codec_name[SND_ACPI_I2C_ID_LEN];
 
 static int bytcht_da7213_probe(struct platform_device *pdev)
 {
@@ -243,7 +243,7 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
        }
 
        /* fixup codec name based on HID */
-       i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+       i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
        if (i2c_name) {
                snprintf(codec_name, sizeof(codec_name),
                        "%s%s", "i2c-", i2c_name);
index 8088396..305e7f4 100644 (file)
@@ -232,15 +232,39 @@ static struct snd_soc_card byt_cht_es8316_card = {
        .fully_routed = true,
 };
 
+static char codec_name[SND_ACPI_I2C_ID_LEN];
+
 static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
 {
-       int ret = 0;
        struct byt_cht_es8316_private *priv;
+       struct snd_soc_acpi_mach *mach;
+       const char *i2c_name = NULL;
+       int dai_index = 0;
+       int i;
+       int ret = 0;
 
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;
 
+       mach = (&pdev->dev)->platform_data;
+       /* fix index of codec dai */
+       for (i = 0; i < ARRAY_SIZE(byt_cht_es8316_dais); i++) {
+               if (!strcmp(byt_cht_es8316_dais[i].codec_name,
+                           "i2c-ESSX8316:00")) {
+                       dai_index = i;
+                       break;
+               }
+       }
+
+       /* fixup codec name based on HID */
+       i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
+       if (i2c_name) {
+               snprintf(codec_name, sizeof(codec_name),
+                       "%s%s", "i2c-", i2c_name);
+               byt_cht_es8316_dais[dai_index].codec_name = codec_name;
+       }
+
        /* register the soc card */
        byt_cht_es8316_card.dev = &pdev->dev;
        snd_soc_card_set_drvdata(&byt_cht_es8316_card, priv);
index f2c0fc4..b6a1cfe 100644 (file)
@@ -713,7 +713,7 @@ static struct snd_soc_card byt_rt5640_card = {
        .fully_routed = true,
 };
 
-static char byt_rt5640_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char byt_rt5640_codec_name[SND_ACPI_I2C_ID_LEN];
 static char byt_rt5640_codec_aif_name[12]; /*  = "rt5640-aif[1|2]" */
 static char byt_rt5640_cpu_dai_name[10]; /*  = "ssp[0|2]-port" */
 
@@ -762,7 +762,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
        }
 
        /* fixup codec name based on HID */
-       i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+       i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
        if (i2c_name) {
                snprintf(byt_rt5640_codec_name, sizeof(byt_rt5640_codec_name),
                        "%s%s", "i2c-", i2c_name);
index d955836..456526a 100644 (file)
@@ -38,6 +38,8 @@ enum {
        BYT_RT5651_DMIC_MAP,
        BYT_RT5651_IN1_MAP,
        BYT_RT5651_IN2_MAP,
+       BYT_RT5651_IN1_IN2_MAP,
+       BYT_RT5651_IN3_MAP,
 };
 
 #define BYT_RT5651_MAP(quirk)  ((quirk) & GENMASK(7, 0))
@@ -62,6 +64,8 @@ static void log_quirks(struct device *dev)
                dev_info(dev, "quirk IN1_MAP enabled");
        if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN2_MAP)
                dev_info(dev, "quirk IN2_MAP enabled");
+       if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN3_MAP)
+               dev_info(dev, "quirk IN3_MAP enabled");
        if (byt_rt5651_quirk & BYT_RT5651_DMIC_EN)
                dev_info(dev, "quirk DMIC enabled");
        if (byt_rt5651_quirk & BYT_RT5651_MCLK_EN)
@@ -127,6 +131,7 @@ static const struct snd_soc_dapm_widget byt_rt5651_widgets[] = {
        SND_SOC_DAPM_MIC("Headset Mic", NULL),
        SND_SOC_DAPM_MIC("Internal Mic", NULL),
        SND_SOC_DAPM_SPK("Speaker", NULL),
+       SND_SOC_DAPM_LINE("Line In", NULL),
        SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
                            platform_clock_control, SND_SOC_DAPM_PRE_PMU |
                            SND_SOC_DAPM_POST_PMD),
@@ -138,6 +143,7 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
        {"Headset Mic", NULL, "Platform Clock"},
        {"Internal Mic", NULL, "Platform Clock"},
        {"Speaker", NULL, "Platform Clock"},
+       {"Line In", NULL, "Platform Clock"},
 
        {"AIF1 Playback", NULL, "ssp2 Tx"},
        {"ssp2 Tx", NULL, "codec_out0"},
@@ -151,6 +157,9 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
        {"Headphone", NULL, "HPOR"},
        {"Speaker", NULL, "LOUTL"},
        {"Speaker", NULL, "LOUTR"},
+       {"IN2P", NULL, "Line In"},
+       {"IN2N", NULL, "Line In"},
+
 };
 
 static const struct snd_soc_dapm_route byt_rt5651_intmic_dmic_map[] = {
@@ -171,11 +180,25 @@ static const struct snd_soc_dapm_route byt_rt5651_intmic_in2_map[] = {
        {"IN2P", NULL, "Internal Mic"},
 };
 
+static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_in2_map[] = {
+       {"Internal Mic", NULL, "micbias1"},
+       {"IN1P", NULL, "Internal Mic"},
+       {"IN2P", NULL, "Internal Mic"},
+       {"IN3P", NULL, "Headset Mic"},
+};
+
+static const struct snd_soc_dapm_route byt_rt5651_intmic_in3_map[] = {
+       {"Internal Mic", NULL, "micbias1"},
+       {"IN3P", NULL, "Headset Mic"},
+       {"IN1P", NULL, "Internal Mic"},
+};
+
 static const struct snd_kcontrol_new byt_rt5651_controls[] = {
        SOC_DAPM_PIN_SWITCH("Headphone"),
        SOC_DAPM_PIN_SWITCH("Headset Mic"),
        SOC_DAPM_PIN_SWITCH("Internal Mic"),
        SOC_DAPM_PIN_SWITCH("Speaker"),
+       SOC_DAPM_PIN_SWITCH("Line In"),
 };
 
 static struct snd_soc_jack_pin bytcr_jack_pins[] = {
@@ -247,8 +270,16 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Max B3 PLATFORM"),
                },
-               .driver_data = (void *)(BYT_RT5651_DMIC_MAP |
-                                       BYT_RT5651_DMIC_EN),
+               .driver_data = (void *)(BYT_RT5651_IN3_MAP),
+       },
+       {
+               .callback = byt_rt5651_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ADI"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Turbot"),
+               },
+               .driver_data = (void *)(BYT_RT5651_MCLK_EN |
+                                       BYT_RT5651_IN3_MAP),
        },
        {
                .callback = byt_rt5651_quirk_cb,
@@ -256,7 +287,8 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "KIANO"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "KIANO SlimNote 14.2"),
                },
-               .driver_data = (void *)(BYT_RT5651_IN2_MAP),
+               .driver_data = (void *)(BYT_RT5651_MCLK_EN |
+                                       BYT_RT5651_IN1_IN2_MAP),
        },
        {}
 };
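
The table above is consulted at probe time through dmi_check_system(): every entry whose DMI_MATCH fields all match the running board has its callback invoked, and driver_data carries the BYT_RT5651_* flags to apply. A minimal sketch of that mechanism, assuming a callback of roughly this shape (the driver's actual callback body is not part of this hunk):

static unsigned long sketch_quirk;

static int sketch_quirk_cb(const struct dmi_system_id *id)
{
	/* stash the flags packed into driver_data for later use */
	sketch_quirk = (unsigned long)id->driver_data;
	return 1;
}

static void sketch_apply_quirks(void)
{
	/* walks the table and invokes the .callback of matching entries */
	dmi_check_system(byt_rt5651_quirk_table);
}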
@@ -281,6 +313,14 @@ static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
                custom_map = byt_rt5651_intmic_in2_map;
                num_routes = ARRAY_SIZE(byt_rt5651_intmic_in2_map);
                break;
+       case BYT_RT5651_IN1_IN2_MAP:
+               custom_map = byt_rt5651_intmic_in1_in2_map;
+               num_routes = ARRAY_SIZE(byt_rt5651_intmic_in1_in2_map);
+               break;
+       case BYT_RT5651_IN3_MAP:
+               custom_map = byt_rt5651_intmic_in3_map;
+               num_routes = ARRAY_SIZE(byt_rt5651_intmic_in3_map);
+               break;
        default:
                custom_map = byt_rt5651_intmic_dmic_map;
                num_routes = ARRAY_SIZE(byt_rt5651_intmic_dmic_map);
@@ -469,7 +509,7 @@ static struct snd_soc_card byt_rt5651_card = {
        .fully_routed = true,
 };
 
-static char byt_rt5651_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char byt_rt5651_codec_name[SND_ACPI_I2C_ID_LEN];
 
 static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
 {
@@ -499,7 +539,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
        }
 
        /* fixup codec name based on HID */
-       i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+       i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
        if (i2c_name) {
                snprintf(byt_rt5651_codec_name, sizeof(byt_rt5651_codec_name),
                        "%s%s", "i2c-", i2c_name);
index 18d129c..31641aa 100644 (file)
@@ -49,7 +49,7 @@ struct cht_acpi_card {
 struct cht_mc_private {
        struct snd_soc_jack jack;
        struct cht_acpi_card *acpi_card;
-       char codec_name[16];
+       char codec_name[SND_ACPI_I2C_ID_LEN];
        struct clk *mclk;
 };
 
@@ -118,6 +118,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
        SND_SOC_DAPM_HP("Headphone", NULL),
        SND_SOC_DAPM_MIC("Headset Mic", NULL),
        SND_SOC_DAPM_MIC("Int Mic", NULL),
+       SND_SOC_DAPM_MIC("Int Analog Mic", NULL),
        SND_SOC_DAPM_SPK("Ext Spk", NULL),
        SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
                        platform_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
@@ -128,6 +129,8 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
        {"IN1N", NULL, "Headset Mic"},
        {"DMIC L1", NULL, "Int Mic"},
        {"DMIC R1", NULL, "Int Mic"},
+       {"IN2P", NULL, "Int Analog Mic"},
+       {"IN2N", NULL, "Int Analog Mic"},
        {"Headphone", NULL, "HPOL"},
        {"Headphone", NULL, "HPOR"},
        {"Ext Spk", NULL, "SPOL"},
@@ -135,6 +138,9 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
        {"Headphone", NULL, "Platform Clock"},
        {"Headset Mic", NULL, "Platform Clock"},
        {"Int Mic", NULL, "Platform Clock"},
+       {"Int Analog Mic", NULL, "Platform Clock"},
+       {"Int Analog Mic", NULL, "micbias1"},
+       {"Int Analog Mic", NULL, "micbias2"},
        {"Ext Spk", NULL, "Platform Clock"},
 };
 
@@ -189,6 +195,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = {
        SOC_DAPM_PIN_SWITCH("Headphone"),
        SOC_DAPM_PIN_SWITCH("Headset Mic"),
        SOC_DAPM_PIN_SWITCH("Int Mic"),
+       SOC_DAPM_PIN_SWITCH("Int Analog Mic"),
        SOC_DAPM_PIN_SWITCH("Ext Spk"),
 };
 
@@ -499,7 +506,7 @@ static struct cht_acpi_card snd_soc_cards[] = {
        {"10EC5650", CODEC_TYPE_RT5650, &snd_soc_card_chtrt5650},
 };
 
-static char cht_rt5645_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char cht_rt5645_codec_name[SND_ACPI_I2C_ID_LEN];
 static char cht_rt5645_codec_aif_name[12]; /*  = "rt5645-aif[1|2]" */
 static char cht_rt5645_cpu_dai_name[10]; /*  = "ssp[0|2]-port" */
 
@@ -566,7 +573,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
                }
 
        /* fixup codec name based on HID */
-       i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+       i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
        if (i2c_name) {
                snprintf(cht_rt5645_codec_name, sizeof(cht_rt5645_codec_name),
                        "%s%s", "i2c-", i2c_name);
index f8f21ee..c14a52d 100644 (file)
@@ -35,7 +35,7 @@
 
 struct cht_mc_private {
        struct snd_soc_jack headset;
-       char codec_name[16];
+       char codec_name[SND_ACPI_I2C_ID_LEN];
        struct clk *mclk;
 };
 
@@ -396,7 +396,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
 
        /* fixup codec name based on HID */
        if (mach) {
-               i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+               i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
                if (i2c_name) {
                        snprintf(drv->codec_name, sizeof(drv->codec_name),
                                 "i2c-%s", i2c_name);
index 5e1ea03..3c51607 100644 (file)
@@ -76,7 +76,7 @@ static int haswell_rt5640_hw_params(struct snd_pcm_substream *substream,
        }
 
        /* set correct codec filter for DAI format and clock config */
-       snd_soc_update_bits(rtd->codec, 0x83, 0xffff, 0x8000);
+       snd_soc_component_update_bits(codec_dai->component, 0x83, 0xffff, 0x8000);
 
        return ret;
 }
index 6f9a8bc..bf7014c 100644 (file)
@@ -101,7 +101,7 @@ static const struct snd_soc_dapm_route kabylake_map[] = {
        { "ssp0 Tx", NULL, "spk_out" },
 
        { "AIF Playback", NULL, "ssp1 Tx" },
-       { "ssp1 Tx", NULL, "hs_out" },
+       { "ssp1 Tx", NULL, "codec1_out" },
 
        { "hs_in", NULL, "ssp1 Rx" },
        { "ssp1 Rx", NULL, "AIF Capture" },
@@ -225,7 +225,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
        }
 
        jack = &ctx->kabylake_headset;
-       snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+       snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
        snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
        snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
        snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
index 6072164..90ea98f 100644 (file)
@@ -109,7 +109,7 @@ static const struct snd_soc_dapm_route kabylake_map[] = {
        { "ssp0 Tx", NULL, "spk_out" },
 
        { "AIF Playback", NULL, "ssp1 Tx" },
-       { "ssp1 Tx", NULL, "hs_out" },
+       { "ssp1 Tx", NULL, "codec1_out" },
 
        { "hs_in", NULL, "ssp1 Rx" },
        { "ssp1 Rx", NULL, "AIF Capture" },
@@ -195,7 +195,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
        }
 
        jack = &ctx->kabylake_headset;
-       snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+       snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
        snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
        snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
        snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
diff --git a/sound/soc/intel/boards/mfld_machine.c b/sound/soc/intel/boards/mfld_machine.c
deleted file mode 100644 (file)
index 6f44acf..0000000
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- *  mfld_machine.c - ASoc Machine driver for Intel Medfield MID platform
- *
- *  Copyright (C) 2010 Intel Corp
- *  Author: Vinod Koul <vinod.koul@intel.com>
- *  Author: Harsha Priya <priya.harsha@intel.com>
- *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include "../codecs/sn95031.h"
-
-#define MID_MONO 1
-#define MID_STEREO 2
-#define MID_MAX_CAP 5
-#define MFLD_JACK_INSERT 0x04
-
-enum soc_mic_bias_zones {
-       MFLD_MV_START = 0,
-       /* mic bias volutage range for Headphones*/
-       MFLD_MV_HP = 400,
-       /* mic bias volutage range for American Headset*/
-       MFLD_MV_AM_HS = 650,
-       /* mic bias volutage range for Headset*/
-       MFLD_MV_HS = 2000,
-       MFLD_MV_UNDEFINED,
-};
-
-static unsigned int    hs_switch;
-static unsigned int    lo_dac;
-static struct snd_soc_codec *mfld_codec;
-
-struct mfld_mc_private {
-       void __iomem *int_base;
-       u8 interrupt_status;
-};
-
-struct snd_soc_jack mfld_jack;
-
-/*Headset jack detection DAPM pins */
-static struct snd_soc_jack_pin mfld_jack_pins[] = {
-       {
-               .pin = "Headphones",
-               .mask = SND_JACK_HEADPHONE,
-       },
-       {
-               .pin = "AMIC1",
-               .mask = SND_JACK_MICROPHONE,
-       },
-};
-
-/* jack detection voltage zones */
-static struct snd_soc_jack_zone mfld_zones[] = {
-       {MFLD_MV_START, MFLD_MV_AM_HS, SND_JACK_HEADPHONE},
-       {MFLD_MV_AM_HS, MFLD_MV_HS, SND_JACK_HEADSET},
-};
-
-/* sound card controls */
-static const char * const headset_switch_text[] = {"Earpiece", "Headset"};
-
-static const char * const lo_text[] = {"Vibra", "Headset", "IHF", "None"};
-
-static const struct soc_enum headset_enum =
-       SOC_ENUM_SINGLE_EXT(2, headset_switch_text);
-
-static const struct soc_enum lo_enum =
-       SOC_ENUM_SINGLE_EXT(4, lo_text);
-
-static int headset_get_switch(struct snd_kcontrol *kcontrol,
-       struct snd_ctl_elem_value *ucontrol)
-{
-       ucontrol->value.enumerated.item[0] = hs_switch;
-       return 0;
-}
-
-static int headset_set_switch(struct snd_kcontrol *kcontrol,
-       struct snd_ctl_elem_value *ucontrol)
-{
-       struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_context *dapm = &card->dapm;
-
-       if (ucontrol->value.enumerated.item[0] == hs_switch)
-               return 0;
-
-       snd_soc_dapm_mutex_lock(dapm);
-
-       if (ucontrol->value.enumerated.item[0]) {
-               pr_debug("hs_set HS path\n");
-               snd_soc_dapm_enable_pin_unlocked(dapm, "Headphones");
-               snd_soc_dapm_disable_pin_unlocked(dapm, "EPOUT");
-       } else {
-               pr_debug("hs_set EP path\n");
-               snd_soc_dapm_disable_pin_unlocked(dapm, "Headphones");
-               snd_soc_dapm_enable_pin_unlocked(dapm, "EPOUT");
-       }
-
-       snd_soc_dapm_sync_unlocked(dapm);
-
-       snd_soc_dapm_mutex_unlock(dapm);
-
-       hs_switch = ucontrol->value.enumerated.item[0];
-
-       return 0;
-}
-
-static void lo_enable_out_pins(struct snd_soc_dapm_context *dapm)
-{
-       snd_soc_dapm_enable_pin_unlocked(dapm, "IHFOUTL");
-       snd_soc_dapm_enable_pin_unlocked(dapm, "IHFOUTR");
-       snd_soc_dapm_enable_pin_unlocked(dapm, "LINEOUTL");
-       snd_soc_dapm_enable_pin_unlocked(dapm, "LINEOUTR");
-       snd_soc_dapm_enable_pin_unlocked(dapm, "VIB1OUT");
-       snd_soc_dapm_enable_pin_unlocked(dapm, "VIB2OUT");
-       if (hs_switch) {
-               snd_soc_dapm_enable_pin_unlocked(dapm, "Headphones");
-               snd_soc_dapm_disable_pin_unlocked(dapm, "EPOUT");
-       } else {
-               snd_soc_dapm_disable_pin_unlocked(dapm, "Headphones");
-               snd_soc_dapm_enable_pin_unlocked(dapm, "EPOUT");
-       }
-}
-
-static int lo_get_switch(struct snd_kcontrol *kcontrol,
-       struct snd_ctl_elem_value *ucontrol)
-{
-       ucontrol->value.enumerated.item[0] = lo_dac;
-       return 0;
-}
-
-static int lo_set_switch(struct snd_kcontrol *kcontrol,
-       struct snd_ctl_elem_value *ucontrol)
-{
-       struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_context *dapm = &card->dapm;
-
-       if (ucontrol->value.enumerated.item[0] == lo_dac)
-               return 0;
-
-       snd_soc_dapm_mutex_lock(dapm);
-
-       /* we dont want to work with last state of lineout so just enable all
-        * pins and then disable pins not required
-        */
-       lo_enable_out_pins(dapm);
-
-       switch (ucontrol->value.enumerated.item[0]) {
-       case 0:
-               pr_debug("set vibra path\n");
-               snd_soc_dapm_disable_pin_unlocked(dapm, "VIB1OUT");
-               snd_soc_dapm_disable_pin_unlocked(dapm, "VIB2OUT");
-               snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0);
-               break;
-
-       case 1:
-               pr_debug("set hs  path\n");
-               snd_soc_dapm_disable_pin_unlocked(dapm, "Headphones");
-               snd_soc_dapm_disable_pin_unlocked(dapm, "EPOUT");
-               snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0x22);
-               break;
-
-       case 2:
-               pr_debug("set spkr path\n");
-               snd_soc_dapm_disable_pin_unlocked(dapm, "IHFOUTL");
-               snd_soc_dapm_disable_pin_unlocked(dapm, "IHFOUTR");
-               snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0x44);
-               break;
-
-       case 3:
-               pr_debug("set null path\n");
-               snd_soc_dapm_disable_pin_unlocked(dapm, "LINEOUTL");
-               snd_soc_dapm_disable_pin_unlocked(dapm, "LINEOUTR");
-               snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0x66);
-               break;
-       }
-
-       snd_soc_dapm_sync_unlocked(dapm);
-
-       snd_soc_dapm_mutex_unlock(dapm);
-
-       lo_dac = ucontrol->value.enumerated.item[0];
-       return 0;
-}
-
-static const struct snd_kcontrol_new mfld_snd_controls[] = {
-       SOC_ENUM_EXT("Playback Switch", headset_enum,
-                       headset_get_switch, headset_set_switch),
-       SOC_ENUM_EXT("Lineout Mux", lo_enum,
-                       lo_get_switch, lo_set_switch),
-};
-
-static const struct snd_soc_dapm_widget mfld_widgets[] = {
-       SND_SOC_DAPM_HP("Headphones", NULL),
-       SND_SOC_DAPM_MIC("Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route mfld_map[] = {
-       {"Headphones", NULL, "HPOUTR"},
-       {"Headphones", NULL, "HPOUTL"},
-       {"Mic", NULL, "AMIC1"},
-};
-
-static void mfld_jack_check(unsigned int intr_status)
-{
-       struct mfld_jack_data jack_data;
-
-       if (!mfld_codec)
-               return;
-
-       jack_data.mfld_jack = &mfld_jack;
-       jack_data.intr_id = intr_status;
-
-       sn95031_jack_detection(mfld_codec, &jack_data);
-       /* TODO: add american headset detection post gpiolib support */
-}
-
-static int mfld_init(struct snd_soc_pcm_runtime *runtime)
-{
-       struct snd_soc_dapm_context *dapm = &runtime->card->dapm;
-       int ret_val;
-
-       /* default is earpiece pin, userspace sets it explcitly */
-       snd_soc_dapm_disable_pin(dapm, "Headphones");
-       /* default is lineout NC, userspace sets it explcitly */
-       snd_soc_dapm_disable_pin(dapm, "LINEOUTL");
-       snd_soc_dapm_disable_pin(dapm, "LINEOUTR");
-       lo_dac = 3;
-       hs_switch = 0;
-       /* we dont use linein in this so set to NC */
-       snd_soc_dapm_disable_pin(dapm, "LINEINL");
-       snd_soc_dapm_disable_pin(dapm, "LINEINR");
-
-       /* Headset and button jack detection */
-       ret_val = snd_soc_card_jack_new(runtime->card,
-                       "Intel(R) MID Audio Jack", SND_JACK_HEADSET |
-                       SND_JACK_BTN_0 | SND_JACK_BTN_1, &mfld_jack,
-                       mfld_jack_pins, ARRAY_SIZE(mfld_jack_pins));
-       if (ret_val) {
-               pr_err("jack creation failed\n");
-               return ret_val;
-       }
-
-       ret_val = snd_soc_jack_add_zones(&mfld_jack,
-                       ARRAY_SIZE(mfld_zones), mfld_zones);
-       if (ret_val) {
-               pr_err("adding jack zones failed\n");
-               return ret_val;
-       }
-
-       mfld_codec = runtime->codec;
-
-       /* we want to check if anything is inserted at boot,
-        * so send a fake event to the codec and it will read the ADC
-        * to find out whether anything is there or not */
-       mfld_jack_check(MFLD_JACK_INSERT);
-       return ret_val;
-}
-
-static struct snd_soc_dai_link mfld_msic_dailink[] = {
-       {
-               .name = "Medfield Headset",
-               .stream_name = "Headset",
-               .cpu_dai_name = "Headset-cpu-dai",
-               .codec_dai_name = "SN95031 Headset",
-               .codec_name = "sn95031",
-               .platform_name = "sst-platform",
-               .init = mfld_init,
-       },
-       {
-               .name = "Medfield Speaker",
-               .stream_name = "Speaker",
-               .cpu_dai_name = "Speaker-cpu-dai",
-               .codec_dai_name = "SN95031 Speaker",
-               .codec_name = "sn95031",
-               .platform_name = "sst-platform",
-               .init = NULL,
-       },
-       {
-               .name = "Medfield Vibra",
-               .stream_name = "Vibra1",
-               .cpu_dai_name = "Vibra1-cpu-dai",
-               .codec_dai_name = "SN95031 Vibra1",
-               .codec_name = "sn95031",
-               .platform_name = "sst-platform",
-               .init = NULL,
-       },
-       {
-               .name = "Medfield Haptics",
-               .stream_name = "Vibra2",
-               .cpu_dai_name = "Vibra2-cpu-dai",
-               .codec_dai_name = "SN95031 Vibra2",
-               .codec_name = "sn95031",
-               .platform_name = "sst-platform",
-               .init = NULL,
-       },
-       {
-               .name = "Medfield Compress",
-               .stream_name = "Speaker",
-               .cpu_dai_name = "Compress-cpu-dai",
-               .codec_dai_name = "SN95031 Speaker",
-               .codec_name = "sn95031",
-               .platform_name = "sst-platform",
-               .init = NULL,
-       },
-};
-
-/* SoC card */
-static struct snd_soc_card snd_soc_card_mfld = {
-       .name = "medfield_audio",
-       .owner = THIS_MODULE,
-       .dai_link = mfld_msic_dailink,
-       .num_links = ARRAY_SIZE(mfld_msic_dailink),
-
-       .controls = mfld_snd_controls,
-       .num_controls = ARRAY_SIZE(mfld_snd_controls),
-       .dapm_widgets = mfld_widgets,
-       .num_dapm_widgets = ARRAY_SIZE(mfld_widgets),
-       .dapm_routes = mfld_map,
-       .num_dapm_routes = ARRAY_SIZE(mfld_map),
-};
-
-static irqreturn_t snd_mfld_jack_intr_handler(int irq, void *dev)
-{
-       struct mfld_mc_private *mc_private = (struct mfld_mc_private *) dev;
-
-       memcpy_fromio(&mc_private->interrupt_status,
-                       ((void *)(mc_private->int_base)),
-                       sizeof(u8));
-       return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t snd_mfld_jack_detection(int irq, void *data)
-{
-       struct mfld_mc_private *mc_drv_ctx = (struct mfld_mc_private *) data;
-
-       mfld_jack_check(mc_drv_ctx->interrupt_status);
-
-       return IRQ_HANDLED;
-}
-
-static int snd_mfld_mc_probe(struct platform_device *pdev)
-{
-       int ret_val = 0, irq;
-       struct mfld_mc_private *mc_drv_ctx;
-       struct resource *irq_mem;
-
-       pr_debug("snd_mfld_mc_probe called\n");
-
-       /* retrieve the irq number */
-       irq = platform_get_irq(pdev, 0);
-
-       /* audio interrupt base is the SRAM location where
-        * interrupts are stored by the system FW */
-       mc_drv_ctx = devm_kzalloc(&pdev->dev, sizeof(*mc_drv_ctx), GFP_ATOMIC);
-       if (!mc_drv_ctx)
-               return -ENOMEM;
-
-       irq_mem = platform_get_resource_byname(
-                               pdev, IORESOURCE_MEM, "IRQ_BASE");
-       if (!irq_mem) {
-               pr_err("no mem resource given\n");
-               return -ENODEV;
-       }
-       mc_drv_ctx->int_base = devm_ioremap_nocache(&pdev->dev, irq_mem->start,
-                                                   resource_size(irq_mem));
-       if (!mc_drv_ctx->int_base) {
-               pr_err("Mapping of cache failed\n");
-               return -ENOMEM;
-       }
-       /* register for interrupt */
-       ret_val = devm_request_threaded_irq(&pdev->dev, irq,
-                       snd_mfld_jack_intr_handler,
-                       snd_mfld_jack_detection,
-                       IRQF_SHARED, pdev->dev.driver->name, mc_drv_ctx);
-       if (ret_val) {
-               pr_err("cannot register IRQ\n");
-               return ret_val;
-       }
-       /* register the soc card */
-       snd_soc_card_mfld.dev = &pdev->dev;
-       ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_mfld);
-       if (ret_val) {
-               pr_debug("snd_soc_register_card failed %d\n", ret_val);
-               return ret_val;
-       }
-       platform_set_drvdata(pdev, mc_drv_ctx);
-       pr_debug("successfully exited probe\n");
-       return 0;
-}
-
-static struct platform_driver snd_mfld_mc_driver = {
-       .driver = {
-               .name = "msic_audio",
-       },
-       .probe = snd_mfld_mc_probe,
-};
-
-module_platform_driver(snd_mfld_mc_driver);
-
-MODULE_DESCRIPTION("ASoC Intel(R) MID Machine driver");
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:msic-audio");
index 11c0805..fd82f4b 100644 (file)
@@ -269,7 +269,7 @@ int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
         */
 
        timeout = jiffies + msecs_to_jiffies(time);
-       while (((sst_dsp_shim_read_unlocked(ctx, offset) & mask) != target)
+       while ((((reg = sst_dsp_shim_read_unlocked(ctx, offset)) & mask) != target)
                && time_before(jiffies, timeout)) {
                k++;
                if (k > 10)
@@ -278,8 +278,6 @@ int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
                usleep_range(s, 2*s);
        }
 
-       reg = sst_dsp_shim_read_unlocked(ctx, offset);
-
        if ((reg & mask) == target) {
                dev_dbg(ctx->dev, "FW Poll Status: reg=%#x %s successful\n",
                                        reg, operation);
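
The hunk above moves the shim read into the loop condition, so the success check after the loop is made on the very value that terminated it instead of re-reading the register after a possible timeout. A minimal user-space sketch of that pattern, assuming a fake register and a retry budget in place of the real shim read and usleep_range() delay:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;               /* stand-in for the DSP shim register */

static uint32_t reg_read(void)
{
        return fake_reg;
}

/*
 * Poll until (reg & mask) == target or the retry budget runs out.  The value
 * tested after the loop is the same value that ended the loop, so the result
 * reported cannot disagree with what was last observed.
 */
static bool poll_reg(uint32_t mask, uint32_t target, int retries)
{
        uint32_t reg;

        while ((((reg = reg_read()) & mask) != target) && retries-- > 0)
                fake_reg++;             /* stand-in for usleep_range() + HW progress */

        return (reg & mask) == target;
}

int main(void)
{
        printf("poll result: %s\n", poll_reg(0xf, 0x5, 100) ? "ok" : "timed out");
        return 0;
}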
index 4524211..440bca7 100644 (file)
@@ -595,7 +595,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
        INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3);
        skl->d0i3.state = SKL_DSP_D0I3_NONE;
 
-       return 0;
+       return skl_dsp_acquire_irq(sst);
 }
 EXPORT_SYMBOL_GPL(bxt_sst_dsp_init);
 
index 387de38..245df10 100644 (file)
@@ -458,7 +458,7 @@ int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
        cnl->boot_complete = false;
        init_waitqueue_head(&cnl->boot_wait);
 
-       return 0;
+       return skl_dsp_acquire_irq(sst);
 }
 EXPORT_SYMBOL_GPL(cnl_sst_dsp_init);
 
diff --git a/sound/soc/intel/skylake/skl-i2s.h b/sound/soc/intel/skylake/skl-i2s.h
new file mode 100644 (file)
index 0000000..dcf819b
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ *  skl-i2s.h - i2s blob mapping
+ *
+ *  Copyright (C) 2017 Intel Corp
+ *  Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SOUND_SOC_SKL_I2S_H
+#define __SOUND_SOC_SKL_I2S_H
+
+#define SKL_I2S_MAX_TIME_SLOTS         8
+#define SKL_MCLK_DIV_CLK_SRC_MASK      GENMASK(17, 16)
+
+#define SKL_MNDSS_DIV_CLK_SRC_MASK     GENMASK(21, 20)
+#define SKL_SHIFT(x)                   (ffs(x) - 1)
+#define SKL_MCLK_DIV_RATIO_MASK                GENMASK(11, 0)
+
+struct skl_i2s_config {
+       u32 ssc0;
+       u32 ssc1;
+       u32 sscto;
+       u32 sspsp;
+       u32 sstsa;
+       u32 ssrsa;
+       u32 ssc2;
+       u32 sspsp2;
+       u32 ssc3;
+       u32 ssioc;
+} __packed;
+
+struct skl_i2s_config_mclk {
+       u32 mdivctrl;
+       u32 mdivr;
+};
+
+/**
+ * struct skl_i2s_config_blob_legacy - Structure defines I2S Gateway
+ * configuration legacy blob
+ *
+ * @gtw_attr:          Gateway attribute for the I2S Gateway
+ * @tdm_ts_group:      TDM slot mapping against channels in the Gateway.
+ * @i2s_cfg:           I2S HW registers
+ * @mclk:              MCLK clock source and divider values
+ */
+struct skl_i2s_config_blob_legacy {
+       u32 gtw_attr;
+       u32 tdm_ts_group[SKL_I2S_MAX_TIME_SLOTS];
+       struct skl_i2s_config i2s_cfg;
+       struct skl_i2s_config_mclk mclk;
+};
+
+#endif /* __SOUND_SOC_SKL_I2S_H */
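
The masks and the SKL_SHIFT() helper in the new header above are used later in the series as (value & MASK) >> SKL_SHIFT(MASK). A small user-space sketch of that extraction idiom, assuming local GENMASK_U32()/FIELD_SHIFT() stand-ins and made-up register values:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>    /* ffs() */

/* User-space stand-ins for the kernel's GENMASK() and the header's SKL_SHIFT(). */
#define GENMASK_U32(h, l)       ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_SHIFT(mask)       (ffs(mask) - 1)

#define MCLK_DIV_CLK_SRC_MASK   GENMASK_U32(17, 16)
#define MCLK_DIV_RATIO_MASK     GENMASK_U32(11, 0)

int main(void)
{
        uint32_t mdivctrl = 0x00020000; /* made-up register values */
        uint32_t mdivr = 0x00000ffe;

        /* Extract a field: mask it, then shift down by the mask's lowest set bit. */
        uint32_t clk_src = (mdivctrl & MCLK_DIV_CLK_SRC_MASK) >>
                           FIELD_SHIFT(MCLK_DIV_CLK_SRC_MASK);
        uint32_t clkdiv = mdivr & MCLK_DIV_RATIO_MASK;

        printf("clk_src=%u clkdiv=0x%x\n", (unsigned int)clk_src, (unsigned int)clkdiv);
        return 0;
}

Deriving the shift from the mask with ffs() keeps the shift amount tied to the mask definition, so a field can move without touching the extraction code.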
index 61b5bfa..8cbf080 100644 (file)
@@ -55,6 +55,19 @@ static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
        return 0;
 }
 
+#define SKL_ASTATE_PARAM_ID    4
+
+void skl_dsp_set_astate_cfg(struct skl_sst *ctx, u32 cnt, void *data)
+{
+       struct skl_ipc_large_config_msg msg = {0};
+
+       msg.large_param_id = SKL_ASTATE_PARAM_ID;
+       msg.param_data_size = (cnt * sizeof(struct skl_astate_param) +
+                               sizeof(cnt));
+
+       skl_ipc_set_large_config(&ctx->ipc, &msg, data);
+}
+
 #define NOTIFICATION_PARAM_ID 3
 #define NOTIFICATION_MASK 0xf
 
@@ -404,11 +417,20 @@ int skl_resume_dsp(struct skl *skl)
        if (skl->skl_sst->is_first_boot == true)
                return 0;
 
+       /* disable dynamic clock gating during fw and lib download */
+       ctx->enable_miscbdcge(ctx->dev, false);
+
        ret = skl_dsp_wake(ctx->dsp);
+       ctx->enable_miscbdcge(ctx->dev, true);
        if (ret < 0)
                return ret;
 
        skl_dsp_enable_notification(skl->skl_sst, false);
+
+       if (skl->cfg.astate_cfg != NULL) {
+               skl_dsp_set_astate_cfg(skl->skl_sst, skl->cfg.astate_cfg->count,
+                                       skl->cfg.astate_cfg);
+       }
        return ret;
 }
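
skl_dsp_set_astate_cfg() above sizes its IPC payload as cnt * sizeof(struct skl_astate_param) + sizeof(cnt), i.e. a u32 count followed by cnt fixed-size entries. A user-space sketch of that layout and size calculation, using local mirror structs (astate_param/astate_config here are illustrative, not the driver's types):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Local mirror of the blob shape: a u32 count followed by fixed-size entries. */
struct astate_param {
        uint32_t kcps;
        uint32_t clk_src;
};

struct astate_config {
        uint32_t count;
        struct astate_param table[];    /* flexible array member */
};

int main(void)
{
        uint32_t cnt = 3;

        /* Same size formula the hunk above uses for the IPC payload. */
        size_t payload = cnt * sizeof(struct astate_param) + sizeof(cnt);

        struct astate_config *cfg = calloc(1, payload);
        if (!cfg)
                return 1;

        cfg->count = cnt;
        cfg->table[cnt - 1].kcps = 96000;       /* last entry fits inside the allocation */

        printf("payload: %zu bytes for %u entries\n", payload, (unsigned int)cfg->count);
        free(cfg);
        return 0;
}

Because every field is a u32, sizeof(cnt) equals the header size, so the formula matches offsetof(struct astate_config, table) + cnt * sizeof(*table).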
 
index d14c50a..3b1d2b8 100644 (file)
@@ -19,6 +19,7 @@
  */
 #include <linux/pci.h>
 #include "skl.h"
+#include "skl-i2s.h"
 
 #define NHLT_ACPI_HEADER_SIG   "NHLT"
 
@@ -43,7 +44,8 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
        obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
        if (obj && obj->type == ACPI_TYPE_BUFFER) {
                nhlt_ptr = (struct nhlt_resource_desc  *)obj->buffer.pointer;
-               nhlt_table = (struct nhlt_acpi_table *)
+               if (nhlt_ptr->length)
+                       nhlt_table = (struct nhlt_acpi_table *)
                                memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
                                MEMREMAP_WB);
                ACPI_FREE(obj);
@@ -119,11 +121,16 @@ static bool skl_check_ep_match(struct device *dev, struct nhlt_endpoint *epnt,
 
        if ((epnt->virtual_bus_id == instance_id) &&
                        (epnt->linktype == link_type) &&
-                       (epnt->direction == dirn) &&
-                       (epnt->device_type == dev_type))
-               return true;
-       else
-               return false;
+                       (epnt->direction == dirn)) {
+               /* do not check dev_type for DMIC link type */
+               if (epnt->linktype == NHLT_LINK_DMIC)
+                       return true;
+
+               if (epnt->device_type == dev_type)
+                       return true;
+       }
+
+       return false;
 }
 
 struct nhlt_specific_cfg
@@ -271,3 +278,157 @@ void skl_nhlt_remove_sysfs(struct skl *skl)
 
        sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr);
 }
+
+/*
+ * Queries NHLT for all the fmt configuration for a particular endpoint and
+ * stores all possible rates supported in a rate table for the corresponding
+ * sclk/sclkfs.
+ */
+static void skl_get_ssp_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks,
+                               struct nhlt_fmt *fmt, u8 id)
+{
+       struct skl_i2s_config_blob_legacy *i2s_config;
+       struct skl_clk_parent_src *parent;
+       struct skl_ssp_clk *sclk, *sclkfs;
+       struct nhlt_fmt_cfg *fmt_cfg;
+       struct wav_fmt_ext *wav_fmt;
+       unsigned long rate = 0;
+       bool present = false;
+       int rate_index = 0;
+       u16 channels, bps;
+       u8 clk_src;
+       int i, j;
+       u32 fs;
+
+       sclk = &ssp_clks[SKL_SCLK_OFS];
+       sclkfs = &ssp_clks[SKL_SCLKFS_OFS];
+
+       if (fmt->fmt_count == 0)
+               return;
+
+       for (i = 0; i < fmt->fmt_count; i++) {
+               fmt_cfg = &fmt->fmt_config[i];
+               wav_fmt = &fmt_cfg->fmt_ext;
+
+               channels = wav_fmt->fmt.channels;
+               bps = wav_fmt->fmt.bits_per_sample;
+               fs = wav_fmt->fmt.samples_per_sec;
+
+               /*
+                * In case of a TDM configuration on an SSP, there can
+                * be more than one blob in which the channel masks differ
+                * per use case for a specific rate and bps. But the sclk
+                * rate is generated for the total number of channels used
+                * for that endpoint.
+                *
+                * So, for the given fs and bps, choose the blob which has
+                * the superset of all channels for that endpoint and
+                * derive the rate from it.
+                */
+               for (j = i; j < fmt->fmt_count; j++) {
+                       fmt_cfg = &fmt->fmt_config[j];
+                       wav_fmt = &fmt_cfg->fmt_ext;
+                       if ((fs == wav_fmt->fmt.samples_per_sec) &&
+                          (bps == wav_fmt->fmt.bits_per_sample))
+                               channels = max_t(u16, channels,
+                                               wav_fmt->fmt.channels);
+               }
+
+               rate = channels * bps * fs;
+
+               /* check if the rate is added already to the given SSP's sclk */
+               for (j = 0; (j < SKL_MAX_CLK_RATES) &&
+                           (sclk[id].rate_cfg[j].rate != 0); j++) {
+                       if (sclk[id].rate_cfg[j].rate == rate) {
+                               present = true;
+                               break;
+                       }
+               }
+
+               /* Fill rate and parent for sclk/sclkfs */
+               if (!present) {
+                       /* MCLK Divider Source Select */
+                       i2s_config = (struct skl_i2s_config_blob_legacy *)
+                                               fmt->fmt_config[0].config.caps;
+                       clk_src = ((i2s_config->mclk.mdivctrl)
+                                       & SKL_MNDSS_DIV_CLK_SRC_MASK) >>
+                                       SKL_SHIFT(SKL_MNDSS_DIV_CLK_SRC_MASK);
+
+                       parent = skl_get_parent_clk(clk_src);
+
+                       /*
+                        * Do not copy the config data if there is no parent
+                        * clock available for this clock source select
+                        */
+                       if (!parent)
+                               continue;
+
+                       sclk[id].rate_cfg[rate_index].rate = rate;
+                       sclk[id].rate_cfg[rate_index].config = fmt_cfg;
+                       sclkfs[id].rate_cfg[rate_index].rate = rate;
+                       sclkfs[id].rate_cfg[rate_index].config = fmt_cfg;
+                       sclk[id].parent_name = parent->name;
+                       sclkfs[id].parent_name = parent->name;
+
+                       rate_index++;
+               }
+       }
+}
+
+static void skl_get_mclk(struct skl *skl, struct skl_ssp_clk *mclk,
+                               struct nhlt_fmt *fmt, u8 id)
+{
+       struct skl_i2s_config_blob_legacy *i2s_config;
+       struct nhlt_specific_cfg *fmt_cfg;
+       struct skl_clk_parent_src *parent;
+       u32 clkdiv, div_ratio;
+       u8 clk_src;
+
+       fmt_cfg = &fmt->fmt_config[0].config;
+       i2s_config = (struct skl_i2s_config_blob_legacy *)fmt_cfg->caps;
+
+       /* MCLK Divider Source Select */
+       clk_src = ((i2s_config->mclk.mdivctrl) & SKL_MCLK_DIV_CLK_SRC_MASK) >>
+                                       SKL_SHIFT(SKL_MCLK_DIV_CLK_SRC_MASK);
+
+       clkdiv = i2s_config->mclk.mdivr & SKL_MCLK_DIV_RATIO_MASK;
+
+       /* bypass divider */
+       div_ratio = 1;
+
+       if (clkdiv != SKL_MCLK_DIV_RATIO_MASK)
+               /* Divider is 2 + clkdiv */
+               div_ratio = clkdiv + 2;
+
+       /* Calculate MCLK rate from source using div value */
+       parent = skl_get_parent_clk(clk_src);
+       if (!parent)
+               return;
+
+       mclk[id].rate_cfg[0].rate = parent->rate/div_ratio;
+       mclk[id].rate_cfg[0].config = &fmt->fmt_config[0];
+       mclk[id].parent_name = parent->name;
+}
+
+void skl_get_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks)
+{
+       struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
+       struct nhlt_endpoint *epnt;
+       struct nhlt_fmt *fmt;
+       int i;
+       u8 id;
+
+       epnt = (struct nhlt_endpoint *)nhlt->desc;
+       for (i = 0; i < nhlt->endpoint_count; i++) {
+               if (epnt->linktype == NHLT_LINK_SSP) {
+                       id = epnt->virtual_bus_id;
+
+                       fmt = (struct nhlt_fmt *)(epnt->config.caps
+                                       + epnt->config.size);
+
+                       skl_get_ssp_clks(skl, ssp_clks, fmt, id);
+                       skl_get_mclk(skl, ssp_clks, fmt, id);
+               }
+               epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+       }
+}
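
skl_get_ssp_clks() above derives each candidate sclk rate as channels * bits-per-sample * sample-rate, picking the largest channel count among blobs that share the same fs/bps and skipping rates that were already recorded. A compact user-space sketch of that selection, with a made-up format list standing in for the NHLT data:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_RATES 10

/* Hypothetical, flattened view of one NHLT format entry. */
struct fmt {
        uint16_t channels;
        uint16_t bps;
        uint32_t fs;
};

int main(void)
{
        /* Two TDM blobs sharing fs/bps with different channel counts, plus one 16-bit blob. */
        struct fmt fmts[] = {
                { 2, 32, 48000 }, { 8, 32, 48000 }, { 2, 16, 48000 },
        };
        int nfmt = sizeof(fmts) / sizeof(fmts[0]);
        unsigned long rates[MAX_RATES] = { 0 };
        int nrates = 0;

        for (int i = 0; i < nfmt && nrates < MAX_RATES; i++) {
                uint16_t channels = fmts[i].channels;
                bool present = false;

                /* Use the superset of channels seen for this fs/bps pair. */
                for (int j = i; j < nfmt; j++)
                        if (fmts[j].fs == fmts[i].fs &&
                            fmts[j].bps == fmts[i].bps &&
                            fmts[j].channels > channels)
                                channels = fmts[j].channels;

                unsigned long rate = (unsigned long)channels * fmts[i].bps * fmts[i].fs;

                /* Skip rates that are already in the table. */
                for (int j = 0; j < nrates; j++)
                        if (rates[j] == rate)
                                present = true;
                if (!present)
                        rates[nrates++] = rate;
        }

        for (int i = 0; i < nrates; i++)
                printf("sclk candidate: %lu Hz\n", rates[i]);
        return 0;
}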
index 1dd9747..e468285 100644 (file)
@@ -537,7 +537,7 @@ static int skl_link_hw_params(struct snd_pcm_substream *substream,
 
        snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
 
-       link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+       link = snd_hdac_ext_bus_get_link(ebus, codec_dai->component->name);
        if (!link)
                return -EINVAL;
 
@@ -620,7 +620,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
 
        link_dev->link_prepared = 0;
 
-       link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+       link = snd_hdac_ext_bus_get_link(ebus, rtd->codec_dai->component->name);
        if (!link)
                return -EINVAL;
 
@@ -1343,7 +1343,11 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
                        return -EIO;
                }
 
+               /* disable dynamic clock gating during fw and lib download */
+               skl->skl_sst->enable_miscbdcge(platform->dev, false);
+
                ret = ops->init_fw(platform->dev, skl->skl_sst);
+               skl->skl_sst->enable_miscbdcge(platform->dev, true);
                if (ret < 0) {
                        dev_err(platform->dev, "Failed to boot first fw: %d\n", ret);
                        return ret;
@@ -1351,6 +1355,12 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
                skl_populate_modules(skl);
                skl->skl_sst->update_d0i3c = skl_update_d0i3c;
                skl_dsp_enable_notification(skl->skl_sst, false);
+
+               if (skl->cfg.astate_cfg != NULL) {
+                       skl_dsp_set_astate_cfg(skl->skl_sst,
+                                       skl->cfg.astate_cfg->count,
+                                       skl->cfg.astate_cfg);
+               }
        }
        pm_runtime_mark_last_busy(platform->dev);
        pm_runtime_put_autosuspend(platform->dev);
diff --git a/sound/soc/intel/skylake/skl-ssp-clk.h b/sound/soc/intel/skylake/skl-ssp-clk.h
new file mode 100644 (file)
index 0000000..c9ea840
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ *  skl-ssp-clk.h - Skylake ssp clock information and ipc structure
+ *
+ *  Copyright (C) 2017 Intel Corp
+ *  Author: Jaikrishna Nemallapudi <jaikrishnax.nemallapudi@intel.com>
+ *  Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef SOUND_SOC_SKL_SSP_CLK_H
+#define SOUND_SOC_SKL_SSP_CLK_H
+
+#define SKL_MAX_SSP            6
+/* xtal/cardinal/pll, parent of ssp clocks and mclk */
+#define SKL_MAX_CLK_SRC                3
+#define SKL_MAX_SSP_CLK_TYPES  3 /* mclk, sclk, sclkfs */
+
+#define SKL_MAX_CLK_CNT                (SKL_MAX_SSP * SKL_MAX_SSP_CLK_TYPES)
+
+/* Max number of configurations supported for each clock */
+#define SKL_MAX_CLK_RATES      10
+
+#define SKL_SCLK_OFS           SKL_MAX_SSP
+#define SKL_SCLKFS_OFS         (SKL_SCLK_OFS + SKL_MAX_SSP)
+
+enum skl_clk_type {
+       SKL_MCLK,
+       SKL_SCLK,
+       SKL_SCLK_FS,
+};
+
+enum skl_clk_src_type {
+       SKL_XTAL,
+       SKL_CARDINAL,
+       SKL_PLL,
+};
+
+struct skl_clk_parent_src {
+       u8 clk_id;
+       const char *name;
+       unsigned long rate;
+       const char *parent_name;
+};
+
+struct skl_clk_rate_cfg_table {
+       unsigned long rate;
+       void *config;
+};
+
+/*
+ * rate for mclk will be in rates[0]. For sclk and sclkfs, rates[] store
+ * all possible clocks ssp can generate for that platform.
+ */
+struct skl_ssp_clk {
+       const char *name;
+       const char *parent_name;
+       struct skl_clk_rate_cfg_table rate_cfg[SKL_MAX_CLK_RATES];
+};
+
+struct skl_clk_pdata {
+       struct skl_clk_parent_src *parent_clks;
+       int num_clks;
+       struct skl_ssp_clk *ssp_clks;
+       void *pvt_data;
+};
+
+#endif /* SOUND_SOC_SKL_SSP_CLK_H */
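
The offsets above lay the SSP clocks out as one flat table: indices 0..SKL_MAX_SSP-1 hold the mclks, SKL_SCLK_OFS onward the sclks, and SKL_SCLKFS_OFS onward the sclkfs entries. A tiny sketch of that indexing, with local MAX_SSP/SCLK_OFS/SCLKFS_OFS names standing in for the header's macros:

#include <stdio.h>

#define MAX_SSP         6
#define SCLK_OFS        MAX_SSP                 /* sclk entries follow the mclk block */
#define SCLKFS_OFS      (SCLK_OFS + MAX_SSP)    /* sclkfs entries follow the sclk block */

/* Index of a clock of a given type for a given SSP in the flat 18-entry table. */
static int clk_index(int type_ofs, int ssp_id)
{
        return type_ofs + ssp_id;
}

int main(void)
{
        printf("ssp2 mclk   -> index %d\n", clk_index(0, 2));
        printf("ssp2 sclk   -> index %d\n", clk_index(SCLK_OFS, 2));
        printf("ssp2 sclkfs -> index %d\n", clk_index(SCLKFS_OFS, 2));
        return 0;
}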
index 19ee1d4..71e31ad 100644 (file)
@@ -435,16 +435,22 @@ struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
                        return NULL;
        }
 
+       return sst;
+}
+
+int skl_dsp_acquire_irq(struct sst_dsp *sst)
+{
+       struct sst_dsp_device *sst_dev = sst->sst_dev;
+       int ret;
+
        /* Register the ISR */
        ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
                sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
-       if (ret) {
+       if (ret)
                dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
                               sst->irq);
-               return NULL;
-       }
 
-       return sst;
+       return ret;
 }
 
 void skl_dsp_free(struct sst_dsp *dsp)
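
With the change above, skl_dsp_ctx_init() no longer requests the IRQ itself; the platform-specific *_sst_dsp_init() callers finish their own setup first and then call skl_dsp_acquire_irq(), so the ISR cannot run against a half-initialised context. A user-space sketch of that two-phase init pattern (the struct, handler and acquire function are stand-ins, not driver API):

#include <stdio.h>
#include <stdlib.h>

struct dsp {
        void (*handler)(struct dsp *dsp);
        int irq_requested;
};

static void dsp_irq_handler(struct dsp *dsp)
{
        /* Would run as soon as the IRQ is live; it relies on a complete context. */
        printf("irq handled, irq_requested=%d\n", dsp->irq_requested);
}

/* Phase 1: build the context but do not make the interrupt live yet. */
static struct dsp *dsp_ctx_init(void)
{
        struct dsp *dsp = calloc(1, sizeof(*dsp));

        if (!dsp)
                return NULL;
        dsp->handler = dsp_irq_handler;
        return dsp;
}

/* Phase 2: called once the caller's own setup is finished. */
static int dsp_acquire_irq(struct dsp *dsp)
{
        dsp->irq_requested = 1;         /* stand-in for request_threaded_irq() */
        return 0;
}

int main(void)
{
        struct dsp *dsp = dsp_ctx_init();

        if (!dsp)
                return 1;
        /* ...remaining init that must finish before interrupts may fire... */
        if (dsp_acquire_irq(dsp)) {
                free(dsp);
                return 1;
        }
        dsp->handler(dsp);              /* simulate one interrupt */
        free(dsp);
        return 0;
}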
index eba20d3..12fc9a7 100644 (file)
@@ -206,6 +206,7 @@ int skl_cldma_wait_interruptible(struct sst_dsp *ctx);
 void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
 struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
                struct sst_dsp_device *sst_dev, int irq);
+int skl_dsp_acquire_irq(struct sst_dsp *sst);
 bool is_skl_dsp_running(struct sst_dsp *ctx);
 
 unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx);
@@ -251,6 +252,9 @@ void skl_freeup_uuid_list(struct skl_sst *ctx);
 
 int skl_dsp_strip_extended_manifest(struct firmware *fw);
 void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable);
+
+void skl_dsp_set_astate_cfg(struct skl_sst *ctx, u32 cnt, void *data);
+
 int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name,
                struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp,
                struct sst_dsp_device *skl_dev);
index 8ff8928..2ae4056 100644 (file)
@@ -178,7 +178,8 @@ static inline int skl_pvtid_128(struct uuid_module *module)
  * skl_get_pvt_id: generate a private id for use as module id
  *
  * @ctx: driver context
- * @mconfig: module configuration data
+ * @uuid_mod: module's uuid
+ * @instance_id: module's instance id
  *
  * This generates a 128 bit private unique id for a module TYPE so that
  * module instance is unique
@@ -208,7 +209,8 @@ EXPORT_SYMBOL_GPL(skl_get_pvt_id);
  * skl_put_pvt_id: free up the private id allocated
  *
  * @ctx: driver context
- * @mconfig: module configuration data
+ * @uuid_mod: module's uuid
+ * @pvt_id: module pvt id
  *
  * This frees a 128 bit private unique id previously generated
  */
index a436abf..5a7e41b 100644 (file)
@@ -569,7 +569,7 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
 
        sst->fw_ops = skl_fw_ops;
 
-       return 0;
+       return skl_dsp_acquire_irq(sst);
 }
 EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
 
index a072bcf..73af6e1 100644 (file)
@@ -190,7 +190,6 @@ skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
        u8 res_idx = mconfig->res_idx;
        struct skl_module_res *res = &mconfig->module->resources[res_idx];
 
-       res = &mconfig->module->resources[res_idx];
        skl->resource.mcps -= res->cps;
 }
 
@@ -2908,7 +2907,7 @@ static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
                break;
 
        default:
-               dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
+               dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
                        hdr->ops.get, hdr->ops.put, hdr->ops.info);
                break;
        }
@@ -3056,11 +3055,13 @@ static int skl_tplg_get_int_tkn(struct device *dev,
                struct snd_soc_tplg_vendor_value_elem *tkn_elem,
                struct skl *skl)
 {
-       int tkn_count = 0, ret;
+       int tkn_count = 0, ret, size;
        static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
        struct skl_module_res *res = NULL;
        struct skl_module_iface *fmt = NULL;
        struct skl_module *mod = NULL;
+       static struct skl_astate_param *astate_table;
+       static int astate_cfg_idx, count;
        int i;
 
        if (skl->modules) {
@@ -3093,6 +3094,46 @@ static int skl_tplg_get_int_tkn(struct device *dev,
                mod_idx = tkn_elem->value;
                break;
 
+       case SKL_TKN_U32_ASTATE_COUNT:
+               if (astate_table != NULL) {
+                       dev_err(dev, "More than one entry for A-State count");
+                       return -EINVAL;
+               }
+
+               if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
+                       dev_err(dev, "Invalid A-State count %d\n",
+                               tkn_elem->value);
+                       return -EINVAL;
+               }
+
+               size = tkn_elem->value * sizeof(struct skl_astate_param) +
+                               sizeof(count);
+               skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
+               if (!skl->cfg.astate_cfg)
+                       return -ENOMEM;
+
+               astate_table = skl->cfg.astate_cfg->astate_table;
+               count = skl->cfg.astate_cfg->count = tkn_elem->value;
+               break;
+
+       case SKL_TKN_U32_ASTATE_IDX:
+               if (tkn_elem->value >= count) {
+                       dev_err(dev, "Invalid A-State index %d\n",
+                               tkn_elem->value);
+                       return -EINVAL;
+               }
+
+               astate_cfg_idx = tkn_elem->value;
+               break;
+
+       case SKL_TKN_U32_ASTATE_KCPS:
+               astate_table[astate_cfg_idx].kcps = tkn_elem->value;
+               break;
+
+       case SKL_TKN_U32_ASTATE_CLK_SRC:
+               astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
+               break;
+
        case SKL_TKN_U8_IN_PIN_TYPE:
        case SKL_TKN_U8_OUT_PIN_TYPE:
        case SKL_TKN_U8_IN_QUEUE_COUNT:
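
The A-state tokens added above are order-dependent: the count token must arrive first (bounded by SKL_MAX_ASTATE_CFG) so the table can be sized, and each index token is checked against that count before the per-entry values are stored. A user-space sketch of that bounds-checked token walk over a hypothetical, well-formed token stream (the enum and structs are local stand-ins):

#include <stdio.h>
#include <stdlib.h>

#define MAX_ASTATE_CFG  3

enum tkn { TKN_COUNT, TKN_IDX, TKN_KCPS, TKN_CLK_SRC };

struct tkn_elem { enum tkn token; unsigned int value; };
struct astate { unsigned int kcps; unsigned int clk_src; };

int main(void)
{
        /* A hypothetical token stream, count first, then indexed entries. */
        struct tkn_elem stream[] = {
                { TKN_COUNT, 2 },
                { TKN_IDX, 0 }, { TKN_KCPS, 48000 }, { TKN_CLK_SRC, 1 },
                { TKN_IDX, 1 }, { TKN_KCPS, 96000 }, { TKN_CLK_SRC, 2 },
        };
        size_t nelem = sizeof(stream) / sizeof(stream[0]);
        struct astate *table = NULL;
        unsigned int count = 0, idx = 0;

        for (size_t i = 0; i < nelem; i++) {
                struct tkn_elem *e = &stream[i];

                switch (e->token) {
                case TKN_COUNT:
                        if (table || e->value > MAX_ASTATE_CFG)
                                return 1;       /* duplicate count or count too large */
                        count = e->value;
                        table = calloc(count, sizeof(*table));
                        if (!table)
                                return 1;
                        break;
                case TKN_IDX:
                        if (e->value >= count)
                                return 1;       /* index outside the announced table */
                        idx = e->value;
                        break;
                case TKN_KCPS:
                        table[idx].kcps = e->value;
                        break;
                case TKN_CLK_SRC:
                        table[idx].clk_src = e->value;
                        break;
                }
        }

        for (unsigned int i = 0; i < count; i++)
                printf("astate %u: kcps=%u clk_src=%u\n", i, table[i].kcps, table[i].clk_src);
        free(table);
        return 0;
}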
index 31d8634..32ce64c 100644 (file)
@@ -355,6 +355,7 @@ static int skl_resume(struct device *dev)
 
                if (ebus->cmd_dma_state)
                        snd_hdac_bus_init_cmd_io(&ebus->bus);
+               ret = 0;
        } else {
                ret = _skl_resume(ebus);
 
@@ -435,19 +436,51 @@ static int skl_free(struct hdac_ext_bus *ebus)
        return 0;
 }
 
-static int skl_machine_device_register(struct skl *skl, void *driver_data)
+/*
+ * For each SSP there are three clocks (mclk/sclk/sclkfs).
+ * E.g. for ssp0 the clocks are named
+ *      "ssp0_mclk", "ssp0_sclk", "ssp0_sclkfs".
+ * Skylake and later have six SSPs, so 18 clocks are created.
+ */
+static struct skl_ssp_clk skl_ssp_clks[] = {
+       {.name = "ssp0_mclk"}, {.name = "ssp1_mclk"}, {.name = "ssp2_mclk"},
+       {.name = "ssp3_mclk"}, {.name = "ssp4_mclk"}, {.name = "ssp5_mclk"},
+       {.name = "ssp0_sclk"}, {.name = "ssp1_sclk"}, {.name = "ssp2_sclk"},
+       {.name = "ssp3_sclk"}, {.name = "ssp4_sclk"}, {.name = "ssp5_sclk"},
+       {.name = "ssp0_sclkfs"}, {.name = "ssp1_sclkfs"},
+                                               {.name = "ssp2_sclkfs"},
+       {.name = "ssp3_sclkfs"}, {.name = "ssp4_sclkfs"},
+                                               {.name = "ssp5_sclkfs"},
+};
+
+static int skl_find_machine(struct skl *skl, void *driver_data)
 {
-       struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
-       struct platform_device *pdev;
        struct snd_soc_acpi_mach *mach = driver_data;
-       int ret;
+       struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+       struct skl_machine_pdata *pdata;
 
        mach = snd_soc_acpi_find_machine(mach);
        if (mach == NULL) {
                dev_err(bus->dev, "No matching machine driver found\n");
                return -ENODEV;
        }
+
+       skl->mach = mach;
        skl->fw_name = mach->fw_filename;
+       pdata = skl->mach->pdata;
+
+       if (mach->pdata)
+               skl->use_tplg_pcm = pdata->use_tplg_pcm;
+
+       return 0;
+}
+
+static int skl_machine_device_register(struct skl *skl)
+{
+       struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+       struct snd_soc_acpi_mach *mach = skl->mach;
+       struct platform_device *pdev;
+       int ret;
 
        pdev = platform_device_alloc(mach->drv_name, -1);
        if (pdev == NULL) {
@@ -462,11 +495,8 @@ static int skl_machine_device_register(struct skl *skl, void *driver_data)
                return -EIO;
        }
 
-       if (mach->pdata) {
-               skl->use_tplg_pcm =
-                       ((struct skl_machine_pdata *)mach->pdata)->use_tplg_pcm;
+       if (mach->pdata)
                dev_set_drvdata(&pdev->dev, mach->pdata);
-       }
 
        skl->i2s_dev = pdev;
 
@@ -509,6 +539,74 @@ static void skl_dmic_device_unregister(struct skl *skl)
                platform_device_unregister(skl->dmic_dev);
 }
 
+static struct skl_clk_parent_src skl_clk_src[] = {
+       { .clk_id = SKL_XTAL, .name = "xtal" },
+       { .clk_id = SKL_CARDINAL, .name = "cardinal", .rate = 24576000 },
+       { .clk_id = SKL_PLL, .name = "pll", .rate = 96000000 },
+};
+
+struct skl_clk_parent_src *skl_get_parent_clk(u8 clk_id)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(skl_clk_src); i++) {
+               if (skl_clk_src[i].clk_id == clk_id)
+                       return &skl_clk_src[i];
+       }
+
+       return NULL;
+}
+
+static void init_skl_xtal_rate(int pci_id)
+{
+       switch (pci_id) {
+       case 0x9d70:
+       case 0x9d71:
+               skl_clk_src[0].rate = 24000000;
+               return;
+
+       default:
+               skl_clk_src[0].rate = 19200000;
+               return;
+       }
+}
+
+static int skl_clock_device_register(struct skl *skl)
+{
+       struct platform_device_info pdevinfo = {NULL};
+       struct skl_clk_pdata *clk_pdata;
+
+       clk_pdata = devm_kzalloc(&skl->pci->dev, sizeof(*clk_pdata),
+                                                       GFP_KERNEL);
+       if (!clk_pdata)
+               return -ENOMEM;
+
+       init_skl_xtal_rate(skl->pci->device);
+
+       clk_pdata->parent_clks = skl_clk_src;
+       clk_pdata->ssp_clks = skl_ssp_clks;
+       clk_pdata->num_clks = ARRAY_SIZE(skl_ssp_clks);
+
+       /* Query NHLT to fill the rates and parent */
+       skl_get_clks(skl, clk_pdata->ssp_clks);
+       clk_pdata->pvt_data = skl;
+
+       /* Register Platform device */
+       pdevinfo.parent = &skl->pci->dev;
+       pdevinfo.id = -1;
+       pdevinfo.name = "skl-ssp-clk";
+       pdevinfo.data = clk_pdata;
+       pdevinfo.size_data = sizeof(*clk_pdata);
+       skl->clk_dev = platform_device_register_full(&pdevinfo);
+       return PTR_ERR_OR_ZERO(skl->clk_dev);
+}
+
+static void skl_clock_device_unregister(struct skl *skl)
+{
+       if (skl->clk_dev)
+               platform_device_unregister(skl->clk_dev);
+}
+
 /*
  * Probe the given codec address
  */
@@ -615,18 +713,30 @@ static void skl_probe_work(struct work_struct *work)
        /* create codec instances */
        skl_codec_create(ebus);
 
+       /* register platform dai and controls */
+       err = skl_platform_register(bus->dev);
+       if (err < 0) {
+               dev_err(bus->dev, "platform register failed: %d\n", err);
+               return;
+       }
+
+       if (bus->ppcap) {
+               err = skl_machine_device_register(skl);
+               if (err < 0) {
+                       dev_err(bus->dev, "machine register failed: %d\n", err);
+                       goto out_err;
+               }
+       }
+
        if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
                err = snd_hdac_display_power(bus, false);
                if (err < 0) {
                        dev_err(bus->dev, "Cannot turn off display power on i915\n");
+                       skl_machine_device_unregister(skl);
                        return;
                }
        }
 
-       /* register platform dai and controls */
-       err = skl_platform_register(bus->dev);
-       if (err < 0)
-               return;
        /*
         * we are done probing so decrement link counts
         */
@@ -791,18 +901,21 @@ static int skl_probe(struct pci_dev *pci,
 
        /* check if dsp is there */
        if (bus->ppcap) {
-               err = skl_machine_device_register(skl,
-                                 (void *)pci_id->driver_data);
+               /* create device for dsp clk */
+               err = skl_clock_device_register(skl);
+               if (err < 0)
+                       goto out_clk_free;
+
+               err = skl_find_machine(skl, (void *)pci_id->driver_data);
                if (err < 0)
                        goto out_nhlt_free;
 
                err = skl_init_dsp(skl);
                if (err < 0) {
                        dev_dbg(bus->dev, "error failed to register dsp\n");
-                       goto out_mach_free;
+                       goto out_nhlt_free;
                }
                skl->skl_sst->enable_miscbdcge = skl_enable_miscbdcge;
-
        }
        if (bus->mlcap)
                snd_hdac_ext_bus_get_ml_capabilities(ebus);
@@ -820,8 +933,8 @@ static int skl_probe(struct pci_dev *pci,
 
 out_dsp_free:
        skl_free_dsp(skl);
-out_mach_free:
-       skl_machine_device_unregister(skl);
+out_clk_free:
+       skl_clock_device_unregister(skl);
 out_nhlt_free:
        skl_nhlt_free(skl->nhlt);
 out_free:
@@ -872,6 +985,7 @@ static void skl_remove(struct pci_dev *pci)
        skl_free_dsp(skl);
        skl_machine_device_unregister(skl);
        skl_dmic_device_unregister(skl);
+       skl_clock_device_unregister(skl);
        skl_nhlt_remove_sysfs(skl);
        skl_nhlt_free(skl->nhlt);
        skl_free(ebus);
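
The probe rework above also reorders the error unwinding: the clock device is registered early, and the out_clk_free/out_nhlt_free labels release resources in the reverse order of acquisition. A generic user-space sketch of that goto-unwind shape (A/B/C are placeholders and the failure point is simulated; this is not a line-for-line mirror of skl_probe()):

#include <stdio.h>

static int get_a(void)   { printf("A acquired\n"); return 0; }
static void put_a(void)  { printf("A released\n"); }
static int get_b(void)   { printf("B acquired\n"); return 0; }
static void put_b(void)  { printf("B released\n"); }
static int get_c(void)   { printf("C failed\n"); return -1; }

/*
 * Acquire A, B, C in order; on failure release only what was already
 * acquired, in reverse order, by falling through the labels.
 */
static int probe(void)
{
        int err;

        err = get_a();
        if (err)
                return err;

        err = get_b();
        if (err)
                goto out_put_a;

        err = get_c();
        if (err)
                goto out_put_b;

        return 0;

out_put_b:
        put_b();
out_put_a:
        put_a();
        return err;
}

int main(void)
{
        return probe() ? 1 : 0;
}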
index e00cde8..f411579 100644 (file)
 #include <sound/hdaudio_ext.h>
 #include <sound/soc.h>
 #include "skl-nhlt.h"
+#include "skl-ssp-clk.h"
 
 #define SKL_SUSPEND_DELAY 2000
 
+#define SKL_MAX_ASTATE_CFG             3
+
 #define AZX_PCIREG_PGCTL               0x44
 #define AZX_PGCTL_LSRMD_MASK           (1 << 4)
 #define AZX_PCIREG_CGCTL               0x48
@@ -45,6 +48,20 @@ struct skl_dsp_resource {
 
 struct skl_debug;
 
+struct skl_astate_param {
+       u32 kcps;
+       u32 clk_src;
+};
+
+struct skl_astate_config {
+       u32 count;
+       struct skl_astate_param astate_table[0];
+};
+
+struct skl_fw_config {
+       struct skl_astate_config *astate_cfg;
+};
+
 struct skl {
        struct hdac_ext_bus ebus;
        struct pci_dev *pci;
@@ -52,6 +69,7 @@ struct skl {
        unsigned int init_done:1; /* delayed init status */
        struct platform_device *dmic_dev;
        struct platform_device *i2s_dev;
+       struct platform_device *clk_dev;
        struct snd_soc_platform *platform;
        struct snd_soc_dai_driver *dais;
 
@@ -75,6 +93,8 @@ struct skl {
        u8 nr_modules;
        struct skl_module **modules;
        bool use_tplg_pcm;
+       struct skl_fw_config cfg;
+       struct snd_soc_acpi_mach *mach;
 };
 
 #define skl_to_ebus(s) (&(s)->ebus)
@@ -125,6 +145,8 @@ const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id);
 void skl_update_d0i3c(struct device *dev, bool enable);
 int skl_nhlt_create_sysfs(struct skl *skl);
 void skl_nhlt_remove_sysfs(struct skl *skl);
+void skl_get_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks);
+struct skl_clk_parent_src *skl_get_parent_clk(u8 clk_id);
 
 struct skl_module_cfg;
 
index affa7fb..949fc3a 100644 (file)
  * GNU General Public License for more details.
  */
 
-#include <sound/soc.h>
-#include <linux/regmap.h>
-#include <linux/pm_runtime.h>
-
 #include "mt2701-afe-common.h"
 #include "mt2701-afe-clock-ctrl.h"
 
-static const char *aud_clks[MT2701_CLOCK_NUM] = {
-       [MT2701_AUD_INFRA_SYS_AUDIO] = "infra_sys_audio_clk",
-       [MT2701_AUD_AUD_MUX1_SEL] = "top_audio_mux1_sel",
-       [MT2701_AUD_AUD_MUX2_SEL] = "top_audio_mux2_sel",
-       [MT2701_AUD_AUD_MUX1_DIV] = "top_audio_mux1_div",
-       [MT2701_AUD_AUD_MUX2_DIV] = "top_audio_mux2_div",
-       [MT2701_AUD_AUD_48K_TIMING] = "top_audio_48k_timing",
-       [MT2701_AUD_AUD_44K_TIMING] = "top_audio_44k_timing",
-       [MT2701_AUD_AUDPLL_MUX_SEL] = "top_audpll_mux_sel",
-       [MT2701_AUD_APLL_SEL] = "top_apll_sel",
-       [MT2701_AUD_AUD1PLL_98M] = "top_aud1_pll_98M",
-       [MT2701_AUD_AUD2PLL_90M] = "top_aud2_pll_90M",
-       [MT2701_AUD_HADDS2PLL_98M] = "top_hadds2_pll_98M",
-       [MT2701_AUD_HADDS2PLL_294M] = "top_hadds2_pll_294M",
-       [MT2701_AUD_AUDPLL] = "top_audpll",
-       [MT2701_AUD_AUDPLL_D4] = "top_audpll_d4",
-       [MT2701_AUD_AUDPLL_D8] = "top_audpll_d8",
-       [MT2701_AUD_AUDPLL_D16] = "top_audpll_d16",
-       [MT2701_AUD_AUDPLL_D24] = "top_audpll_d24",
-       [MT2701_AUD_AUDINTBUS] = "top_audintbus_sel",
-       [MT2701_AUD_CLK_26M] = "clk_26m",
-       [MT2701_AUD_SYSPLL1_D4] = "top_syspll1_d4",
-       [MT2701_AUD_AUD_K1_SRC_SEL] = "top_aud_k1_src_sel",
-       [MT2701_AUD_AUD_K2_SRC_SEL] = "top_aud_k2_src_sel",
-       [MT2701_AUD_AUD_K3_SRC_SEL] = "top_aud_k3_src_sel",
-       [MT2701_AUD_AUD_K4_SRC_SEL] = "top_aud_k4_src_sel",
-       [MT2701_AUD_AUD_K5_SRC_SEL] = "top_aud_k5_src_sel",
-       [MT2701_AUD_AUD_K6_SRC_SEL] = "top_aud_k6_src_sel",
-       [MT2701_AUD_AUD_K1_SRC_DIV] = "top_aud_k1_src_div",
-       [MT2701_AUD_AUD_K2_SRC_DIV] = "top_aud_k2_src_div",
-       [MT2701_AUD_AUD_K3_SRC_DIV] = "top_aud_k3_src_div",
-       [MT2701_AUD_AUD_K4_SRC_DIV] = "top_aud_k4_src_div",
-       [MT2701_AUD_AUD_K5_SRC_DIV] = "top_aud_k5_src_div",
-       [MT2701_AUD_AUD_K6_SRC_DIV] = "top_aud_k6_src_div",
-       [MT2701_AUD_AUD_I2S1_MCLK] = "top_aud_i2s1_mclk",
-       [MT2701_AUD_AUD_I2S2_MCLK] = "top_aud_i2s2_mclk",
-       [MT2701_AUD_AUD_I2S3_MCLK] = "top_aud_i2s3_mclk",
-       [MT2701_AUD_AUD_I2S4_MCLK] = "top_aud_i2s4_mclk",
-       [MT2701_AUD_AUD_I2S5_MCLK] = "top_aud_i2s5_mclk",
-       [MT2701_AUD_AUD_I2S6_MCLK] = "top_aud_i2s6_mclk",
-       [MT2701_AUD_ASM_M_SEL] = "top_asm_m_sel",
-       [MT2701_AUD_ASM_H_SEL] = "top_asm_h_sel",
-       [MT2701_AUD_UNIVPLL2_D4] = "top_univpll2_d4",
-       [MT2701_AUD_UNIVPLL2_D2] = "top_univpll2_d2",
-       [MT2701_AUD_SYSPLL_D5] = "top_syspll_d5",
+static const char *const base_clks[] = {
+       [MT2701_INFRA_SYS_AUDIO] = "infra_sys_audio_clk",
+       [MT2701_TOP_AUD_MCLK_SRC0] = "top_audio_mux1_sel",
+       [MT2701_TOP_AUD_MCLK_SRC1] = "top_audio_mux2_sel",
+       [MT2701_TOP_AUD_A1SYS] = "top_audio_a1sys_hp",
+       [MT2701_TOP_AUD_A2SYS] = "top_audio_a2sys_hp",
+       [MT2701_AUDSYS_AFE] = "audio_afe_pd",
+       [MT2701_AUDSYS_AFE_CONN] = "audio_afe_conn_pd",
+       [MT2701_AUDSYS_A1SYS] = "audio_a1sys_pd",
+       [MT2701_AUDSYS_A2SYS] = "audio_a2sys_pd",
 };
 
 int mt2701_init_clock(struct mtk_base_afe *afe)
 {
        struct mt2701_afe_private *afe_priv = afe->platform_priv;
-       int i = 0;
-
-       for (i = 0; i < MT2701_CLOCK_NUM; i++) {
-               afe_priv->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
-               if (IS_ERR(afe_priv->clocks[i])) {
-                       dev_warn(afe->dev, "%s devm_clk_get %s fail\n",
-                                __func__, aud_clks[i]);
-                       return PTR_ERR(aud_clks[i]);
+       int i;
+
+       for (i = 0; i < MT2701_BASE_CLK_NUM; i++) {
+               afe_priv->base_ck[i] = devm_clk_get(afe->dev, base_clks[i]);
+               if (IS_ERR(afe_priv->base_ck[i])) {
+                       dev_err(afe->dev, "failed to get %s\n", base_clks[i]);
+                       return PTR_ERR(afe_priv->base_ck[i]);
+               }
+       }
+
+       /* Get I2S related clocks */
+       for (i = 0; i < MT2701_I2S_NUM; i++) {
+               struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i];
+               char name[13];
+
+               snprintf(name, sizeof(name), "i2s%d_src_sel", i);
+               i2s_path->sel_ck = devm_clk_get(afe->dev, name);
+               if (IS_ERR(i2s_path->sel_ck)) {
+                       dev_err(afe->dev, "failed to get %s\n", name);
+                       return PTR_ERR(i2s_path->sel_ck);
+               }
+
+               snprintf(name, sizeof(name), "i2s%d_src_div", i);
+               i2s_path->div_ck = devm_clk_get(afe->dev, name);
+               if (IS_ERR(i2s_path->div_ck)) {
+                       dev_err(afe->dev, "failed to get %s\n", name);
+                       return PTR_ERR(i2s_path->div_ck);
+               }
+
+               snprintf(name, sizeof(name), "i2s%d_mclk_en", i);
+               i2s_path->mclk_ck = devm_clk_get(afe->dev, name);
+               if (IS_ERR(i2s_path->mclk_ck)) {
+                       dev_err(afe->dev, "failed to get %s\n", name);
+                       return PTR_ERR(i2s_path->mclk_ck);
+               }
+
+               snprintf(name, sizeof(name), "i2so%d_hop_ck", i);
+               i2s_path->hop_ck[I2S_OUT] = devm_clk_get(afe->dev, name);
+               if (IS_ERR(i2s_path->hop_ck[I2S_OUT])) {
+                       dev_err(afe->dev, "failed to get %s\n", name);
+                       return PTR_ERR(i2s_path->hop_ck[I2S_OUT]);
+               }
+
+               snprintf(name, sizeof(name), "i2si%d_hop_ck", i);
+               i2s_path->hop_ck[I2S_IN] = devm_clk_get(afe->dev, name);
+               if (IS_ERR(i2s_path->hop_ck[I2S_IN])) {
+                       dev_err(afe->dev, "failed to get %s\n", name);
+                       return PTR_ERR(i2s_path->hop_ck[I2S_IN]);
+               }
+
+               snprintf(name, sizeof(name), "asrc%d_out_ck", i);
+               i2s_path->asrco_ck = devm_clk_get(afe->dev, name);
+               if (IS_ERR(i2s_path->asrco_ck)) {
+                       dev_err(afe->dev, "failed to get %s\n", name);
+                       return PTR_ERR(i2s_path->asrco_ck);
                }
        }
 
+       /* Some platforms may support BT path */
+       afe_priv->mrgif_ck = devm_clk_get(afe->dev, "audio_mrgif_pd");
+       if (IS_ERR(afe_priv->mrgif_ck)) {
+               if (PTR_ERR(afe_priv->mrgif_ck) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+
+               afe_priv->mrgif_ck = NULL;
+       }
+
        return 0;
 }
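
mt2701_init_clock() above treats the BT/merge-interface clock as optional: a missing clock leaves the handle NULL and probing continues, but -EPROBE_DEFER is still passed up so the probe is retried once the provider appears. A user-space sketch of that optional-resource pattern, with a simulated devm_clk_get() result rather than the real clk API:

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER    517     /* kernel-internal errno, defined here for the sketch */

struct clk { const char *name; };

/*
 * Treat a missing optional clock as "not present" (NULL handle, success),
 * but still propagate -EPROBE_DEFER so the probe is retried once the clock
 * provider shows up.  get_err simulates what devm_clk_get() would report.
 */
static int get_optional_clk(int get_err, struct clk **out)
{
        static struct clk bt_clk = { "audio_mrgif_pd" };

        if (get_err == 0) {
                *out = &bt_clk;
                return 0;
        }
        if (get_err == -EPROBE_DEFER)
                return -EPROBE_DEFER;

        *out = NULL;            /* platform without the BT/merge interface clock */
        return 0;
}

int main(void)
{
        struct clk *bt = NULL;
        int rc;

        rc = get_optional_clk(0, &bt);
        printf("present:  rc=%d clk=%s\n", rc, bt ? bt->name : "none");

        rc = get_optional_clk(-ENOENT, &bt);
        printf("missing:  rc=%d clk=%s\n", rc, bt ? bt->name : "none");

        rc = get_optional_clk(-EPROBE_DEFER, &bt);
        printf("deferred: rc=%d\n", rc);
        return 0;
}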
 
-int mt2701_afe_enable_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_i2s(struct mtk_base_afe *afe, int id, int dir)
 {
-       int ret = 0;
+       struct mt2701_afe_private *afe_priv = afe->platform_priv;
+       struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
+       int ret;
 
-       ret = mt2701_turn_on_a1sys_clock(afe);
+       ret = clk_prepare_enable(i2s_path->asrco_ck);
        if (ret) {
-               dev_err(afe->dev, "%s turn_on_a1sys_clock fail %d\n",
-                       __func__, ret);
+               dev_err(afe->dev, "failed to enable ASRC clock %d\n", ret);
                return ret;
        }
 
-       ret = mt2701_turn_on_a2sys_clock(afe);
+       ret = clk_prepare_enable(i2s_path->hop_ck[dir]);
        if (ret) {
-               dev_err(afe->dev, "%s turn_on_a2sys_clock fail %d\n",
-                       __func__, ret);
-               mt2701_turn_off_a1sys_clock(afe);
-               return ret;
+               dev_err(afe->dev, "failed to enable I2S clock %d\n", ret);
+               goto err_hop_ck;
        }
 
-       ret = mt2701_turn_on_afe_clock(afe);
-       if (ret) {
-               dev_err(afe->dev, "%s turn_on_afe_clock fail %d\n",
-                       __func__, ret);
-               mt2701_turn_off_a1sys_clock(afe);
-               mt2701_turn_off_a2sys_clock(afe);
-               return ret;
-       }
+       return 0;
 
-       regmap_update_bits(afe->regmap, ASYS_TOP_CON,
-                          AUDIO_TOP_CON0_A1SYS_A2SYS_ON,
-                          AUDIO_TOP_CON0_A1SYS_A2SYS_ON);
-       regmap_update_bits(afe->regmap, AFE_DAC_CON0,
-                          AFE_DAC_CON0_AFE_ON,
-                          AFE_DAC_CON0_AFE_ON);
-       regmap_write(afe->regmap, PWR2_TOP_CON,
-                    PWR2_TOP_CON_INIT_VAL);
-       regmap_write(afe->regmap, PWR1_ASM_CON1,
-                    PWR1_ASM_CON1_INIT_VAL);
-       regmap_write(afe->regmap, PWR2_ASM_CON1,
-                    PWR2_ASM_CON1_INIT_VAL);
+err_hop_ck:
+       clk_disable_unprepare(i2s_path->asrco_ck);
 
-       return 0;
+       return ret;
 }
 
-void mt2701_afe_disable_clock(struct mtk_base_afe *afe)
+void mt2701_afe_disable_i2s(struct mtk_base_afe *afe, int id, int dir)
 {
-       mt2701_turn_off_afe_clock(afe);
-       mt2701_turn_off_a1sys_clock(afe);
-       mt2701_turn_off_a2sys_clock(afe);
-       regmap_update_bits(afe->regmap, ASYS_TOP_CON,
-                          AUDIO_TOP_CON0_A1SYS_A2SYS_ON, 0);
-       regmap_update_bits(afe->regmap, AFE_DAC_CON0,
-                          AFE_DAC_CON0_AFE_ON, 0);
+       struct mt2701_afe_private *afe_priv = afe->platform_priv;
+       struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
+
+       clk_disable_unprepare(i2s_path->hop_ck[dir]);
+       clk_disable_unprepare(i2s_path->asrco_ck);
 }
 
-int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_mclk(struct mtk_base_afe *afe, int id)
 {
        struct mt2701_afe_private *afe_priv = afe->platform_priv;
-       int ret = 0;
-
-       /* Set Mux */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
-               goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
-       }
-
-       ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL],
-                            afe_priv->clocks[MT2701_AUD_AUD1PLL_98M]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
-                       aud_clks[MT2701_AUD_AUD_MUX1_SEL],
-                       aud_clks[MT2701_AUD_AUD1PLL_98M], ret);
-               goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
-       }
-
-       /* Set Divider */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__,
-                       aud_clks[MT2701_AUD_AUD_MUX1_DIV],
-                       ret);
-               goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
-       }
-
-       ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV],
-                          MT2701_AUD_AUD_MUX1_DIV_RATE);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
-                       aud_clks[MT2701_AUD_AUD_MUX1_DIV],
-                       MT2701_AUD_AUD_MUX1_DIV_RATE, ret);
-               goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
-       }
+       struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
 
-       /* Enable clock gate */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[MT2701_AUD_AUD_48K_TIMING], ret);
-               goto A1SYS_CLK_AUD_48K_ERR;
-       }
+       return clk_prepare_enable(i2s_path->mclk_ck);
+}
 
-       /* Enable infra audio */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
-               goto A1SYS_CLK_INFRA_ERR;
-       }
+void mt2701_afe_disable_mclk(struct mtk_base_afe *afe, int id)
+{
+       struct mt2701_afe_private *afe_priv = afe->platform_priv;
+       struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
 
-       return 0;
+       clk_disable_unprepare(i2s_path->mclk_ck);
+}
 
-A1SYS_CLK_INFRA_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-A1SYS_CLK_AUD_48K_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
-A1SYS_CLK_AUD_MUX1_DIV_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
-A1SYS_CLK_AUD_MUX1_SEL_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+int mt2701_enable_btmrg_clk(struct mtk_base_afe *afe)
+{
+       struct mt2701_afe_private *afe_priv = afe->platform_priv;
 
-       return ret;
+       return clk_prepare_enable(afe_priv->mrgif_ck);
 }
 
-void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe)
+void mt2701_disable_btmrg_clk(struct mtk_base_afe *afe)
 {
        struct mt2701_afe_private *afe_priv = afe->platform_priv;
 
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+       clk_disable_unprepare(afe_priv->mrgif_ck);
 }
 
-int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe)
+static int mt2701_afe_enable_audsys(struct mtk_base_afe *afe)
 {
        struct mt2701_afe_private *afe_priv = afe->platform_priv;
-       int ret = 0;
+       int ret;
 
-       /* Set Mux */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
-               goto A2SYS_CLK_AUD_MUX2_SEL_ERR;
-       }
+       /* Enable infra clock gate */
+       ret = clk_prepare_enable(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
+       if (ret)
+               return ret;
 
-       ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL],
-                            afe_priv->clocks[MT2701_AUD_AUD2PLL_90M]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
-                       aud_clks[MT2701_AUD_AUD_MUX2_SEL],
-                       aud_clks[MT2701_AUD_AUD2PLL_90M], ret);
-               goto A2SYS_CLK_AUD_MUX2_SEL_ERR;
-       }
+       /* Enable top a1sys clock gate */
+       ret = clk_prepare_enable(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+       if (ret)
+               goto err_a1sys;
 
-       /* Set Divider */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[MT2701_AUD_AUD_MUX2_DIV], ret);
-               goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
-       }
+       /* Enable top a2sys clock gate */
+       ret = clk_prepare_enable(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+       if (ret)
+               goto err_a2sys;
 
-       ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV],
-                          MT2701_AUD_AUD_MUX2_DIV_RATE);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
-                       aud_clks[MT2701_AUD_AUD_MUX2_DIV],
-                       MT2701_AUD_AUD_MUX2_DIV_RATE, ret);
-               goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
-       }
+       /* Internal clock gates */
+       ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+       if (ret)
+               goto err_afe;
 
-       /* Enable clock gate */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[MT2701_AUD_AUD_44K_TIMING], ret);
-               goto A2SYS_CLK_AUD_44K_ERR;
-       }
+       ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+       if (ret)
+               goto err_audio_a1sys;
 
-       /* Enable infra audio */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
-               goto A2SYS_CLK_INFRA_ERR;
-       }
+       ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+       if (ret)
+               goto err_audio_a2sys;
+
+       ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_AFE_CONN]);
+       if (ret)
+               goto err_afe_conn;
 
        return 0;
 
-A2SYS_CLK_INFRA_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-A2SYS_CLK_AUD_44K_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
-A2SYS_CLK_AUD_MUX2_DIV_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
-A2SYS_CLK_AUD_MUX2_SEL_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+err_afe_conn:
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+err_audio_a2sys:
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+err_audio_a1sys:
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+err_afe:
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+err_a2sys:
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+err_a1sys:
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
 
        return ret;
 }
 
-void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe)
+static void mt2701_afe_disable_audsys(struct mtk_base_afe *afe)
 {
        struct mt2701_afe_private *afe_priv = afe->platform_priv;
 
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE_CONN]);
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+       clk_disable_unprepare(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
 }
 
-int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_clock(struct mtk_base_afe *afe)
 {
-       struct mt2701_afe_private *afe_priv = afe->platform_priv;
        int ret;
 
-       /* enable INFRA_SYS */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
-               goto AFE_AUD_INFRA_ERR;
-       }
-
-       /* Set MT2701_AUD_AUDINTBUS to MT2701_AUD_SYSPLL1_D4 */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[MT2701_AUD_AUDINTBUS], ret);
-               goto AFE_AUD_AUDINTBUS_ERR;
-       }
-
-       ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUDINTBUS],
-                            afe_priv->clocks[MT2701_AUD_SYSPLL1_D4]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
-                       aud_clks[MT2701_AUD_AUDINTBUS],
-                       aud_clks[MT2701_AUD_SYSPLL1_D4], ret);
-               goto AFE_AUD_AUDINTBUS_ERR;
-       }
-
-       /* Set MT2701_AUD_ASM_H_SEL to MT2701_AUD_UNIVPLL2_D2 */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
+       /* Enable audio system */
+       ret = mt2701_afe_enable_audsys(afe);
        if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[MT2701_AUD_ASM_H_SEL], ret);
-               goto AFE_AUD_ASM_H_ERR;
-       }
-
-       ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_H_SEL],
-                            afe_priv->clocks[MT2701_AUD_UNIVPLL2_D2]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
-                       aud_clks[MT2701_AUD_ASM_H_SEL],
-                       aud_clks[MT2701_AUD_UNIVPLL2_D2], ret);
-               goto AFE_AUD_ASM_H_ERR;
-       }
-
-       /* Set MT2701_AUD_ASM_M_SEL to MT2701_AUD_UNIVPLL2_D4 */
-       ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[MT2701_AUD_ASM_M_SEL], ret);
-               goto AFE_AUD_ASM_M_ERR;
+               dev_err(afe->dev, "failed to enable audio system %d\n", ret);
+               return ret;
        }
 
-       ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_M_SEL],
-                            afe_priv->clocks[MT2701_AUD_UNIVPLL2_D4]);
-       if (ret) {
-               dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
-                       aud_clks[MT2701_AUD_ASM_M_SEL],
-                       aud_clks[MT2701_AUD_UNIVPLL2_D4], ret);
-               goto AFE_AUD_ASM_M_ERR;
-       }
+       regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+                          ASYS_TOP_CON_ASYS_TIMING_ON,
+                          ASYS_TOP_CON_ASYS_TIMING_ON);
+       regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+                          AFE_DAC_CON0_AFE_ON,
+                          AFE_DAC_CON0_AFE_ON);
 
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
-                          AUDIO_TOP_CON0_PDN_AFE, 0);
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
-                          AUDIO_TOP_CON0_PDN_APLL_CK, 0);
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-                          AUDIO_TOP_CON4_PDN_A1SYS, 0);
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-                          AUDIO_TOP_CON4_PDN_A2SYS, 0);
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-                          AUDIO_TOP_CON4_PDN_AFE_CONN, 0);
+       /* Configure ASRC */
+       regmap_write(afe->regmap, PWR1_ASM_CON1, PWR1_ASM_CON1_INIT_VAL);
+       regmap_write(afe->regmap, PWR2_ASM_CON1, PWR2_ASM_CON1_INIT_VAL);
 
        return 0;
-
-AFE_AUD_ASM_M_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
-AFE_AUD_ASM_H_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
-AFE_AUD_AUDINTBUS_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
-AFE_AUD_INFRA_ERR:
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-
-       return ret;
 }
 
-void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe)
+int mt2701_afe_disable_clock(struct mtk_base_afe *afe)
 {
-       struct mt2701_afe_private *afe_priv = afe->platform_priv;
+       regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+                          ASYS_TOP_CON_ASYS_TIMING_ON, 0);
+       regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+                          AFE_DAC_CON0_AFE_ON, 0);
+
+       mt2701_afe_disable_audsys(afe);
 
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
-       clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
-
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
-                          AUDIO_TOP_CON0_PDN_AFE, AUDIO_TOP_CON0_PDN_AFE);
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
-                          AUDIO_TOP_CON0_PDN_APLL_CK,
-                          AUDIO_TOP_CON0_PDN_APLL_CK);
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-                          AUDIO_TOP_CON4_PDN_A1SYS,
-                          AUDIO_TOP_CON4_PDN_A1SYS);
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-                          AUDIO_TOP_CON4_PDN_A2SYS,
-                          AUDIO_TOP_CON4_PDN_A2SYS);
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-                          AUDIO_TOP_CON4_PDN_AFE_CONN,
-                          AUDIO_TOP_CON4_PDN_AFE_CONN);
+       return 0;
 }
 
 void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
                               int mclk)
 {
-       struct mt2701_afe_private *afe_priv = afe->platform_priv;
+       struct mt2701_afe_private *priv = afe->platform_priv;
+       struct mt2701_i2s_path *i2s_path = &priv->i2s_path[id];
        int ret;
-       int aud_src_div_id = MT2701_AUD_AUD_K1_SRC_DIV + id;
-       int aud_src_clk_id = MT2701_AUD_AUD_K1_SRC_SEL + id;
 
-       /* Set MCLK Kx_SRC_SEL(domain) */
-       ret = clk_prepare_enable(afe_priv->clocks[aud_src_clk_id]);
-       if (ret)
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[aud_src_clk_id], ret);
-
-       if (domain == 0) {
-               ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
-                                    afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
-               if (ret)
-                       dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
-                               __func__, aud_clks[aud_src_clk_id],
-                               aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
-       } else {
-               ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
-                                    afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
-               if (ret)
-                       dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
-                               __func__, aud_clks[aud_src_clk_id],
-                               aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
-       }
-       clk_disable_unprepare(afe_priv->clocks[aud_src_clk_id]);
+       /* Set mclk source */
+       if (domain == 0)
+               ret = clk_set_parent(i2s_path->sel_ck,
+                                    priv->base_ck[MT2701_TOP_AUD_MCLK_SRC0]);
+       else
+               ret = clk_set_parent(i2s_path->sel_ck,
+                                    priv->base_ck[MT2701_TOP_AUD_MCLK_SRC1]);
 
-       /* Set MCLK Kx_SRC_DIV(divider) */
-       ret = clk_prepare_enable(afe_priv->clocks[aud_src_div_id]);
        if (ret)
-               dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-                       __func__, aud_clks[aud_src_div_id], ret);
+               dev_err(afe->dev, "failed to set domain%d mclk source %d\n",
+                       domain, ret);
 
-       ret = clk_set_rate(afe_priv->clocks[aud_src_div_id], mclk);
+       /* Set mclk divider */
+       ret = clk_set_rate(i2s_path->div_ck, mclk);
        if (ret)
-               dev_err(afe->dev, "%s clk_set_rate %s-%d fail %d\n", __func__,
-                       aud_clks[aud_src_div_id], mclk, ret);
-       clk_disable_unprepare(afe_priv->clocks[aud_src_div_id]);
+               dev_err(afe->dev, "failed to set mclk divider %d\n", ret);
 }
-
-MODULE_DESCRIPTION("MT2701 afe clock control");
-MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
-MODULE_LICENSE("GPL v2");
index 6497d57..15417d9 100644 (file)
@@ -21,16 +21,15 @@ struct mtk_base_afe;
 
 int mt2701_init_clock(struct mtk_base_afe *afe);
 int mt2701_afe_enable_clock(struct mtk_base_afe *afe);
-void mt2701_afe_disable_clock(struct mtk_base_afe *afe);
+int mt2701_afe_disable_clock(struct mtk_base_afe *afe);
 
-int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe);
+int mt2701_afe_enable_i2s(struct mtk_base_afe *afe, int id, int dir);
+void mt2701_afe_disable_i2s(struct mtk_base_afe *afe, int id, int dir);
+int mt2701_afe_enable_mclk(struct mtk_base_afe *afe, int id);
+void mt2701_afe_disable_mclk(struct mtk_base_afe *afe, int id);
 
-int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe);
-
-int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe);
+int mt2701_enable_btmrg_clk(struct mtk_base_afe *afe);
+void mt2701_disable_btmrg_clk(struct mtk_base_afe *afe);
 
 void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
                               int mclk);
index c19430e..ae8ddea 100644 (file)
@@ -16,6 +16,7 @@
 
 #ifndef _MT_2701_AFE_COMMON_H_
 #define _MT_2701_AFE_COMMON_H_
+
 #include <sound/soc.h>
 #include <linux/clk.h>
 #include <linux/regmap.h>
 #define MT2701_STREAM_DIR_NUM (SNDRV_PCM_STREAM_LAST + 1)
 #define MT2701_PLL_DOMAIN_0_RATE       98304000
 #define MT2701_PLL_DOMAIN_1_RATE       90316800
-#define MT2701_AUD_AUD_MUX1_DIV_RATE (MT2701_PLL_DOMAIN_0_RATE / 2)
-#define MT2701_AUD_AUD_MUX2_DIV_RATE (MT2701_PLL_DOMAIN_1_RATE / 2)
-
-enum {
-       MT2701_I2S_1,
-       MT2701_I2S_2,
-       MT2701_I2S_3,
-       MT2701_I2S_4,
-       MT2701_I2S_NUM,
-};
+#define MT2701_I2S_NUM 4
 
 enum {
        MT2701_MEMIF_DL1,
@@ -62,60 +54,23 @@ enum {
 };
 
 enum {
-       MT2701_IRQ_ASYS_START,
-       MT2701_IRQ_ASYS_IRQ1 = MT2701_IRQ_ASYS_START,
+       MT2701_IRQ_ASYS_IRQ1,
        MT2701_IRQ_ASYS_IRQ2,
        MT2701_IRQ_ASYS_IRQ3,
        MT2701_IRQ_ASYS_END,
 };
 
-/* 2701 clock def */
-enum audio_system_clock_type {
-       MT2701_AUD_INFRA_SYS_AUDIO,
-       MT2701_AUD_AUD_MUX1_SEL,
-       MT2701_AUD_AUD_MUX2_SEL,
-       MT2701_AUD_AUD_MUX1_DIV,
-       MT2701_AUD_AUD_MUX2_DIV,
-       MT2701_AUD_AUD_48K_TIMING,
-       MT2701_AUD_AUD_44K_TIMING,
-       MT2701_AUD_AUDPLL_MUX_SEL,
-       MT2701_AUD_APLL_SEL,
-       MT2701_AUD_AUD1PLL_98M,
-       MT2701_AUD_AUD2PLL_90M,
-       MT2701_AUD_HADDS2PLL_98M,
-       MT2701_AUD_HADDS2PLL_294M,
-       MT2701_AUD_AUDPLL,
-       MT2701_AUD_AUDPLL_D4,
-       MT2701_AUD_AUDPLL_D8,
-       MT2701_AUD_AUDPLL_D16,
-       MT2701_AUD_AUDPLL_D24,
-       MT2701_AUD_AUDINTBUS,
-       MT2701_AUD_CLK_26M,
-       MT2701_AUD_SYSPLL1_D4,
-       MT2701_AUD_AUD_K1_SRC_SEL,
-       MT2701_AUD_AUD_K2_SRC_SEL,
-       MT2701_AUD_AUD_K3_SRC_SEL,
-       MT2701_AUD_AUD_K4_SRC_SEL,
-       MT2701_AUD_AUD_K5_SRC_SEL,
-       MT2701_AUD_AUD_K6_SRC_SEL,
-       MT2701_AUD_AUD_K1_SRC_DIV,
-       MT2701_AUD_AUD_K2_SRC_DIV,
-       MT2701_AUD_AUD_K3_SRC_DIV,
-       MT2701_AUD_AUD_K4_SRC_DIV,
-       MT2701_AUD_AUD_K5_SRC_DIV,
-       MT2701_AUD_AUD_K6_SRC_DIV,
-       MT2701_AUD_AUD_I2S1_MCLK,
-       MT2701_AUD_AUD_I2S2_MCLK,
-       MT2701_AUD_AUD_I2S3_MCLK,
-       MT2701_AUD_AUD_I2S4_MCLK,
-       MT2701_AUD_AUD_I2S5_MCLK,
-       MT2701_AUD_AUD_I2S6_MCLK,
-       MT2701_AUD_ASM_M_SEL,
-       MT2701_AUD_ASM_H_SEL,
-       MT2701_AUD_UNIVPLL2_D4,
-       MT2701_AUD_UNIVPLL2_D2,
-       MT2701_AUD_SYSPLL_D5,
-       MT2701_CLOCK_NUM
+enum audio_base_clock {
+       MT2701_INFRA_SYS_AUDIO,
+       MT2701_TOP_AUD_MCLK_SRC0,
+       MT2701_TOP_AUD_MCLK_SRC1,
+       MT2701_TOP_AUD_A1SYS,
+       MT2701_TOP_AUD_A2SYS,
+       MT2701_AUDSYS_AFE,
+       MT2701_AUDSYS_AFE_CONN,
+       MT2701_AUDSYS_A1SYS,
+       MT2701_AUDSYS_A2SYS,
+       MT2701_BASE_CLK_NUM,
 };
 
 static const unsigned int mt2701_afe_backup_list[] = {
@@ -139,12 +94,8 @@ static const unsigned int mt2701_afe_backup_list[] = {
        AFE_MEMIF_PBUF_SIZE,
 };
 
-struct snd_pcm_substream;
-struct mtk_base_irq_data;
-
 struct mt2701_i2s_data {
        int i2s_ctrl_reg;
-       int i2s_pwn_shift;
        int i2s_asrc_fs_shift;
        int i2s_asrc_fs_mask;
 };
@@ -160,12 +111,18 @@ struct mt2701_i2s_path {
        int mclk_rate;
        int on[I2S_DIR_NUM];
        int occupied[I2S_DIR_NUM];
-       const struct mt2701_i2s_data *i2s_data[2];
+       const struct mt2701_i2s_data *i2s_data[I2S_DIR_NUM];
+       struct clk *hop_ck[I2S_DIR_NUM];
+       struct clk *sel_ck;
+       struct clk *div_ck;
+       struct clk *mclk_ck;
+       struct clk *asrco_ck;
 };
 
 struct mt2701_afe_private {
-       struct clk *clocks[MT2701_CLOCK_NUM];
        struct mt2701_i2s_path i2s_path[MT2701_I2S_NUM];
+       struct clk *base_ck[MT2701_BASE_CLK_NUM];
+       struct clk *mrgif_ck;
        bool mrg_enable[MT2701_STREAM_DIR_NUM];
 };
 
index 8fda182..5bc4e00 100644 (file)
 
 #include <linux/delay.h>
 #include <linux/module.h>
+#include <linux/mfd/syscon.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/pm_runtime.h>
-#include <sound/soc.h>
 
 #include "mt2701-afe-common.h"
-
 #include "mt2701-afe-clock-ctrl.h"
 #include "../common/mtk-afe-platform-driver.h"
 #include "../common/mtk-afe-fe-dai.h"
 
-#define AFE_IRQ_STATUS_BITS    0xff
-
 static const struct snd_pcm_hardware mt2701_afe_hardware = {
        .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED
                | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID,
@@ -97,40 +94,26 @@ static int mt2701_afe_i2s_startup(struct snd_pcm_substream *substream,
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
-       struct mt2701_afe_private *afe_priv = afe->platform_priv;
        int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
-       int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
-       int ret = 0;
 
        if (i2s_num < 0)
                return i2s_num;
 
-       /* enable mclk */
-       ret = clk_prepare_enable(afe_priv->clocks[clk_num]);
-       if (ret)
-               dev_err(afe->dev, "Failed to enable mclk for I2S: %d\n",
-                       i2s_num);
-
-       return ret;
+       return mt2701_afe_enable_mclk(afe, i2s_num);
 }
 
 static int mt2701_afe_i2s_path_shutdown(struct snd_pcm_substream *substream,
                                        struct snd_soc_dai *dai,
+                                       int i2s_num,
                                        int dir_invert)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
        struct mt2701_afe_private *afe_priv = afe->platform_priv;
-       int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
-       struct mt2701_i2s_path *i2s_path;
+       struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i2s_num];
        const struct mt2701_i2s_data *i2s_data;
        int stream_dir = substream->stream;
 
-       if (i2s_num < 0)
-               return i2s_num;
-
-       i2s_path = &afe_priv->i2s_path[i2s_num];
-
        if (dir_invert) {
                if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
                        stream_dir = SNDRV_PCM_STREAM_CAPTURE;
@@ -151,9 +134,9 @@ static int mt2701_afe_i2s_path_shutdown(struct snd_pcm_substream *substream,
        /* disable i2s */
        regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
                           ASYS_I2S_CON_I2S_EN, 0);
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-                          1 << i2s_data->i2s_pwn_shift,
-                          1 << i2s_data->i2s_pwn_shift);
+
+       mt2701_afe_disable_i2s(afe, i2s_num, stream_dir);
+
        return 0;
 }
 
@@ -165,7 +148,6 @@ static void mt2701_afe_i2s_shutdown(struct snd_pcm_substream *substream,
        struct mt2701_afe_private *afe_priv = afe->platform_priv;
        int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
        struct mt2701_i2s_path *i2s_path;
-       int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
 
        if (i2s_num < 0)
                return;
@@ -177,37 +159,32 @@ static void mt2701_afe_i2s_shutdown(struct snd_pcm_substream *substream,
        else
                goto I2S_UNSTART;
 
-       mt2701_afe_i2s_path_shutdown(substream, dai, 0);
+       mt2701_afe_i2s_path_shutdown(substream, dai, i2s_num, 0);
 
        /* need to disable i2s-out path when disable i2s-in */
        if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-               mt2701_afe_i2s_path_shutdown(substream, dai, 1);
+               mt2701_afe_i2s_path_shutdown(substream, dai, i2s_num, 1);
 
 I2S_UNSTART:
        /* disable mclk */
-       clk_disable_unprepare(afe_priv->clocks[clk_num]);
+       mt2701_afe_disable_mclk(afe, i2s_num);
 }
 
 static int mt2701_i2s_path_prepare_enable(struct snd_pcm_substream *substream,
                                          struct snd_soc_dai *dai,
+                                         int i2s_num,
                                          int dir_invert)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
        struct mt2701_afe_private *afe_priv = afe->platform_priv;
-       int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
-       struct mt2701_i2s_path *i2s_path;
+       struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i2s_num];
        const struct mt2701_i2s_data *i2s_data;
        struct snd_pcm_runtime * const runtime = substream->runtime;
        int reg, fs, w_len = 1; /* now we support bck 64bits only */
        int stream_dir = substream->stream;
        unsigned int mask = 0, val = 0;
 
-       if (i2s_num < 0)
-               return i2s_num;
-
-       i2s_path = &afe_priv->i2s_path[i2s_num];
-
        if (dir_invert) {
                if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
                        stream_dir = SNDRV_PCM_STREAM_CAPTURE;
@@ -251,9 +228,7 @@ static int mt2701_i2s_path_prepare_enable(struct snd_pcm_substream *substream,
                           fs << i2s_data->i2s_asrc_fs_shift);
 
        /* enable i2s */
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-                          1 << i2s_data->i2s_pwn_shift,
-                          0 << i2s_data->i2s_pwn_shift);
+       mt2701_afe_enable_i2s(afe, i2s_num, stream_dir);
 
        /* reset i2s hw status before enable */
        regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
@@ -300,13 +275,13 @@ static int mt2701_afe_i2s_prepare(struct snd_pcm_substream *substream,
        mt2701_mclk_configuration(afe, i2s_num, clk_domain, mclk_rate);
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               mt2701_i2s_path_prepare_enable(substream, dai, 0);
+               mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 0);
        } else {
                /* need to enable i2s-out path when enable i2s-in */
                /* prepare for another direction "out" */
-               mt2701_i2s_path_prepare_enable(substream, dai, 1);
+               mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 1);
                /* prepare for "in" */
-               mt2701_i2s_path_prepare_enable(substream, dai, 0);
+               mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 0);
        }
 
        return 0;
@@ -339,9 +314,11 @@ static int mt2701_btmrg_startup(struct snd_pcm_substream *substream,
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
        struct mt2701_afe_private *afe_priv = afe->platform_priv;
+       int ret;
 
-       regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-                          AUDIO_TOP_CON4_PDN_MRGIF, 0);
+       ret = mt2701_enable_btmrg_clk(afe);
+       if (ret)
+               return ret;
 
        afe_priv->mrg_enable[substream->stream] = 1;
        return 0;
@@ -406,9 +383,7 @@ static void mt2701_btmrg_shutdown(struct snd_pcm_substream *substream,
                                   AFE_MRGIF_CON_MRG_EN, 0);
                regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
                                   AFE_MRGIF_CON_MRG_I2S_EN, 0);
-               regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-                                  AUDIO_TOP_CON4_PDN_MRGIF,
-                                  AUDIO_TOP_CON4_PDN_MRGIF);
+               mt2701_disable_btmrg_clk(afe);
        }
        afe_priv->mrg_enable[substream->stream] = 0;
 }
@@ -574,7 +549,6 @@ static const struct snd_soc_dai_ops mt2701_single_memif_dai_ops = {
        .hw_free        = mtk_afe_fe_hw_free,
        .prepare        = mtk_afe_fe_prepare,
        .trigger        = mtk_afe_fe_trigger,
-
 };
 
 static const struct snd_soc_dai_ops mt2701_dlm_memif_dai_ops = {
@@ -915,31 +889,6 @@ static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s4[] = {
                                    PWR2_TOP_CON, 19, 1, 0),
 };
 
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc0[] = {
-       SOC_DAPM_SINGLE_AUTODISABLE("Asrc0 out Switch", AUDIO_TOP_CON4, 14, 1,
-                                   1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc1[] = {
-       SOC_DAPM_SINGLE_AUTODISABLE("Asrc1 out Switch", AUDIO_TOP_CON4, 15, 1,
-                                   1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc2[] = {
-       SOC_DAPM_SINGLE_AUTODISABLE("Asrc2 out Switch", PWR2_TOP_CON, 6, 1,
-                                   1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc3[] = {
-       SOC_DAPM_SINGLE_AUTODISABLE("Asrc3 out Switch", PWR2_TOP_CON, 7, 1,
-                                   1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc4[] = {
-       SOC_DAPM_SINGLE_AUTODISABLE("Asrc4 out Switch", PWR2_TOP_CON, 8, 1,
-                                   1),
-};
-
 static const struct snd_soc_dapm_widget mt2701_afe_pcm_widgets[] = {
        /* inter-connections */
        SND_SOC_DAPM_MIXER("I00", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -999,19 +948,6 @@ static const struct snd_soc_dapm_widget mt2701_afe_pcm_widgets[] = {
        SND_SOC_DAPM_MIXER("I18I19", SND_SOC_NOPM, 0, 0,
                           mt2701_afe_multi_ch_out_i2s3,
                           ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s3)),
-
-       SND_SOC_DAPM_MIXER("ASRC_O0", SND_SOC_NOPM, 0, 0,
-                          mt2701_afe_multi_ch_out_asrc0,
-                          ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc0)),
-       SND_SOC_DAPM_MIXER("ASRC_O1", SND_SOC_NOPM, 0, 0,
-                          mt2701_afe_multi_ch_out_asrc1,
-                          ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc1)),
-       SND_SOC_DAPM_MIXER("ASRC_O2", SND_SOC_NOPM, 0, 0,
-                          mt2701_afe_multi_ch_out_asrc2,
-                          ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc2)),
-       SND_SOC_DAPM_MIXER("ASRC_O3", SND_SOC_NOPM, 0, 0,
-                          mt2701_afe_multi_ch_out_asrc3,
-                          ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc3)),
 };
 
 static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
@@ -1021,7 +957,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
 
        {"I2S0 Playback", NULL, "O15"},
        {"I2S0 Playback", NULL, "O16"},
-
        {"I2S1 Playback", NULL, "O17"},
        {"I2S1 Playback", NULL, "O18"},
        {"I2S2 Playback", NULL, "O19"},
@@ -1038,7 +973,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
 
        {"I00", NULL, "I2S0 Capture"},
        {"I01", NULL, "I2S0 Capture"},
-
        {"I02", NULL, "I2S1 Capture"},
        {"I03", NULL, "I2S1 Capture"},
        /* I02,03 link to UL2, also need to open I2S0 */
@@ -1046,15 +980,10 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
 
        {"I26", NULL, "BT Capture"},
 
-       {"ASRC_O0", "Asrc0 out Switch", "DLM"},
-       {"ASRC_O1", "Asrc1 out Switch", "DLM"},
-       {"ASRC_O2", "Asrc2 out Switch", "DLM"},
-       {"ASRC_O3", "Asrc3 out Switch", "DLM"},
-
-       {"I12I13", "Multich I2S0 Out Switch", "ASRC_O0"},
-       {"I14I15", "Multich I2S1 Out Switch", "ASRC_O1"},
-       {"I16I17", "Multich I2S2 Out Switch", "ASRC_O2"},
-       {"I18I19", "Multich I2S3 Out Switch", "ASRC_O3"},
+       {"I12I13", "Multich I2S0 Out Switch", "DLM"},
+       {"I14I15", "Multich I2S1 Out Switch", "DLM"},
+       {"I16I17", "Multich I2S2 Out Switch", "DLM"},
+       {"I18I19", "Multich I2S3 Out Switch", "DLM"},
 
        { "I12", NULL, "I12I13" },
        { "I13", NULL, "I12I13" },
@@ -1079,7 +1008,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
        { "O21", "I18 Switch", "I18" },
        { "O22", "I19 Switch", "I19" },
        { "O31", "I35 Switch", "I35" },
-
 };
 
 static const struct snd_soc_component_driver mt2701_afe_pcm_dai_component = {
@@ -1386,14 +1314,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
        {
                {
                        .i2s_ctrl_reg = ASYS_I2SO1_CON,
-                       .i2s_pwn_shift = 6,
                        .i2s_asrc_fs_shift = 0,
                        .i2s_asrc_fs_mask = 0x1f,
 
                },
                {
                        .i2s_ctrl_reg = ASYS_I2SIN1_CON,
-                       .i2s_pwn_shift = 0,
                        .i2s_asrc_fs_shift = 0,
                        .i2s_asrc_fs_mask = 0x1f,
 
@@ -1402,14 +1328,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
        {
                {
                        .i2s_ctrl_reg = ASYS_I2SO2_CON,
-                       .i2s_pwn_shift = 7,
                        .i2s_asrc_fs_shift = 5,
                        .i2s_asrc_fs_mask = 0x1f,
 
                },
                {
                        .i2s_ctrl_reg = ASYS_I2SIN2_CON,
-                       .i2s_pwn_shift = 1,
                        .i2s_asrc_fs_shift = 5,
                        .i2s_asrc_fs_mask = 0x1f,
 
@@ -1418,14 +1342,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
        {
                {
                        .i2s_ctrl_reg = ASYS_I2SO3_CON,
-                       .i2s_pwn_shift = 8,
                        .i2s_asrc_fs_shift = 10,
                        .i2s_asrc_fs_mask = 0x1f,
 
                },
                {
                        .i2s_ctrl_reg = ASYS_I2SIN3_CON,
-                       .i2s_pwn_shift = 2,
                        .i2s_asrc_fs_shift = 10,
                        .i2s_asrc_fs_mask = 0x1f,
 
@@ -1434,14 +1356,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
        {
                {
                        .i2s_ctrl_reg = ASYS_I2SO4_CON,
-                       .i2s_pwn_shift = 9,
                        .i2s_asrc_fs_shift = 15,
                        .i2s_asrc_fs_mask = 0x1f,
 
                },
                {
                        .i2s_ctrl_reg = ASYS_I2SIN4_CON,
-                       .i2s_pwn_shift = 3,
                        .i2s_asrc_fs_shift = 15,
                        .i2s_asrc_fs_mask = 0x1f,
 
@@ -1449,14 +1369,6 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
        },
 };
 
-static const struct regmap_config mt2701_afe_regmap_config = {
-       .reg_bits = 32,
-       .reg_stride = 4,
-       .val_bits = 32,
-       .max_register = AFE_END_ADDR,
-       .cache_type = REGCACHE_NONE,
-};
-
 static irqreturn_t mt2701_asys_isr(int irq_id, void *dev)
 {
        int id;
@@ -1483,8 +1395,7 @@ static int mt2701_afe_runtime_suspend(struct device *dev)
 {
        struct mtk_base_afe *afe = dev_get_drvdata(dev);
 
-       mt2701_afe_disable_clock(afe);
-       return 0;
+       return mt2701_afe_disable_clock(afe);
 }
 
 static int mt2701_afe_runtime_resume(struct device *dev)
@@ -1496,21 +1407,22 @@ static int mt2701_afe_runtime_resume(struct device *dev)
 
 static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
 {
+       struct snd_soc_component *component;
        struct mtk_base_afe *afe;
        struct mt2701_afe_private *afe_priv;
-       struct resource *res;
        struct device *dev;
        int i, irq_id, ret;
 
        afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
        if (!afe)
                return -ENOMEM;
+
        afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
                                          GFP_KERNEL);
        if (!afe->platform_priv)
                return -ENOMEM;
-       afe_priv = afe->platform_priv;
 
+       afe_priv = afe->platform_priv;
        afe->dev = &pdev->dev;
        dev = afe->dev;
 
@@ -1527,17 +1439,11 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
                return ret;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-       afe->base_addr = devm_ioremap_resource(&pdev->dev, res);
-
-       if (IS_ERR(afe->base_addr))
-               return PTR_ERR(afe->base_addr);
-
-       afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr,
-               &mt2701_afe_regmap_config);
-       if (IS_ERR(afe->regmap))
+       afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
+       if (IS_ERR(afe->regmap)) {
+               dev_err(dev, "could not get regmap from parent\n");
                return PTR_ERR(afe->regmap);
+       }
 
        mutex_init(&afe->irq_alloc_lock);
 
@@ -1545,7 +1451,6 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
        afe->memif_size = MT2701_MEMIF_NUM;
        afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
                                  GFP_KERNEL);
-
        if (!afe->memif)
                return -ENOMEM;
 
@@ -1558,7 +1463,6 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
        afe->irqs_size = MT2701_IRQ_ASYS_END;
        afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
                                 GFP_KERNEL);
-
        if (!afe->irqs)
                return -ENOMEM;
 
@@ -1573,10 +1477,15 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
                        = &mt2701_i2s_data[i][I2S_IN];
        }
 
+       component = kzalloc(sizeof(*component), GFP_KERNEL);
+       if (!component)
+               return -ENOMEM;
+
+       component->regmap = afe->regmap;
+
        afe->mtk_afe_hardware = &mt2701_afe_hardware;
        afe->memif_fs = mt2701_memif_fs;
        afe->irq_fs = mt2701_irq_fs;
-
        afe->reg_back_up_list = mt2701_afe_backup_list;
        afe->reg_back_up_list_num = ARRAY_SIZE(mt2701_afe_backup_list);
        afe->runtime_resume = mt2701_afe_runtime_resume;
@@ -1586,59 +1495,58 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
        ret = mt2701_init_clock(afe);
        if (ret) {
                dev_err(dev, "init clock error\n");
-               return ret;
+               goto err_init_clock;
        }
 
        platform_set_drvdata(pdev, afe);
-       pm_runtime_enable(&pdev->dev);
-       if (!pm_runtime_enabled(&pdev->dev))
-               goto err_pm_disable;
-       pm_runtime_get_sync(&pdev->dev);
 
-       ret = snd_soc_register_platform(&pdev->dev, &mtk_afe_pcm_platform);
+       pm_runtime_enable(dev);
+       if (!pm_runtime_enabled(dev)) {
+               ret = mt2701_afe_runtime_resume(dev);
+               if (ret)
+                       goto err_pm_disable;
+       }
+       pm_runtime_get_sync(dev);
+
+       ret = snd_soc_register_platform(dev, &mtk_afe_pcm_platform);
        if (ret) {
                dev_warn(dev, "err_platform\n");
                goto err_platform;
        }
 
-       ret = snd_soc_register_component(&pdev->dev,
-                                        &mt2701_afe_pcm_dai_component,
-                                        mt2701_afe_pcm_dais,
-                                        ARRAY_SIZE(mt2701_afe_pcm_dais));
+       ret = snd_soc_add_component(dev, component,
+                                   &mt2701_afe_pcm_dai_component,
+                                   mt2701_afe_pcm_dais,
+                                   ARRAY_SIZE(mt2701_afe_pcm_dais));
        if (ret) {
                dev_warn(dev, "err_dai_component\n");
                goto err_dai_component;
        }
 
-       mt2701_afe_runtime_resume(&pdev->dev);
-
        return 0;
 
 err_dai_component:
-       snd_soc_unregister_component(&pdev->dev);
-
+       snd_soc_unregister_platform(dev);
 err_platform:
-       snd_soc_unregister_platform(&pdev->dev);
-
+       pm_runtime_put_sync(dev);
 err_pm_disable:
-       pm_runtime_disable(&pdev->dev);
+       pm_runtime_disable(dev);
+err_init_clock:
+       kfree(component);
 
        return ret;
 }
 
 static int mt2701_afe_pcm_dev_remove(struct platform_device *pdev)
 {
-       struct mtk_base_afe *afe = platform_get_drvdata(pdev);
-
+       pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        if (!pm_runtime_status_suspended(&pdev->dev))
                mt2701_afe_runtime_suspend(&pdev->dev);
-       pm_runtime_put_sync(&pdev->dev);
 
        snd_soc_unregister_component(&pdev->dev);
        snd_soc_unregister_platform(&pdev->dev);
-       /* disable afe clock */
-       mt2701_afe_disable_clock(afe);
+
        return 0;
 }
 
@@ -1670,4 +1578,3 @@ module_platform_driver(mt2701_afe_pcm_driver);
 MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver for 2701");
 MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
 MODULE_LICENSE("GPL v2");
-
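
Note: the reshuffled mt2701_afe_pcm_dev_remove() above follows the usual runtime-PM teardown order: drop the reference taken in probe with pm_runtime_put_sync(), disable runtime PM, and only then shut the hardware down by hand if the runtime-suspend callback never ran. A generic sketch of that ordering (driver specifics stubbed out):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
        /* driver specific: gate clocks, save register state, ... */
        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        pm_runtime_put_sync(&pdev->dev);        /* balances probe's get_sync() */
        pm_runtime_disable(&pdev->dev);

        /* If runtime PM never suspended the device, do it manually once. */
        if (!pm_runtime_status_suspended(&pdev->dev))
                example_runtime_suspend(&pdev->dev);

        return 0;
}
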
index bb62b1c..18e6769 100644 (file)
 #ifndef _MT2701_REG_H_
 #define _MT2701_REG_H_
 
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/pm_runtime.h>
-#include <sound/soc.h>
-#include "mt2701-afe-common.h"
-
-/*****************************************************************************
- *                  R E G I S T E R       D E F I N I T I O N
- *****************************************************************************/
 #define AUDIO_TOP_CON0 0x0000
 #define AUDIO_TOP_CON4 0x0010
 #define AUDIO_TOP_CON5 0x0014
 #define AFE_DAI_BASE 0x1370
 #define AFE_DAI_CUR 0x137c
 
-/* AUDIO_TOP_CON0 (0x0000) */
-#define AUDIO_TOP_CON0_A1SYS_A2SYS_ON  (0x3 << 0)
-#define AUDIO_TOP_CON0_PDN_AFE         (0x1 << 2)
-#define AUDIO_TOP_CON0_PDN_APLL_CK     (0x1 << 23)
-
-/* AUDIO_TOP_CON4 (0x0010) */
-#define AUDIO_TOP_CON4_I2SO1_PWN       (0x1 << 6)
-#define AUDIO_TOP_CON4_PDN_A1SYS       (0x1 << 21)
-#define AUDIO_TOP_CON4_PDN_A2SYS       (0x1 << 22)
-#define AUDIO_TOP_CON4_PDN_AFE_CONN    (0x1 << 23)
-#define AUDIO_TOP_CON4_PDN_MRGIF       (0x1 << 25)
-
 /* AFE_DAIBT_CON0 (0x001c) */
 #define AFE_DAIBT_CON0_DAIBT_EN                (0x1 << 0)
 #define AFE_DAIBT_CON0_BT_FUNC_EN      (0x1 << 1)
 #define AFE_MRGIF_CON_I2S_MODE_MASK    (0xf << 20)
 #define AFE_MRGIF_CON_I2S_MODE_32K     (0x4 << 20)
 
-/* ASYS_I2SO1_CON (0x061c) */
-#define ASYS_I2SO1_CON_FS              (0x1f << 8)
-#define ASYS_I2SO1_CON_FS_SET(x)       ((x) << 8)
-#define ASYS_I2SO1_CON_MULTI_CH                (0x1 << 16)
-#define ASYS_I2SO1_CON_SIDEGEN         (0x1 << 30)
-#define ASYS_I2SO1_CON_I2S_EN          (0x1 << 0)
-/* 0:EIAJ 1:I2S */
-#define ASYS_I2SO1_CON_I2S_MODE                (0x1 << 3)
-#define ASYS_I2SO1_CON_WIDE_MODE       (0x1 << 1)
-#define ASYS_I2SO1_CON_WIDE_MODE_SET(x)        ((x) << 1)
-
-/* PWR2_TOP_CON (0x0634) */
-#define PWR2_TOP_CON_INIT_VAL          (0xffe1ffff)
-
-/* ASYS_IRQ_CLR (0x07c0) */
-#define ASYS_IRQ_CLR_ALL               (0xffffffff)
+/* ASYS_TOP_CON (0x0600) */
+#define ASYS_TOP_CON_ASYS_TIMING_ON            (0x3 << 0)
 
 /* PWR2_ASM_CON1 (0x1070) */
 #define PWR2_ASM_CON1_INIT_VAL         (0x492492)
 #define ASYS_I2S_CON_WIDE_MODE_SET(x)  ((x) << 1)
 #define ASYS_I2S_IN_PHASE_FIX          (0x1 << 31)
 
-#define AFE_END_ADDR 0x15e0
 #endif
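
Note: the surviving defines in this header are plain mask macros consumed through regmap_update_bits(), which read-modify-writes only the bits selected by the mask; that is how the new ASYS_TOP_CON_ASYS_TIMING_ON field is switched on and off in the enable/disable paths earlier in this series. Reduced to a sketch:

#include <linux/regmap.h>
#include <linux/types.h>

#define EXAMPLE_TIMING_ON       (0x3 << 0)  /* same shape as ASYS_TOP_CON_ASYS_TIMING_ON */

/* Sketch: toggle a two-bit field without disturbing the rest of the register. */
static void example_set_timing(struct regmap *map, unsigned int reg, bool on)
{
        regmap_update_bits(map, reg, EXAMPLE_TIMING_ON,
                           on ? EXAMPLE_TIMING_ON : 0);
}
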
index 8a643a3..c7f7f8a 100644 (file)
@@ -1083,7 +1083,7 @@ static int mt8173_afe_init_audio_clk(struct mtk_base_afe *afe)
 static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
 {
        int ret, i;
-       unsigned int irq_id;
+       int irq_id;
        struct mtk_base_afe *afe;
        struct mt8173_afe_private *afe_priv;
        struct resource *res;
@@ -1105,9 +1105,9 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
        afe->dev = &pdev->dev;
 
        irq_id = platform_get_irq(pdev, 0);
-       if (!irq_id) {
+       if (irq_id <= 0) {
                dev_err(afe->dev, "np %s no irq\n", afe->dev->of_node->name);
-               return -ENXIO;
+               return irq_id < 0 ? irq_id : -ENXIO;
        }
        ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
                               0, "Afe_ISR_Handle", (void *)afe);
index 99c1521..5a9a548 100644 (file)
@@ -37,8 +37,6 @@ static const struct snd_soc_dapm_route mt8173_rt5650_rt5514_routes[] = {
        {"Sub DMIC1R", NULL, "Int Mic"},
        {"Headphone", NULL, "HPOL"},
        {"Headphone", NULL, "HPOR"},
-       {"Headset Mic", NULL, "micbias1"},
-       {"Headset Mic", NULL, "micbias2"},
        {"IN1P", NULL, "Headset Mic"},
        {"IN1N", NULL, "Headset Mic"},
 };
index 42de84c..b724808 100644 (file)
@@ -40,8 +40,6 @@ static const struct snd_soc_dapm_route mt8173_rt5650_rt5676_routes[] = {
        {"Headphone", NULL, "HPOL"},
        {"Headphone", NULL, "HPOR"},
        {"Headphone", NULL, "Sub AIF2TX"}, /* IF2 ADC to 5650  */
-       {"Headset Mic", NULL, "micbias1"},
-       {"Headset Mic", NULL, "micbias2"},
        {"IN1P", NULL, "Headset Mic"},
        {"IN1N", NULL, "Headset Mic"},
        {"Sub AIF2RX", NULL, "Headset Mic"}, /* IF2 DAC from 5650  */
index e69c141..40ebefd 100644 (file)
@@ -51,8 +51,6 @@ static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = {
        {"DMIC R1", NULL, "Int Mic"},
        {"Headphone", NULL, "HPOL"},
        {"Headphone", NULL, "HPOR"},
-       {"Headset Mic", NULL, "micbias1"},
-       {"Headset Mic", NULL, "micbias2"},
        {"IN1P", NULL, "Headset Mic"},
        {"IN1N", NULL, "Headset Mic"},
 };
index 2ed3240..2b3f240 100644 (file)
@@ -93,6 +93,14 @@ static struct snd_soc_dai_link mxs_sgtl5000_dai[] = {
        },
 };
 
+static const struct snd_soc_dapm_widget mxs_sgtl5000_dapm_widgets[] = {
+       SND_SOC_DAPM_MIC("Mic Jack", NULL),
+       SND_SOC_DAPM_LINE("Line In Jack", NULL),
+       SND_SOC_DAPM_HP("Headphone Jack", NULL),
+       SND_SOC_DAPM_SPK("Line Out Jack", NULL),
+       SND_SOC_DAPM_SPK("Ext Spk", NULL),
+};
+
 static struct snd_soc_card mxs_sgtl5000 = {
        .name           = "mxs_sgtl5000",
        .owner          = THIS_MODULE,
@@ -141,10 +149,23 @@ static int mxs_sgtl5000_probe(struct platform_device *pdev)
 
        card->dev = &pdev->dev;
 
+       if (of_find_property(np, "audio-routing", NULL)) {
+               card->dapm_widgets = mxs_sgtl5000_dapm_widgets;
+               card->num_dapm_widgets = ARRAY_SIZE(mxs_sgtl5000_dapm_widgets);
+
+               ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to parse audio-routing (%d)\n",
+                               ret);
+                       return ret;
+               }
+       }
+
        ret = devm_snd_soc_register_card(&pdev->dev, card);
        if (ret) {
-               dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
-                       ret);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+                               ret);
                return ret;
        }
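
Note: two common machine-driver patterns appear in the mxs_sgtl5000 change above: DAPM widgets and routes are taken from an optional "audio-routing" DT property, and -EPROBE_DEFER from card registration is not logged as an error because the probe will simply be retried. A condensed sketch of both:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <sound/soc.h>

static int example_register_card(struct platform_device *pdev,
                                 struct snd_soc_card *card,
                                 const struct snd_soc_dapm_widget *widgets,
                                 int num_widgets)
{
        int ret;

        card->dev = &pdev->dev;

        /* Routing is optional; only hook up widgets when the DT provides it. */
        if (of_find_property(pdev->dev.of_node, "audio-routing", NULL)) {
                card->dapm_widgets = widgets;
                card->num_dapm_widgets = num_widgets;

                ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
                if (ret)
                        return ret;
        }

        ret = devm_snd_soc_register_card(&pdev->dev, card);
        if (ret && ret != -EPROBE_DEFER)
                dev_err(&pdev->dev, "registering card failed (%d)\n", ret);

        return ret;
}
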
 
index b6615af..81b09d7 100644 (file)
@@ -67,7 +67,7 @@ static unsigned short nuc900_ac97_read(struct snd_ac97 *ac97,
 
        /* polling the AC_R_FINISH */
        while (!(AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_R_FINISH)
-                                                               && timeout--)
+                                                               && --timeout)
                mdelay(1);
 
        if (!timeout) {
@@ -121,7 +121,7 @@ static void nuc900_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
 
        /* polling the AC_W_FINISH */
        while ((AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_W_FINISH)
-                                                               && timeout--)
+                                                               && --timeout)
                mdelay(1);
 
        if (!timeout)
@@ -345,11 +345,10 @@ static int nuc900_ac97_drvprobe(struct platform_device *pdev)
                goto out;
        }
 
-       nuc900_audio->irq_num = platform_get_irq(pdev, 0);
-       if (!nuc900_audio->irq_num) {
-               ret = -EBUSY;
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0)
                goto out;
-       }
+       nuc900_audio->irq_num = ret;
 
        nuc900_ac97_data = nuc900_audio;
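
Note: the nuc900 change above fixes a classic busy-wait off-by-one. With the post-decrement form, while (cond && timeout--) exits with timeout == -1 after the counter is exhausted, so the subsequent if (!timeout) error check never fires; with --timeout the loop exits with timeout == 0 and the timeout path is reported correctly. Sketch of the corrected shape:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: poll a status callback with a bounded number of 1 ms retries. */
static int example_poll_ready(bool (*ready)(void *ctx), void *ctx)
{
        int timeout = 100;

        while (!ready(ctx) && --timeout)
                mdelay(1);

        /* timeout is exactly 0 here iff we gave up, so this check is reliable. */
        return timeout ? 0 : -ETIMEDOUT;
}
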
 
index d402196..cb72c1e 100644 (file)
@@ -105,7 +105,7 @@ static int ams_delta_set_audio_mode(struct snd_kcontrol *kcontrol,
        int pin, changed = 0;
 
        /* Refuse any mode changes if we are not able to control the codec. */
-       if (!cx20442_codec->hw_write)
+       if (!cx20442_codec->component.card->pop_time)
                return -EUNATCH;
 
        if (ucontrol->value.enumerated.item[0] >= control->items)
@@ -345,7 +345,7 @@ static void cx81801_receive(struct tty_struct *tty,
        if (!codec)
                return;
 
-       if (!codec->hw_write) {
+       if (!codec->component.card->pop_time) {
                /* First modem response, complete setup procedure */
 
                /* Initialize timer used for config pulse generation */
index d49adc8..7044287 100644 (file)
@@ -43,7 +43,7 @@ struct apq8016_sbc_data {
 static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-       struct snd_soc_codec *codec;
+       struct snd_soc_component *component;
        struct snd_soc_dai_link *dai_link = rtd->dai_link;
        struct snd_soc_card *card = rtd->card;
        struct apq8016_sbc_data *pdata = snd_soc_card_get_drvdata(card);
@@ -92,7 +92,7 @@ static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
 
                jack = pdata->jack.jack;
 
-               snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_MEDIA);
+               snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
                snd_jack_set_key(jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
                snd_jack_set_key(jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
                snd_jack_set_key(jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
@@ -102,15 +102,15 @@ static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
        for (i = 0 ; i < dai_link->num_codecs; i++) {
                struct snd_soc_dai *dai = rtd->codec_dais[i];
 
-               codec = dai->codec;
+               component = dai->component;
                /* Set default mclk for internal codec */
-               rval = snd_soc_codec_set_sysclk(codec, 0, 0, DEFAULT_MCLK_RATE,
+               rval = snd_soc_component_set_sysclk(component, 0, 0, DEFAULT_MCLK_RATE,
                                       SND_SOC_CLOCK_IN);
                if (rval != 0 && rval != -ENOTSUPP) {
                        dev_warn(card->dev, "Failed to set mclk: %d\n", rval);
                        return rval;
                }
-               rval = snd_soc_codec_set_jack(codec, &pdata->jack, NULL);
+               rval = snd_soc_component_set_jack(component, &pdata->jack, NULL);
                if (rval != 0 && rval != -ENOTSUPP) {
                        dev_warn(card->dev, "Failed to set jack: %d\n", rval);
                        return rval;
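
Note: the apq8016 change above is part of the codec-to-component migration: per-codec helpers such as snd_soc_codec_set_sysclk(), _set_pll() and _set_jack() move to their snd_soc_component_*() equivalents, with the component taken from the DAI; the argument order stays the same apart from the object type. Minimal sketch:

#include <sound/soc.h>

/* Sketch: set the codec sysclk through the component API from a DAI. */
static int example_init_dai_clock(struct snd_soc_dai *codec_dai,
                                  unsigned int rate)
{
        struct snd_soc_component *component = codec_dai->component;

        return snd_soc_component_set_sysclk(component, 0, 0, rate,
                                            SND_SOC_CLOCK_IN);
}
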
index d64fbbd..fa6cd1d 100644 (file)
@@ -206,7 +206,8 @@ static int rockchip_sound_da7219_init(struct snd_soc_pcm_runtime *rtd)
                return ret;
        }
 
-       snd_jack_set_key(rockchip_sound_jack.jack, SND_JACK_BTN_0, KEY_MEDIA);
+       snd_jack_set_key(
+               rockchip_sound_jack.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
        snd_jack_set_key(
                rockchip_sound_jack.jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
        snd_jack_set_key(
index 908211e..950823d 100644 (file)
@@ -328,6 +328,7 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
                val |= I2S_CHN_4;
                break;
        case 2:
+       case 1:
                val |= I2S_CHN_2;
                break;
        default:
@@ -460,7 +461,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
        },
        .capture = {
                .stream_name = "Capture",
-               .channels_min = 2,
+               .channels_min = 1,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_8000_192000,
                .formats = (SNDRV_PCM_FMTBIT_S8 |
@@ -504,6 +505,7 @@ static bool rockchip_i2s_rd_reg(struct device *dev, unsigned int reg)
        case I2S_INTCR:
        case I2S_XFER:
        case I2S_CLR:
+       case I2S_TXDR:
        case I2S_RXDR:
        case I2S_FIFOLR:
        case I2S_INTSR:
@@ -518,6 +520,9 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
        switch (reg) {
        case I2S_INTSR:
        case I2S_CLR:
+       case I2S_FIFOLR:
+       case I2S_TXDR:
+       case I2S_RXDR:
                return true;
        default:
                return false;
@@ -527,6 +532,8 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
 static bool rockchip_i2s_precious_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
+       case I2S_RXDR:
+               return true;
        default:
                return false;
        }
@@ -654,7 +661,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
        }
 
        if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
-               if (val >= 2 && val <= 8)
+               if (val >= 1 && val <= 8)
                        soc_dai->capture.channels_max = val;
        }
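
Note: marking I2S_TXDR/I2S_RXDR volatile and I2S_RXDR precious above matters because these are FIFO data ports: a cached value is meaningless, and a stray read (for example from a debugfs register dump) would consume a sample. The regmap_config hooks for such registers look roughly like this (register offset is illustrative):

#include <linux/device.h>
#include <linux/regmap.h>

#define EXAMPLE_RXDR    0x04    /* illustrative FIFO data register offset */

static bool example_volatile_reg(struct device *dev, unsigned int reg)
{
        return reg == EXAMPLE_RXDR;     /* never serve this from the cache */
}

static bool example_precious_reg(struct device *dev, unsigned int reg)
{
        return reg == EXAMPLE_RXDR;     /* reads have side effects (FIFO pop) */
}

static const struct regmap_config example_regmap_config = {
        .reg_bits     = 32,
        .reg_stride   = 4,
        .val_bits     = 32,
        .volatile_reg = example_volatile_reg,
        .precious_reg = example_precious_reg,
};
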
 
index ee5055d..a89fe9b 100644 (file)
@@ -322,26 +322,30 @@ static int rk_spdif_probe(struct platform_device *pdev)
        spdif->mclk = devm_clk_get(&pdev->dev, "mclk");
        if (IS_ERR(spdif->mclk)) {
                dev_err(&pdev->dev, "Can't retrieve rk_spdif master clock\n");
-               return PTR_ERR(spdif->mclk);
+               ret = PTR_ERR(spdif->mclk);
+               goto err_disable_hclk;
        }
 
        ret = clk_prepare_enable(spdif->mclk);
        if (ret) {
                dev_err(spdif->dev, "clock enable failed %d\n", ret);
-               return ret;
+               goto err_disable_clocks;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        regs = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(regs))
-               return PTR_ERR(regs);
+       if (IS_ERR(regs)) {
+               ret = PTR_ERR(regs);
+               goto err_disable_clocks;
+       }
 
        spdif->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "hclk", regs,
                                                  &rk_spdif_regmap_config);
        if (IS_ERR(spdif->regmap)) {
                dev_err(&pdev->dev,
                        "Failed to initialise managed register map\n");
-               return PTR_ERR(spdif->regmap);
+               ret = PTR_ERR(spdif->regmap);
+               goto err_disable_clocks;
        }
 
        spdif->playback_dma_data.addr = res->start + SPDIF_SMPDR;
@@ -373,6 +377,10 @@ static int rk_spdif_probe(struct platform_device *pdev)
 
 err_pm_runtime:
        pm_runtime_disable(&pdev->dev);
+err_disable_clocks:
+       clk_disable_unprepare(spdif->mclk);
+err_disable_hclk:
+       clk_disable_unprepare(spdif->hclk);
 
        return ret;
 }
index 34deba4..0e66cd8 100644 (file)
@@ -60,13 +60,13 @@ static int bells_set_bias_level(struct snd_soc_card *card,
 {
        struct snd_soc_pcm_runtime *rtd;
        struct snd_soc_dai *codec_dai;
-       struct snd_soc_codec *codec;
+       struct snd_soc_component *component;
        struct bells_drvdata *bells = card->drvdata;
        int ret;
 
        rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
        codec_dai = rtd->codec_dai;
-       codec = codec_dai->codec;
+       component = codec_dai->component;
 
        if (dapm->dev != codec_dai->dev)
                return 0;
@@ -76,7 +76,7 @@ static int bells_set_bias_level(struct snd_soc_card *card,
                if (dapm->bias_level != SND_SOC_BIAS_STANDBY)
                        break;
 
-               ret = snd_soc_codec_set_pll(codec, WM5102_FLL1,
+               ret = snd_soc_component_set_pll(component, WM5102_FLL1,
                                            ARIZONA_FLL_SRC_MCLK1,
                                            MCLK_RATE,
                                            bells->sysclk_rate);
@@ -84,7 +84,7 @@ static int bells_set_bias_level(struct snd_soc_card *card,
                        pr_err("Failed to start FLL: %d\n", ret);
 
                if (bells->asyncclk_rate) {
-                       ret = snd_soc_codec_set_pll(codec, WM5102_FLL2,
+                       ret = snd_soc_component_set_pll(component, WM5102_FLL2,
                                                    ARIZONA_FLL_SRC_AIF2BCLK,
                                                    BCLK2_RATE,
                                                    bells->asyncclk_rate);
@@ -106,27 +106,27 @@ static int bells_set_bias_level_post(struct snd_soc_card *card,
 {
        struct snd_soc_pcm_runtime *rtd;
        struct snd_soc_dai *codec_dai;
-       struct snd_soc_codec *codec;
+       struct snd_soc_component *component;
        struct bells_drvdata *bells = card->drvdata;
        int ret;
 
        rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
        codec_dai = rtd->codec_dai;
-       codec = codec_dai->codec;
+       component = codec_dai->component;
 
        if (dapm->dev != codec_dai->dev)
                return 0;
 
        switch (level) {
        case SND_SOC_BIAS_STANDBY:
-               ret = snd_soc_codec_set_pll(codec, WM5102_FLL1, 0, 0, 0);
+               ret = snd_soc_component_set_pll(component, WM5102_FLL1, 0, 0, 0);
                if (ret < 0) {
                        pr_err("Failed to stop FLL: %d\n", ret);
                        return ret;
                }
 
                if (bells->asyncclk_rate) {
-                       ret = snd_soc_codec_set_pll(codec, WM5102_FLL2,
+                       ret = snd_soc_component_set_pll(component, WM5102_FLL2,
                                                    0, 0, 0);
                        if (ret < 0) {
                                pr_err("Failed to stop FLL: %d\n", ret);
@@ -148,8 +148,8 @@ static int bells_late_probe(struct snd_soc_card *card)
 {
        struct bells_drvdata *bells = card->drvdata;
        struct snd_soc_pcm_runtime *rtd;
-       struct snd_soc_codec *wm0010;
-       struct snd_soc_codec *codec;
+       struct snd_soc_component *wm0010;
+       struct snd_soc_component *component;
        struct snd_soc_dai *aif1_dai;
        struct snd_soc_dai *aif2_dai;
        struct snd_soc_dai *aif3_dai;
@@ -157,22 +157,22 @@ static int bells_late_probe(struct snd_soc_card *card)
        int ret;
 
        rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_AP_DSP].name);
-       wm0010 = rtd->codec;
+       wm0010 = rtd->codec_dai->component;
 
        rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
-       codec = rtd->codec;
+       component = rtd->codec_dai->component;
        aif1_dai = rtd->codec_dai;
 
-       ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_SYSCLK,
+       ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_SYSCLK,
                                       ARIZONA_CLK_SRC_FLL1,
                                       bells->sysclk_rate,
                                       SND_SOC_CLOCK_IN);
        if (ret != 0) {
-               dev_err(codec->dev, "Failed to set SYSCLK: %d\n", ret);
+               dev_err(component->dev, "Failed to set SYSCLK: %d\n", ret);
                return ret;
        }
 
-       ret = snd_soc_codec_set_sysclk(wm0010, 0, 0, SYS_MCLK_RATE, 0);
+       ret = snd_soc_component_set_sysclk(wm0010, 0, 0, SYS_MCLK_RATE, 0);
        if (ret != 0) {
                dev_err(wm0010->dev, "Failed to set WM0010 clock: %d\n", ret);
                return ret;
@@ -182,20 +182,20 @@ static int bells_late_probe(struct snd_soc_card *card)
        if (ret != 0)
                dev_err(aif1_dai->dev, "Failed to set AIF1 clock: %d\n", ret);
 
-       ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_OPCLK, 0,
+       ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_OPCLK, 0,
                                       SYS_MCLK_RATE, SND_SOC_CLOCK_OUT);
        if (ret != 0)
-               dev_err(codec->dev, "Failed to set OPCLK: %d\n", ret);
+               dev_err(component->dev, "Failed to set OPCLK: %d\n", ret);
 
        if (card->num_rtd == DAI_CODEC_CP)
                return 0;
 
-       ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_ASYNCCLK,
+       ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_ASYNCCLK,
                                       ARIZONA_CLK_SRC_FLL2,
                                       bells->asyncclk_rate,
                                       SND_SOC_CLOCK_IN);
        if (ret != 0) {
-               dev_err(codec->dev, "Failed to set ASYNCCLK: %d\n", ret);
+               dev_err(component->dev, "Failed to set ASYNCCLK: %d\n", ret);
                return ret;
        }
 
@@ -221,7 +221,7 @@ static int bells_late_probe(struct snd_soc_card *card)
                return ret;
        }
 
-       ret = snd_soc_codec_set_sysclk(wm9081_dai->codec, WM9081_SYSCLK_MCLK,
+       ret = snd_soc_component_set_sysclk(wm9081_dai->component, WM9081_SYSCLK_MCLK,
                                       0, SYS_MCLK_RATE, 0);
        if (ret != 0) {
                dev_err(wm9081_dai->dev, "Failed to set MCLK: %d\n", ret);
index 8ddb087..4672688 100644 (file)
@@ -222,7 +222,7 @@ int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *cmd_mod,
                                   NULL, &val, NULL);
 
        val  = val      << shift;
-       mask = 0xffff   << shift;
+       mask = 0x0f1f   << shift;
 
        rsnd_mod_bset(adg_mod, CMDOUT_TIMSEL, mask, val);
 
@@ -250,7 +250,7 @@ int rsnd_adg_set_src_timesel_gen2(struct rsnd_mod *src_mod,
 
        in   = in       << shift;
        out  = out      << shift;
-       mask = 0xffff   << shift;
+       mask = 0x0f1f   << shift;
 
        switch (id / 2) {
        case 0:
@@ -380,7 +380,7 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
                        ckr = 0x80000000;
        }
 
-       rsnd_mod_bset(adg_mod, BRGCKR, 0x80FF0000, adg->ckr | ckr);
+       rsnd_mod_bset(adg_mod, BRGCKR, 0x80770000, adg->ckr | ckr);
        rsnd_mod_write(adg_mod, BRRA,  adg->rbga);
        rsnd_mod_write(adg_mod, BRRB,  adg->rbgb);
 
index c70eb20..64d5ecb 100644 (file)
@@ -197,16 +197,27 @@ int rsnd_io_is_working(struct rsnd_dai_stream *io)
        return 0;
 }
 
-int rsnd_runtime_channel_original(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_original_with_params(struct rsnd_dai_stream *io,
+                                             struct snd_pcm_hw_params *params)
 {
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 
-       return runtime->channels;
+       /*
+        * params is supplied when the hw rules are being refined,
+        * see
+        *      __rsnd_soc_hw_rule_rate()
+        *      __rsnd_soc_hw_rule_channels()
+        */
+       if (params)
+               return params_channels(params);
+       else
+               return runtime->channels;
 }
 
-int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
+                                              struct snd_pcm_hw_params *params)
 {
-       int chan = rsnd_runtime_channel_original(io);
+       int chan = rsnd_runtime_channel_original_with_params(io, params);
        struct rsnd_mod *ctu_mod = rsnd_io_to_mod_ctu(io);
 
        if (ctu_mod) {
@@ -219,12 +230,13 @@ int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io)
        return chan;
 }
 
-int rsnd_runtime_channel_for_ssi(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_for_ssi_with_params(struct rsnd_dai_stream *io,
+                                            struct snd_pcm_hw_params *params)
 {
        struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
        int chan = rsnd_io_is_play(io) ?
-               rsnd_runtime_channel_after_ctu(io) :
-               rsnd_runtime_channel_original(io);
+               rsnd_runtime_channel_after_ctu_with_params(io, params) :
+               rsnd_runtime_channel_original_with_params(io, params);
 
        /* Use Multi SSI */
        if (rsnd_runtime_is_ssi_multi(io))
@@ -262,10 +274,10 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
        struct device *dev = rsnd_priv_to_dev(priv);
 
-       switch (runtime->sample_bits) {
+       switch (snd_pcm_format_width(runtime->format)) {
        case 16:
                return 8 << 16;
-       case 32:
+       case 24:
                return 0 << 16;
        }
 
@@ -282,11 +294,12 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
        struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
        struct rsnd_mod *target;
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-       u32 val = 0x76543210;
-       u32 mask = ~0;
 
        /*
-        * *Hardware* L/R and *Software* L/R are inverted.
+        * *Hardware* L/R and *Software* L/R are inverted for 16bit data.
+        *          31..16 15...0
+        *      HW: [L ch] [R ch]
+        *      SW: [R ch] [L ch]
         * We need to care about inversion timing to control
         * Playback/Capture correctly.
         * The point is [DVC] needs *Hardware* L/R, [MEM] needs *Software* L/R
@@ -313,27 +326,13 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
                target = cmd ? cmd : ssiu;
        }
 
-       mask <<= runtime->channels * 4;
-       val = val & mask;
-
-       switch (runtime->sample_bits) {
-       case 16:
-               val |= 0x67452301 & ~mask;
-               break;
-       case 32:
-               val |= 0x76543210 & ~mask;
-               break;
-       }
-
-       /*
-        * exchange channeles on SRC if possible,
-        * otherwise, R/L volume settings on DVC
-        * changes inverted channels
-        */
-       if (mod == target)
-               return val;
-       else
+       /* Non target mod or 24bit data needs normal DALIGN */
+       if ((snd_pcm_format_width(runtime->format) != 16) ||
+           (mod != target))
                return 0x76543210;
+       /* Target mod needs inverted DALIGN when 16bit */
+       else
+               return 0x67452301;
 }
 
 u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
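
Several hunks in this file (and in ssi.c further down) replace runtime->sample_bits with snd_pcm_format_width(), which is why the 32bit cases become 24bit cases: for S24_LE the sample sits in a 32bit container but only 24 bits are valid. A hypothetical helper, not from the patch, makes the distinction explicit:

#include <linux/kernel.h>
#include <sound/pcm.h>

static void example_show_widths(struct snd_pcm_runtime *runtime)
{
	/* For SNDRV_PCM_FORMAT_S24_LE: valid = 24, physical = 32 */
	pr_info("valid=%d physical=%d sample_bits=%u\n",
		snd_pcm_format_width(runtime->format),
		snd_pcm_format_physical_width(runtime->format),
		runtime->sample_bits);	/* tracks the physical width */
}
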
@@ -363,12 +362,8 @@ u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
         * HW    24bit data is located as 0x******00
         *
         */
-       switch (runtime->sample_bits) {
-       case 16:
+       if (snd_pcm_format_width(runtime->format) == 16)
                return 0;
-       case 32:
-               break;
-       }
 
        for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
                tmod = rsnd_io_to_mod(io, mods[i]);
@@ -616,8 +611,6 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
-               rsnd_dai_stream_init(io, substream);
-
                ret = rsnd_dai_call(init, io, priv);
                if (ret < 0)
                        goto dai_trigger_end;
@@ -639,7 +632,6 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
 
                ret |= rsnd_dai_call(quit, io, priv);
 
-               rsnd_dai_stream_quit(io);
                break;
        default:
                ret = -EINVAL;
@@ -784,8 +776,9 @@ static int rsnd_soc_hw_rule(struct rsnd_priv *priv,
        return snd_interval_refine(iv, &p);
 }
 
-static int rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
-                                struct snd_pcm_hw_rule *rule)
+static int __rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
+                                  struct snd_pcm_hw_rule *rule,
+                                  int is_play)
 {
        struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
        struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
@@ -793,25 +786,37 @@ static int rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
        struct snd_soc_dai *dai = rule->private;
        struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
        struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+       struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
 
        /*
         * possible sampling rate limitation is same as
         * 2ch if it supports multi ssi
+        * and same as 8ch if TDM 6ch (see rsnd_ssi_config_init())
         */
        ic = *ic_;
-       if (1 < rsnd_rdai_ssi_lane_get(rdai)) {
-               ic.min = 2;
-               ic.max = 2;
-       }
+       ic.min =
+       ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
 
        return rsnd_soc_hw_rule(priv, rsnd_soc_hw_rate_list,
                                ARRAY_SIZE(rsnd_soc_hw_rate_list),
                                &ic, ir);
 }
 
+static int rsnd_soc_hw_rule_rate_playback(struct snd_pcm_hw_params *params,
+                                struct snd_pcm_hw_rule *rule)
+{
+       return __rsnd_soc_hw_rule_rate(params, rule, 1);
+}
+
+static int rsnd_soc_hw_rule_rate_capture(struct snd_pcm_hw_params *params,
+                                         struct snd_pcm_hw_rule *rule)
+{
+       return __rsnd_soc_hw_rule_rate(params, rule, 0);
+}
 
-static int rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
-                                    struct snd_pcm_hw_rule *rule)
+static int __rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
+                                      struct snd_pcm_hw_rule *rule,
+                                      int is_play)
 {
        struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
        struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
@@ -819,22 +824,34 @@ static int rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
        struct snd_soc_dai *dai = rule->private;
        struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
        struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+       struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
 
        /*
         * possible sampling rate limitation is same as
         * 2ch if it supports multi ssi
+        * and same as 8ch if TDM 6ch (see rsnd_ssi_config_init())
         */
        ic = *ic_;
-       if (1 < rsnd_rdai_ssi_lane_get(rdai)) {
-               ic.min = 2;
-               ic.max = 2;
-       }
+       ic.min =
+       ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
 
        return rsnd_soc_hw_rule(priv, rsnd_soc_hw_channels_list,
                                ARRAY_SIZE(rsnd_soc_hw_channels_list),
                                ir, &ic);
 }
 
+static int rsnd_soc_hw_rule_channels_playback(struct snd_pcm_hw_params *params,
+                                             struct snd_pcm_hw_rule *rule)
+{
+       return __rsnd_soc_hw_rule_channels(params, rule, 1);
+}
+
+static int rsnd_soc_hw_rule_channels_capture(struct snd_pcm_hw_params *params,
+                                            struct snd_pcm_hw_rule *rule)
+{
+       return __rsnd_soc_hw_rule_channels(params, rule, 0);
+}
+
 static const struct snd_pcm_hardware rsnd_pcm_hardware = {
        .info =         SNDRV_PCM_INFO_INTERLEAVED      |
                        SNDRV_PCM_INFO_MMAP             |
@@ -859,6 +876,8 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
        int ret;
        int i;
 
+       rsnd_dai_stream_init(io, substream);
+
        /*
         * Channel Limitation
         * It depends on Platform design
@@ -886,11 +905,17 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
         * It depends on Clock Master Mode
         */
        if (rsnd_rdai_is_clk_master(rdai)) {
+               int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+
                snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
-                                   rsnd_soc_hw_rule_rate, dai,
+                                   is_play ? rsnd_soc_hw_rule_rate_playback :
+                                             rsnd_soc_hw_rule_rate_capture,
+                                   dai,
                                    SNDRV_PCM_HW_PARAM_CHANNELS, -1);
                snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
-                                   rsnd_soc_hw_rule_channels, dai,
+                                   is_play ? rsnd_soc_hw_rule_channels_playback :
+                                             rsnd_soc_hw_rule_channels_capture,
+                                   dai,
                                    SNDRV_PCM_HW_PARAM_RATE, -1);
        }
 
@@ -915,6 +940,8 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
         * call rsnd_dai_call without spinlock
         */
        rsnd_dai_call(nolock_stop, io, priv);
+
+       rsnd_dai_stream_quit(io);
 }
 
 static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
@@ -990,7 +1017,7 @@ of_node_compatible:
 
 static void __rsnd_dai_probe(struct rsnd_priv *priv,
                             struct device_node *dai_np,
-                            int dai_i, int is_graph)
+                            int dai_i)
 {
        struct device_node *playback, *capture;
        struct rsnd_dai_stream *io_playback;
@@ -1089,13 +1116,13 @@ static int rsnd_dai_probe(struct rsnd_priv *priv)
        dai_i = 0;
        if (is_graph) {
                for_each_endpoint_of_node(dai_node, dai_np) {
-                       __rsnd_dai_probe(priv, dai_np, dai_i, is_graph);
+                       __rsnd_dai_probe(priv, dai_np, dai_i);
                        rsnd_ssi_parse_hdmi_connection(priv, dai_np, dai_i);
                        dai_i++;
                }
        } else {
                for_each_child_of_node(dai_node, dai_np)
-                       __rsnd_dai_probe(priv, dai_np, dai_i++, is_graph);
+                       __rsnd_dai_probe(priv, dai_np, dai_i++);
        }
 
        return 0;
@@ -1332,8 +1359,8 @@ static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd)
 
        return snd_pcm_lib_preallocate_pages_for_all(
                rtd->pcm,
-               SNDRV_DMA_TYPE_CONTINUOUS,
-               snd_dma_continuous_data(GFP_KERNEL),
+               SNDRV_DMA_TYPE_DEV,
+               rtd->card->snd_card->dev,
                PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
 }
 
@@ -1496,6 +1523,8 @@ static int rsnd_remove(struct platform_device *pdev)
        };
        int ret = 0, i;
 
+       snd_soc_disconnect_sync(&pdev->dev);
+
        pm_runtime_disable(&pdev->dev);
 
        for_each_rsnd_dai(rdai, priv, i) {
index fd557ab..41de234 100644 (file)
 struct rsnd_dmaen {
        struct dma_chan         *chan;
        dma_cookie_t            cookie;
-       dma_addr_t              dma_buf;
        unsigned int            dma_len;
-       unsigned int            dma_period;
-       unsigned int            dma_cnt;
 };
 
 struct rsnd_dmapp {
@@ -71,69 +68,10 @@ static struct rsnd_mod mem = {
 /*
  *             Audio DMAC
  */
-#define rsnd_dmaen_sync(dmaen, io, i)  __rsnd_dmaen_sync(dmaen, io, i, 1)
-#define rsnd_dmaen_unsync(dmaen, io, i)        __rsnd_dmaen_sync(dmaen, io, i, 0)
-static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io,
-                             int i, int sync)
-{
-       struct device *dev = dmaen->chan->device->dev;
-       enum dma_data_direction dir;
-       int is_play = rsnd_io_is_play(io);
-       dma_addr_t buf;
-       int len, max;
-       size_t period;
-
-       len     = dmaen->dma_len;
-       period  = dmaen->dma_period;
-       max     = len / period;
-       i       = i % max;
-       buf     = dmaen->dma_buf + (period * i);
-
-       dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
-       if (sync)
-               dma_sync_single_for_device(dev, buf, period, dir);
-       else
-               dma_sync_single_for_cpu(dev, buf, period, dir);
-}
-
 static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
                                  struct rsnd_dai_stream *io)
 {
-       struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
-       struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
-       struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
-       bool elapsed = false;
-       unsigned long flags;
-
-       /*
-        * Renesas sound Gen1 needs 1 DMAC,
-        * Gen2 needs 2 DMAC.
-        * In Gen2 case, it are Audio-DMAC, and Audio-DMAC-peri-peri.
-        * But, Audio-DMAC-peri-peri doesn't have interrupt,
-        * and this driver is assuming that here.
-        */
-       spin_lock_irqsave(&priv->lock, flags);
-
-       if (rsnd_io_is_working(io)) {
-               rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt);
-
-               /*
-                * Next period is already started.
-                * Let's sync Next Next period
-                * see
-                *      rsnd_dmaen_start()
-                */
-               rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);
-
-               elapsed = true;
-
-               dmaen->dma_cnt++;
-       }
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       if (elapsed)
+       if (rsnd_io_is_working(io))
                rsnd_dai_period_elapsed(io);
 }
 
@@ -165,14 +103,8 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod,
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
 
-       if (dmaen->chan) {
-               int is_play = rsnd_io_is_play(io);
-
+       if (dmaen->chan)
                dmaengine_terminate_all(dmaen->chan);
-               dma_unmap_single(dmaen->chan->device->dev,
-                                dmaen->dma_buf, dmaen->dma_len,
-                                is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-       }
 
        return 0;
 }
@@ -237,11 +169,7 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
        struct device *dev = rsnd_priv_to_dev(priv);
        struct dma_async_tx_descriptor *desc;
        struct dma_slave_config cfg = {};
-       dma_addr_t buf;
-       size_t len;
-       size_t period;
        int is_play = rsnd_io_is_play(io);
-       int i;
        int ret;
 
        cfg.direction   = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
@@ -258,19 +186,10 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
        if (ret < 0)
                return ret;
 
-       len     = snd_pcm_lib_buffer_bytes(substream);
-       period  = snd_pcm_lib_period_bytes(substream);
-       buf     = dma_map_single(dmaen->chan->device->dev,
-                                substream->runtime->dma_area,
-                                len,
-                                is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-       if (dma_mapping_error(dmaen->chan->device->dev, buf)) {
-               dev_err(dev, "dma map failed\n");
-               return -EIO;
-       }
-
        desc = dmaengine_prep_dma_cyclic(dmaen->chan,
-                                        buf, len, period,
+                                        substream->runtime->dma_addr,
+                                        snd_pcm_lib_buffer_bytes(substream),
+                                        snd_pcm_lib_period_bytes(substream),
                                         is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
@@ -282,18 +201,7 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
        desc->callback          = rsnd_dmaen_complete;
        desc->callback_param    = rsnd_mod_get(dma);
 
-       dmaen->dma_buf          = buf;
-       dmaen->dma_len          = len;
-       dmaen->dma_period       = period;
-       dmaen->dma_cnt          = 0;
-
-       /*
-        * synchronize this and next period
-        * see
-        *      __rsnd_dmaen_complete()
-        */
-       for (i = 0; i < 2; i++)
-               rsnd_dmaen_sync(dmaen, io, i);
+       dmaen->dma_len          = snd_pcm_lib_buffer_bytes(substream);
 
        dmaen->cookie = dmaengine_submit(desc);
        if (dmaen->cookie < 0) {
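
A condensed sketch of the flow after this rework (hypothetical helper, assuming the PCM buffer is preallocated with SNDRV_DMA_TYPE_DEV as in the core.c hunk above): runtime->dma_addr is already a device address, so it can be handed to dmaengine directly and the deleted dma_map_single()/dma_sync_single_*() calls are no longer needed.

#include <linux/dmaengine.h>
#include <sound/pcm.h>

static int example_start_cyclic(struct dma_chan *chan,
				struct snd_pcm_substream *substream,
				dma_async_tx_callback complete, void *arg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_dma_cyclic(chan,
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	desc->callback = complete;
	desc->callback_param = arg;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);

	return 0;
}
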
index 57cd2bc..ad65235 100644 (file)
@@ -399,9 +399,18 @@ void rsnd_parse_connect_common(struct rsnd_dai *rdai,
                struct device_node *playback,
                struct device_node *capture);
 
-int rsnd_runtime_channel_original(struct rsnd_dai_stream *io);
-int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io);
-int rsnd_runtime_channel_for_ssi(struct rsnd_dai_stream *io);
+#define rsnd_runtime_channel_original(io) \
+       rsnd_runtime_channel_original_with_params(io, NULL)
+int rsnd_runtime_channel_original_with_params(struct rsnd_dai_stream *io,
+                               struct snd_pcm_hw_params *params);
+#define rsnd_runtime_channel_after_ctu(io)                     \
+       rsnd_runtime_channel_after_ctu_with_params(io, NULL)
+int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
+                               struct snd_pcm_hw_params *params);
+#define rsnd_runtime_channel_for_ssi(io) \
+       rsnd_runtime_channel_for_ssi_with_params(io, NULL)
+int rsnd_runtime_channel_for_ssi_with_params(struct rsnd_dai_stream *io,
+                                struct snd_pcm_hw_params *params);
 int rsnd_runtime_is_ssi_multi(struct rsnd_dai_stream *io);
 int rsnd_runtime_is_ssi_tdm(struct rsnd_dai_stream *io);
 
index fece1e5..97a9db8 100644 (file)
@@ -79,8 +79,8 @@ struct rsnd_ssi {
        int irq;
        unsigned int usrcnt;
 
+       /* for PIO */
        int byte_pos;
-       int period_pos;
        int byte_per_period;
        int next_period_byte;
 };
@@ -371,11 +371,11 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
        if (rsnd_io_is_play(io))
                cr_own |= TRMD;
 
-       switch (runtime->sample_bits) {
+       switch (snd_pcm_format_width(runtime->format)) {
        case 16:
                cr_own |= DWL_16;
                break;
-       case 32:
+       case 24:
                cr_own |= DWL_24;
                break;
        }
@@ -414,59 +414,6 @@ static void rsnd_ssi_register_setup(struct rsnd_mod *mod)
                                        ssi->cr_en);
 }
 
-static void rsnd_ssi_pointer_init(struct rsnd_mod *mod,
-                                 struct rsnd_dai_stream *io)
-{
-       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-       struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-
-       ssi->byte_pos           = 0;
-       ssi->period_pos         = 0;
-       ssi->byte_per_period    = runtime->period_size *
-                                 runtime->channels *
-                                 samples_to_bytes(runtime, 1);
-       ssi->next_period_byte   = ssi->byte_per_period;
-}
-
-static int rsnd_ssi_pointer_offset(struct rsnd_mod *mod,
-                                  struct rsnd_dai_stream *io,
-                                  int additional)
-{
-       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-       struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-       int pos = ssi->byte_pos + additional;
-
-       pos %= (runtime->periods * ssi->byte_per_period);
-
-       return pos;
-}
-
-static bool rsnd_ssi_pointer_update(struct rsnd_mod *mod,
-                                   struct rsnd_dai_stream *io,
-                                   int byte)
-{
-       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-
-       ssi->byte_pos += byte;
-
-       if (ssi->byte_pos >= ssi->next_period_byte) {
-               struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-
-               ssi->period_pos++;
-               ssi->next_period_byte += ssi->byte_per_period;
-
-               if (ssi->period_pos >= runtime->periods) {
-                       ssi->byte_pos = 0;
-                       ssi->period_pos = 0;
-                       ssi->next_period_byte = ssi->byte_per_period;
-               }
-
-               return true;
-       }
-
-       return false;
-}
-
 /*
  *     SSI mod common functions
  */
@@ -480,8 +427,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
        if (!rsnd_ssi_is_run_mods(mod, io))
                return 0;
 
-       rsnd_ssi_pointer_init(mod, io);
-
        ssi->usrcnt++;
 
        rsnd_mod_power_on(mod);
@@ -652,6 +597,8 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod,
        return 0;
 }
 
+static bool rsnd_ssi_pio_interrupt(struct rsnd_mod *mod,
+                                  struct rsnd_dai_stream *io);
 static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
                                 struct rsnd_dai_stream *io)
 {
@@ -670,30 +617,8 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
        status = rsnd_ssi_status_get(mod);
 
        /* PIO only */
-       if (!is_dma && (status & DIRQ)) {
-               struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-               u32 *buf = (u32 *)(runtime->dma_area +
-                                  rsnd_ssi_pointer_offset(mod, io, 0));
-               int shift = 0;
-
-               switch (runtime->sample_bits) {
-               case 32:
-                       shift = 8;
-                       break;
-               }
-
-               /*
-                * 8/16/32 data can be assesse to TDR/RDR register
-                * directly as 32bit data
-                * see rsnd_ssi_init()
-                */
-               if (rsnd_io_is_play(io))
-                       rsnd_mod_write(mod, SSITDR, (*buf) << shift);
-               else
-                       *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
-
-               elapsed = rsnd_ssi_pointer_update(mod, io, sizeof(*buf));
-       }
+       if (!is_dma && (status & DIRQ))
+               elapsed = rsnd_ssi_pio_interrupt(mod, io);
 
        /* DMA only */
        if (is_dma && (status & (UIRQ | OIRQ)))
@@ -831,14 +756,78 @@ static int rsnd_ssi_common_remove(struct rsnd_mod *mod,
        return 0;
 }
 
-static int rsnd_ssi_pointer(struct rsnd_mod *mod,
+/*
+ *     SSI PIO functions
+ */
+static bool rsnd_ssi_pio_interrupt(struct rsnd_mod *mod,
+                                  struct rsnd_dai_stream *io)
+{
+       struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       u32 *buf = (u32 *)(runtime->dma_area + ssi->byte_pos);
+       int shift = 0;
+       int byte_pos;
+       bool elapsed = false;
+
+       if (snd_pcm_format_width(runtime->format) == 24)
+               shift = 8;
+
+       /*
+        * 8/16/32 bit data can be accessed in the TDR/RDR register
+        * directly as 32bit data
+        * see rsnd_ssi_init()
+        */
+       if (rsnd_io_is_play(io))
+               rsnd_mod_write(mod, SSITDR, (*buf) << shift);
+       else
+               *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
+
+       byte_pos = ssi->byte_pos + sizeof(*buf);
+
+       if (byte_pos >= ssi->next_period_byte) {
+               int period_pos = byte_pos / ssi->byte_per_period;
+
+               if (period_pos >= runtime->periods) {
+                       byte_pos = 0;
+                       period_pos = 0;
+               }
+
+               ssi->next_period_byte = (period_pos + 1) * ssi->byte_per_period;
+
+               elapsed = true;
+       }
+
+       WRITE_ONCE(ssi->byte_pos, byte_pos);
+
+       return elapsed;
+}
+
+static int rsnd_ssi_pio_init(struct rsnd_mod *mod,
+                            struct rsnd_dai_stream *io,
+                            struct rsnd_priv *priv)
+{
+       struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+
+       if (!rsnd_ssi_is_parent(mod, io)) {
+               ssi->byte_pos           = 0;
+               ssi->byte_per_period    = runtime->period_size *
+                                         runtime->channels *
+                                         samples_to_bytes(runtime, 1);
+               ssi->next_period_byte   = ssi->byte_per_period;
+       }
+
+       return rsnd_ssi_init(mod, io, priv);
+}
+
+static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
                            struct rsnd_dai_stream *io,
                            snd_pcm_uframes_t *pointer)
 {
        struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 
-       *pointer = bytes_to_frames(runtime, ssi->byte_pos);
+       *pointer = bytes_to_frames(runtime, READ_ONCE(ssi->byte_pos));
 
        return 0;
 }
@@ -847,12 +836,12 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
        .name   = SSI_NAME,
        .probe  = rsnd_ssi_common_probe,
        .remove = rsnd_ssi_common_remove,
-       .init   = rsnd_ssi_init,
+       .init   = rsnd_ssi_pio_init,
        .quit   = rsnd_ssi_quit,
        .start  = rsnd_ssi_start,
        .stop   = rsnd_ssi_stop,
        .irq    = rsnd_ssi_irq,
-       .pointer= rsnd_ssi_pointer,
+       .pointer = rsnd_ssi_pio_pointer,
        .pcm_new = rsnd_ssi_pcm_new,
        .hw_params = rsnd_ssi_hw_params,
 };
index 4d94875..6ff8a36 100644 (file)
@@ -125,6 +125,7 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
 {
        int hdmi = rsnd_ssi_hdmi_port(io);
        int ret;
+       u32 mode = 0;
 
        ret = rsnd_ssiu_init(mod, io, priv);
        if (ret < 0)
@@ -136,9 +137,11 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
                 * see
                 *      rsnd_ssi_config_init()
                 */
-               rsnd_mod_write(mod, SSI_MODE, 0x1);
+               mode = 0x1;
        }
 
+       rsnd_mod_write(mod, SSI_MODE, mode);
+
        if (rsnd_ssi_use_busif(io)) {
                rsnd_mod_write(mod, SSI_BUSIF_ADINR,
                               rsnd_get_adinr_bit(mod, io) |
index f21df28..3d7e1ff 100644 (file)
 
 #include <sound/soc-acpi.h>
 
-static acpi_status snd_soc_acpi_find_name(acpi_handle handle, u32 level,
-                                     void *context, void **ret)
-{
-       struct acpi_device *adev;
-       const char *name = NULL;
-
-       if (acpi_bus_get_device(handle, &adev))
-               return AE_OK;
-
-       if (adev->status.present && adev->status.functional) {
-               name = acpi_dev_name(adev);
-               *(const char **)ret = name;
-               return AE_CTRL_TERMINATE;
-       }
-
-       return AE_OK;
-}
-
-const char *snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
-{
-       const char *name = NULL;
-       acpi_status status;
-
-       status = acpi_get_devices(hid, snd_soc_acpi_find_name, NULL,
-                                 (void **)&name);
-
-       if (ACPI_FAILURE(status) || name[0] == '\0')
-               return NULL;
-
-       return name;
-}
-EXPORT_SYMBOL_GPL(snd_soc_acpi_find_name_from_hid);
-
-static acpi_status snd_soc_acpi_mach_match(acpi_handle handle, u32 level,
-                                      void *context, void **ret)
-{
-       unsigned long long sta;
-       acpi_status status;
-
-       *(bool *)context = true;
-       status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
-       if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
-               *(bool *)context = false;
-
-       return AE_OK;
-}
-
-bool snd_soc_acpi_check_hid(const u8 hid[ACPI_ID_LEN])
-{
-       acpi_status status;
-       bool found = false;
-
-       status = acpi_get_devices(hid, snd_soc_acpi_mach_match, &found, NULL);
-
-       if (ACPI_FAILURE(status))
-               return false;
-
-       return found;
-}
-EXPORT_SYMBOL_GPL(snd_soc_acpi_check_hid);
-
 struct snd_soc_acpi_mach *
 snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
 {
        struct snd_soc_acpi_mach *mach;
 
        for (mach = machines; mach->id[0]; mach++) {
-               if (snd_soc_acpi_check_hid(mach->id) == true) {
-                       if (mach->machine_quirk == NULL)
-                               return mach;
-
-                       if (mach->machine_quirk(mach) != NULL)
-                               return mach;
+               if (acpi_dev_present(mach->id, NULL, -1)) {
+                       if (mach->machine_quirk)
+                               mach = mach->machine_quirk(mach);
+                       return mach;
                }
        }
        return NULL;
@@ -163,7 +100,7 @@ struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
                return mach;
 
        for (i = 0; i < codec_list->num_codecs; i++) {
-               if (snd_soc_acpi_check_hid(codec_list->codecs[i]) != true)
+               if (!acpi_dev_present(codec_list->codecs[i], NULL, -1))
                        return NULL;
        }
 
index d9b1e64..81232f4 100644 (file)
@@ -1096,7 +1096,6 @@ static struct snd_compr_ops soc_compr_dyn_ops = {
  */
 int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 {
-       struct snd_soc_codec *codec = rtd->codec;
        struct snd_soc_platform *platform = rtd->platform;
        struct snd_soc_component *component;
        struct snd_soc_rtdcom_list *rtdcom;
@@ -1199,8 +1198,9 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
        ret = snd_compress_new(rtd->card->snd_card, num, direction,
                                new_name, compr);
        if (ret < 0) {
+               component = rtd->codec_dai->component;
                pr_err("compress asoc: can't create compress for codec %s\n",
-                       codec->component.name);
+                       component->name);
                goto compr_err;
        }
 
index c0edac8..e918795 100644 (file)
@@ -213,7 +213,7 @@ static umode_t soc_dev_attr_is_visible(struct kobject *kobj,
 
        if (attr == &dev_attr_pmdown_time.attr)
                return attr->mode; /* always visible */
-       return rtd->codec ? attr->mode : 0; /* enabled only with codec */
+       return rtd->num_codecs ? attr->mode : 0; /* enabled only with codec */
 }
 
 static const struct attribute_group soc_dapm_dev_group = {
@@ -349,120 +349,84 @@ static void soc_init_codec_debugfs(struct snd_soc_component *component)
                        "ASoC: Failed to create codec register debugfs file\n");
 }
 
-static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
-                                   size_t count, loff_t *ppos)
+static int codec_list_seq_show(struct seq_file *m, void *v)
 {
-       char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       ssize_t len, ret = 0;
        struct snd_soc_codec *codec;
 
-       if (!buf)
-               return -ENOMEM;
-
        mutex_lock(&client_mutex);
 
-       list_for_each_entry(codec, &codec_list, list) {
-               len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
-                              codec->component.name);
-               if (len >= 0)
-                       ret += len;
-               if (ret > PAGE_SIZE) {
-                       ret = PAGE_SIZE;
-                       break;
-               }
-       }
+       list_for_each_entry(codec, &codec_list, list)
+               seq_printf(m, "%s\n", codec->component.name);
 
        mutex_unlock(&client_mutex);
 
-       if (ret >= 0)
-               ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
-
-       kfree(buf);
+       return 0;
+}
 
-       return ret;
+static int codec_list_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, codec_list_seq_show, NULL);
 }
 
 static const struct file_operations codec_list_fops = {
-       .read = codec_list_read_file,
-       .llseek = default_llseek,/* read accesses f_pos */
+       .open = codec_list_seq_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
 };
 
-static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
-                                 size_t count, loff_t *ppos)
+static int dai_list_seq_show(struct seq_file *m, void *v)
 {
-       char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       ssize_t len, ret = 0;
        struct snd_soc_component *component;
        struct snd_soc_dai *dai;
 
-       if (!buf)
-               return -ENOMEM;
-
        mutex_lock(&client_mutex);
 
-       list_for_each_entry(component, &component_list, list) {
-               list_for_each_entry(dai, &component->dai_list, list) {
-                       len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
-                               dai->name);
-                       if (len >= 0)
-                               ret += len;
-                       if (ret > PAGE_SIZE) {
-                               ret = PAGE_SIZE;
-                               break;
-                       }
-               }
-       }
+       list_for_each_entry(component, &component_list, list)
+               list_for_each_entry(dai, &component->dai_list, list)
+                       seq_printf(m, "%s\n", dai->name);
 
        mutex_unlock(&client_mutex);
 
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
-
-       kfree(buf);
+       return 0;
+}
 
-       return ret;
+static int dai_list_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, dai_list_seq_show, NULL);
 }
 
 static const struct file_operations dai_list_fops = {
-       .read = dai_list_read_file,
-       .llseek = default_llseek,/* read accesses f_pos */
+       .open = dai_list_seq_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
 };
 
-static ssize_t platform_list_read_file(struct file *file,
-                                      char __user *user_buf,
-                                      size_t count, loff_t *ppos)
+static int platform_list_seq_show(struct seq_file *m, void *v)
 {
-       char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       ssize_t len, ret = 0;
        struct snd_soc_platform *platform;
 
-       if (!buf)
-               return -ENOMEM;
-
        mutex_lock(&client_mutex);
 
-       list_for_each_entry(platform, &platform_list, list) {
-               len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
-                              platform->component.name);
-               if (len >= 0)
-                       ret += len;
-               if (ret > PAGE_SIZE) {
-                       ret = PAGE_SIZE;
-                       break;
-               }
-       }
+       list_for_each_entry(platform, &platform_list, list)
+               seq_printf(m, "%s\n", platform->component.name);
 
        mutex_unlock(&client_mutex);
 
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
-
-       kfree(buf);
+       return 0;
+}
 
-       return ret;
+static int platform_list_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, platform_list_seq_show, NULL);
 }
 
 static const struct file_operations platform_list_fops = {
-       .read = platform_list_read_file,
-       .llseek = default_llseek,/* read accesses f_pos */
+       .open = platform_list_seq_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
 };
 
 static void soc_init_card_debugfs(struct snd_soc_card *card)
@@ -491,7 +455,6 @@ static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
        debugfs_remove_recursive(card->debugfs_card_root);
 }
 
-
 static void snd_soc_debugfs_init(void)
 {
        snd_soc_debugfs_root = debugfs_create_dir("asoc", NULL);
@@ -598,6 +561,7 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
 
        return NULL;
 }
+EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup);
 
 struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
                const char *dai_link, int stream)
@@ -1392,6 +1356,17 @@ static int soc_init_dai_link(struct snd_soc_card *card,
        return 0;
 }
 
+void snd_soc_disconnect_sync(struct device *dev)
+{
+       struct snd_soc_component *component = snd_soc_lookup_component(dev, NULL);
+
+       if (!component || !component->card)
+               return;
+
+       snd_card_disconnect_sync(component->card->snd_card);
+}
+EXPORT_SYMBOL_GPL(snd_soc_disconnect_sync);
+
 /**
  * snd_soc_add_dai_link - Add a DAI link dynamically
  * @card: The ASoC card to which the DAI link is added
@@ -1945,7 +1920,9 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
        }
 
        /* Flip the polarity for the "CPU" end of a CODEC<->CODEC link */
-       if (cpu_dai->codec) {
+       /* a component that has non_legacy_dai_naming set is a CODEC */
+       if (cpu_dai->codec ||
+           cpu_dai->component->driver->non_legacy_dai_naming) {
                unsigned int inv_dai_fmt;
 
                inv_dai_fmt = dai_fmt & ~SND_SOC_DAIFMT_MASTER_MASK;
@@ -3149,7 +3126,7 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
        if (!dai->driver->ops)
                dai->driver->ops = &null_dai_ops;
 
-       list_add(&dai->list, &component->dai_list);
+       list_add_tail(&dai->list, &component->dai_list);
        component->num_dai++;
 
        dev_dbg(dev, "ASoC: Registered DAI '%s'\n", dai->name);
@@ -3176,8 +3153,6 @@ static int snd_soc_register_dais(struct snd_soc_component *component,
 
        dev_dbg(dev, "ASoC: dai register %s #%zu\n", dev_name(dev), count);
 
-       component->dai_drv = dai_drv;
-
        for (i = 0; i < count; i++) {
 
                dai = soc_add_dai(component, dai_drv + i,
@@ -4354,6 +4329,7 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
                                                             args,
                                                             dai_name);
                } else {
+                       struct snd_soc_dai *dai;
                        int id = -1;
 
                        switch (args->args_count) {
@@ -4375,7 +4351,14 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
 
                        ret = 0;
 
-                       *dai_name = pos->dai_drv[id].name;
+                       /* find target DAI */
+                       list_for_each_entry(dai, &pos->dai_list, list) {
+                               if (id == 0)
+                                       break;
+                               id--;
+                       }
+
+                       *dai_name = dai->driver->name;
                        if (!*dai_name)
                                *dai_name = pos->name;
                }
index 20340ad..2bc1c4c 100644 (file)
@@ -34,6 +34,10 @@ int snd_soc_component_read(struct snd_soc_component *component,
                ret = regmap_read(component->regmap, reg, val);
        else if (component->read)
                ret = component->read(component, reg, val);
+       else if (component->driver->read) {
+               *val = component->driver->read(component, reg);
+               ret = 0;
+       }
        else
                ret = -EIO;
 
@@ -70,6 +74,8 @@ int snd_soc_component_write(struct snd_soc_component *component,
                return regmap_write(component->regmap, reg, val);
        else if (component->write)
                return component->write(component, reg, val);
+       else if (component->driver->write)
+               return component->driver->write(component, reg, val);
        else
                return -EIO;
 }
index 500f98c..7144a51 100644 (file)
@@ -378,7 +378,7 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
        unsigned int rshift = mc->rshift;
        int max = mc->max;
        int min = mc->min;
-       int mask = (1 << (fls(min + max) - 1)) - 1;
+       unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
        unsigned int val;
        int ret;
 
@@ -423,7 +423,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
        unsigned int rshift = mc->rshift;
        int max = mc->max;
        int min = mc->min;
-       int mask = (1 << (fls(min + max) - 1)) - 1;
+       unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
        int err = 0;
        unsigned int val, val_mask, val2 = 0;
 
index e30aacb..bcd3da2 100644 (file)
@@ -288,7 +288,7 @@ static const struct snd_soc_platform_driver dummy_platform = {
        .ops = &dummy_dma_ops,
 };
 
-static struct snd_soc_codec_driver dummy_codec;
+static const struct snd_soc_codec_driver dummy_codec;
 
 #define STUB_RATES     SNDRV_PCM_RATE_8000_192000
 #define STUB_FORMATS   (SNDRV_PCM_FMTBIT_S8 | \
index 3398e6c..3ad881f 100644 (file)
@@ -28,4 +28,16 @@ config SND_SOC_STM32_SPDIFRX
        help
          Say Y if you want to enable S/PDIF capture for STM32
 
+config SND_SOC_STM32_DFSDM
+       tristate "SoC Audio support for STM32 DFSDM"
+       depends on ARCH_STM32 || COMPILE_TEST
+       depends on SND_SOC
+       depends on STM32_DFSDM_ADC
+       select SND_SOC_GENERIC_DMAENGINE_PCM
+       select SND_SOC_DMIC
+       select IIO_BUFFER_CB
+       help
+         Select this option to enable the STM32 Digital Filter
+         for Sigma Delta Modulators (DFSDM) driver used
+         in various STM32 series for digital microphone capture.
 endmenu
index 5b7f0fa..3143c0b 100644 (file)
@@ -13,3 +13,6 @@ obj-$(CONFIG_SND_SOC_STM32_I2S) += snd-soc-stm32-i2s.o
 # SPDIFRX
 snd-soc-stm32-spdifrx-objs := stm32_spdifrx.o
 obj-$(CONFIG_SND_SOC_STM32_SPDIFRX) += snd-soc-stm32-spdifrx.o
+
+#DFSDM
+obj-$(CONFIG_SND_SOC_STM32_DFSDM) += stm32_adfsdm.o
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
new file mode 100644 (file)
index 0000000..7306e3e
--- /dev/null
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is part of STM32 DFSDM ASoC DAI driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          Olivier Moysan <olivier.moysan@st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/adc/stm32-dfsdm-adc.h>
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#define STM32_ADFSDM_DRV_NAME "stm32-adfsdm"
+
+#define DFSDM_MAX_PERIOD_SIZE  (PAGE_SIZE / 2)
+#define DFSDM_MAX_PERIODS      6
+
+struct stm32_adfsdm_priv {
+       struct snd_soc_dai_driver dai_drv;
+       struct snd_pcm_substream *substream;
+       struct device *dev;
+
+       /* IIO */
+       struct iio_channel *iio_ch;
+       struct iio_cb_buffer *iio_cb;
+       bool iio_active;
+
+       /* PCM buffer */
+       unsigned char *pcm_buff;
+       unsigned int pos;
+};
+
+static const struct snd_pcm_hardware stm32_adfsdm_pcm_hw = {
+       .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+           SNDRV_PCM_INFO_PAUSE,
+       .formats = SNDRV_PCM_FMTBIT_S32_LE,
+
+       .rate_min = 8000,
+       .rate_max = 32000,
+
+       .channels_min = 1,
+       .channels_max = 1,
+
+       .periods_min = 2,
+       .periods_max = DFSDM_MAX_PERIODS,
+
+       .period_bytes_max = DFSDM_MAX_PERIOD_SIZE,
+       .buffer_bytes_max = DFSDM_MAX_PERIODS * DFSDM_MAX_PERIOD_SIZE
+};
+
+static void stm32_adfsdm_shutdown(struct snd_pcm_substream *substream,
+                                 struct snd_soc_dai *dai)
+{
+       struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+       if (priv->iio_active) {
+               iio_channel_stop_all_cb(priv->iio_cb);
+               priv->iio_active = false;
+       }
+}
+
+static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
+                                   struct snd_soc_dai *dai)
+{
+       struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
+       int ret;
+
+       ret = iio_write_channel_attribute(priv->iio_ch,
+                                         substream->runtime->rate, 0,
+                                         IIO_CHAN_INFO_SAMP_FREQ);
+       if (ret < 0) {
+               dev_err(dai->dev, "%s: Failed to set %d sampling rate\n",
+                       __func__, substream->runtime->rate);
+               return ret;
+       }
+
+       if (!priv->iio_active) {
+               ret = iio_channel_start_all_cb(priv->iio_cb);
+               if (!ret)
+                       priv->iio_active = true;
+               else
+                       dev_err(dai->dev, "%s: IIO channel start failed (%d)\n",
+                               __func__, ret);
+       }
+
+       return ret;
+}
+
+static int stm32_adfsdm_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+                                  unsigned int freq, int dir)
+{
+       struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
+       ssize_t size;
+       char str_freq[10];
+
+       dev_dbg(dai->dev, "%s: Enter for freq %d\n", __func__, freq);
+
+       /* Set IIO frequency if CODEC is master as clock comes from SPI_IN */
+
+       snprintf(str_freq, sizeof(str_freq), "%d\n", freq);
+       size = iio_write_channel_ext_info(priv->iio_ch, "spi_clk_freq",
+                                         str_freq, sizeof(str_freq));
+       if (size != sizeof(str_freq)) {
+               dev_err(dai->dev, "%s: Failed to set SPI clock\n",
+                       __func__);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static const struct snd_soc_dai_ops stm32_adfsdm_dai_ops = {
+       .shutdown = stm32_adfsdm_shutdown,
+       .prepare = stm32_adfsdm_dai_prepare,
+       .set_sysclk = stm32_adfsdm_set_sysclk,
+};
+
+static const struct snd_soc_dai_driver stm32_adfsdm_dai = {
+       .capture = {
+                   .channels_min = 1,
+                   .channels_max = 1,
+                   .formats = SNDRV_PCM_FMTBIT_S32_LE,
+                   .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+                             SNDRV_PCM_RATE_32000),
+                   },
+       .ops = &stm32_adfsdm_dai_ops,
+};
+
+static const struct snd_soc_component_driver stm32_adfsdm_dai_component = {
+       .name = "stm32_dfsdm_audio",
+};
+
+static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private)
+{
+       struct stm32_adfsdm_priv *priv = private;
+       struct snd_soc_pcm_runtime *rtd = priv->substream->private_data;
+       u8 *pcm_buff = priv->pcm_buff;
+       u8 *src_buff = (u8 *)data;
+       unsigned int buff_size = snd_pcm_lib_buffer_bytes(priv->substream);
+       unsigned int period_size = snd_pcm_lib_period_bytes(priv->substream);
+       unsigned int old_pos = priv->pos;
+       unsigned int cur_size = size;
+
+       dev_dbg(rtd->dev, "%s: buff_add :%p, pos = %d, size = %zu\n",
+               __func__, &pcm_buff[priv->pos], priv->pos, size);
+
+       if ((priv->pos + size) > buff_size) {
+               memcpy(&pcm_buff[priv->pos], src_buff, buff_size - priv->pos);
+               cur_size -= buff_size - priv->pos;
+               priv->pos = 0;
+       }
+
+       memcpy(&pcm_buff[priv->pos], &src_buff[size - cur_size], cur_size);
+       priv->pos = (priv->pos + cur_size) % buff_size;
+
+       if (cur_size != size || (old_pos && (old_pos % period_size < size)))
+               snd_pcm_period_elapsed(priv->substream);
+
+       return 0;
+}
+
+static int stm32_adfsdm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct stm32_adfsdm_priv *priv =
+               snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_RESUME:
+               priv->pos = 0;
+               return stm32_dfsdm_get_buff_cb(priv->iio_ch->indio_dev,
+                                              stm32_afsdm_pcm_cb, priv);
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+       case SNDRV_PCM_TRIGGER_STOP:
+               return stm32_dfsdm_release_buff_cb(priv->iio_ch->indio_dev);
+       }
+
+       return -EINVAL;
+}
+
+static int stm32_adfsdm_pcm_open(struct snd_pcm_substream *substream)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+       int ret;
+
+       ret =  snd_soc_set_runtime_hwparams(substream, &stm32_adfsdm_pcm_hw);
+       if (!ret)
+               priv->substream = substream;
+
+       return ret;
+}
+
+static int stm32_adfsdm_pcm_close(struct snd_pcm_substream *substream)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct stm32_adfsdm_priv *priv =
+               snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+       snd_pcm_lib_free_pages(substream);
+       priv->substream = NULL;
+
+       return 0;
+}
+
+static snd_pcm_uframes_t stm32_adfsdm_pcm_pointer(
+                                           struct snd_pcm_substream *substream)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct stm32_adfsdm_priv *priv =
+               snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+       return bytes_to_frames(substream->runtime, priv->pos);
+}
+
+static int stm32_adfsdm_pcm_hw_params(struct snd_pcm_substream *substream,
+                                     struct snd_pcm_hw_params *params)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct stm32_adfsdm_priv *priv =
+               snd_soc_dai_get_drvdata(rtd->cpu_dai);
+       int ret;
+
+       ret =  snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+       if (ret < 0)
+               return ret;
+       priv->pcm_buff = substream->runtime->dma_area;
+
+       return iio_channel_cb_set_buffer_watermark(priv->iio_cb,
+                                                  params_period_size(params));
+}
+
+static int stm32_adfsdm_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+       snd_pcm_lib_free_pages(substream);
+
+       return 0;
+}
+
+static struct snd_pcm_ops stm32_adfsdm_pcm_ops = {
+       .open           = stm32_adfsdm_pcm_open,
+       .close          = stm32_adfsdm_pcm_close,
+       .hw_params      = stm32_adfsdm_pcm_hw_params,
+       .hw_free        = stm32_adfsdm_pcm_hw_free,
+       .trigger        = stm32_adfsdm_trigger,
+       .pointer        = stm32_adfsdm_pcm_pointer,
+};
+
+static int stm32_adfsdm_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+       struct snd_pcm *pcm = rtd->pcm;
+       struct stm32_adfsdm_priv *priv =
+               snd_soc_dai_get_drvdata(rtd->cpu_dai);
+       unsigned int size = DFSDM_MAX_PERIODS * DFSDM_MAX_PERIOD_SIZE;
+
+       return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+                                                    priv->dev, size, size);
+}
+
+static void stm32_adfsdm_pcm_free(struct snd_pcm *pcm)
+{
+       struct snd_pcm_substream *substream;
+       struct snd_soc_pcm_runtime *rtd;
+       struct stm32_adfsdm_priv *priv;
+
+       substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+       if (substream) {
+               rtd = substream->private_data;
+               priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+               snd_pcm_lib_preallocate_free_for_all(pcm);
+       }
+}
+
+static struct snd_soc_platform_driver stm32_adfsdm_soc_platform = {
+       .ops            = &stm32_adfsdm_pcm_ops,
+       .pcm_new        = stm32_adfsdm_pcm_new,
+       .pcm_free       = stm32_adfsdm_pcm_free,
+};
+
+static const struct of_device_id stm32_adfsdm_of_match[] = {
+       {.compatible = "st,stm32h7-dfsdm-dai"},
+       {}
+};
+MODULE_DEVICE_TABLE(of, stm32_adfsdm_of_match);
+
+static int stm32_adfsdm_probe(struct platform_device *pdev)
+{
+       struct stm32_adfsdm_priv *priv;
+       int ret;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->dev = &pdev->dev;
+       priv->dai_drv = stm32_adfsdm_dai;
+
+       dev_set_drvdata(&pdev->dev, priv);
+
+       ret = devm_snd_soc_register_component(&pdev->dev,
+                                             &stm32_adfsdm_dai_component,
+                                             &priv->dai_drv, 1);
+       if (ret < 0)
+               return ret;
+
+       /* Associate iio channel */
+       priv->iio_ch  = devm_iio_channel_get_all(&pdev->dev);
+       if (IS_ERR(priv->iio_ch))
+               return PTR_ERR(priv->iio_ch);
+
+       priv->iio_cb = iio_channel_get_all_cb(&pdev->dev, NULL, NULL);
+       if (IS_ERR(priv->iio_cb))
+               return PTR_ERR(priv->iio_cb);
+
+       ret = devm_snd_soc_register_platform(&pdev->dev,
+                                            &stm32_adfsdm_soc_platform);
+       if (ret < 0)
+               dev_err(&pdev->dev, "%s: Failed to register PCM platform\n",
+                       __func__);
+
+       return ret;
+}
+
+static struct platform_driver stm32_adfsdm_driver = {
+       .driver = {
+                  .name = STM32_ADFSDM_DRV_NAME,
+                  .of_match_table = stm32_adfsdm_of_match,
+                  },
+       .probe = stm32_adfsdm_probe,
+};
+
+module_platform_driver(stm32_adfsdm_driver);
+
+MODULE_DESCRIPTION("stm32 DFSDM DAI driver");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" STM32_ADFSDM_DRV_NAME);
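
The ring-buffer handling in stm32_afsdm_pcm_cb() splits an IIO block that crosses the end of the PCM buffer into a tail copy and a head copy. A standalone program with made-up sizes (not driver code) walks through one wrap:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char pcm[16];
	unsigned char src[] = "ABCDEF";			/* 6 payload bytes */
	unsigned int buff_size = sizeof(pcm), size = 6;
	unsigned int pos = 13, cur_size = size;

	if (pos + size > buff_size) {
		/* copy what still fits before the end of the ring */
		memcpy(&pcm[pos], src, buff_size - pos);
		cur_size -= buff_size - pos;
		pos = 0;
	}

	/* copy the remainder (or everything, if no wrap) at the head */
	memcpy(&pcm[pos], &src[size - cur_size], cur_size);
	pos = (pos + cur_size) % buff_size;

	printf("new pos = %u\n", pos);			/* prints 3 */
	return 0;
}
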
index d6f71a3..d743b7d 100644 (file)
 
 #include "stm32_sai.h"
 
-static LIST_HEAD(sync_providers);
-static DEFINE_MUTEX(sync_mutex);
-
-struct sync_provider {
-       struct list_head link;
-       struct device_node *node;
-       int  (*sync_conf)(void *data, int synco);
-       void *data;
-};
-
 static const struct stm32_sai_conf stm32_sai_conf_f4 = {
        .version = SAI_STM32F4,
 };
@@ -70,9 +60,8 @@ static int stm32_sai_sync_conf_client(struct stm32_sai_data *sai, int synci)
        return 0;
 }
 
-static int stm32_sai_sync_conf_provider(void *data, int synco)
+static int stm32_sai_sync_conf_provider(struct stm32_sai_data *sai, int synco)
 {
-       struct stm32_sai_data *sai = (struct stm32_sai_data *)data;
        u32 prev_synco;
        int ret;
 
@@ -103,83 +92,42 @@ static int stm32_sai_sync_conf_provider(void *data, int synco)
        return 0;
 }
 
-static int stm32_sai_set_sync_provider(struct device_node *np, int synco)
+static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
+                             struct device_node *np_provider,
+                             int synco, int synci)
 {
-       struct sync_provider *provider;
+       struct platform_device *pdev = of_find_device_by_node(np_provider);
+       struct stm32_sai_data *sai_provider;
        int ret;
 
-       mutex_lock(&sync_mutex);
-       list_for_each_entry(provider, &sync_providers, link) {
-               if (provider->node == np) {
-                       ret = provider->sync_conf(provider->data, synco);
-                       mutex_unlock(&sync_mutex);
-                       return ret;
-               }
+       if (!pdev) {
+               dev_err(&sai_client->pdev->dev,
+                       "Device not found for node %s\n", np_provider->name);
+               return -ENODEV;
        }
-       mutex_unlock(&sync_mutex);
 
-       /* SAI sync provider not found */
-       return -ENODEV;
-}
-
-static int stm32_sai_set_sync(struct stm32_sai_data *sai,
-                             struct device_node *np_provider,
-                             int synco, int synci)
-{
-       int ret;
+       sai_provider = platform_get_drvdata(pdev);
+       if (!sai_provider) {
+               dev_err(&sai_client->pdev->dev,
+                       "SAI sync provider data not found\n");
+               return -EINVAL;
+       }
 
        /* Configure sync client */
-       stm32_sai_sync_conf_client(sai, synci);
+       ret = stm32_sai_sync_conf_client(sai_client, synci);
+       if (ret < 0)
+               return ret;
 
        /* Configure sync provider */
-       ret = stm32_sai_set_sync_provider(np_provider, synco);
-
-       return ret;
-}
-
-static int stm32_sai_sync_add_provider(struct platform_device *pdev,
-                                      void *data)
-{
-       struct sync_provider *sp;
-
-       sp = devm_kzalloc(&pdev->dev, sizeof(*sp), GFP_KERNEL);
-       if (!sp)
-               return -ENOMEM;
-
-       sp->node = of_node_get(pdev->dev.of_node);
-       sp->data = data;
-       sp->sync_conf = &stm32_sai_sync_conf_provider;
-
-       mutex_lock(&sync_mutex);
-       list_add(&sp->link, &sync_providers);
-       mutex_unlock(&sync_mutex);
-
-       return 0;
-}
-
-static void stm32_sai_sync_del_provider(struct device_node *np)
-{
-       struct sync_provider *sp;
-
-       mutex_lock(&sync_mutex);
-       list_for_each_entry(sp, &sync_providers, link) {
-               if (sp->node == np) {
-                       list_del(&sp->link);
-                       of_node_put(sp->node);
-                       break;
-               }
-       }
-       mutex_unlock(&sync_mutex);
+       return stm32_sai_sync_conf_provider(sai_provider, synco);
 }
 
 static int stm32_sai_probe(struct platform_device *pdev)
 {
-       struct device_node *np = pdev->dev.of_node;
        struct stm32_sai_data *sai;
        struct reset_control *rst;
        struct resource *res;
        const struct of_device_id *of_id;
-       int ret;
 
        sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
        if (!sai)
@@ -231,28 +179,11 @@ static int stm32_sai_probe(struct platform_device *pdev)
                reset_control_deassert(rst);
        }
 
-       ret = stm32_sai_sync_add_provider(pdev, sai);
-       if (ret < 0)
-               return ret;
-       sai->set_sync = &stm32_sai_set_sync;
-
        sai->pdev = pdev;
+       sai->set_sync = &stm32_sai_set_sync;
        platform_set_drvdata(pdev, sai);
 
-       ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
-       if (ret < 0)
-               stm32_sai_sync_del_provider(np);
-
-       return ret;
-}
-
-static int stm32_sai_remove(struct platform_device *pdev)
-{
-       of_platform_depopulate(&pdev->dev);
-
-       stm32_sai_sync_del_provider(pdev->dev.of_node);
-
-       return 0;
+       return devm_of_platform_populate(&pdev->dev);
 }
 
 MODULE_DEVICE_TABLE(of, stm32_sai_ids);
@@ -263,7 +194,6 @@ static struct platform_driver stm32_sai_driver = {
                .of_match_table = stm32_sai_ids,
        },
        .probe = stm32_sai_probe,
-       .remove = stm32_sai_remove,
 };
 
 module_platform_driver(stm32_sai_driver);
index 5da4efe..8862816 100644
@@ -590,12 +590,28 @@ static int sun4i_codec_hw_params(struct snd_pcm_substream *substream,
                                             hwrate);
 }
 
+
+static unsigned int sun4i_codec_src_rates[] = {
+       8000, 11025, 12000, 16000, 22050, 24000, 32000,
+       44100, 48000, 96000, 192000
+};
+
+
+static struct snd_pcm_hw_constraint_list sun4i_codec_constraints = {
+       .count  = ARRAY_SIZE(sun4i_codec_src_rates),
+       .list   = sun4i_codec_src_rates,
+};
+
+
 static int sun4i_codec_startup(struct snd_pcm_substream *substream,
                               struct snd_soc_dai *dai)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct sun4i_codec *scodec = snd_soc_card_get_drvdata(rtd->card);
 
+       snd_pcm_hw_constraint_list(substream->runtime, 0,
+                               SNDRV_PCM_HW_PARAM_RATE, &sun4i_codec_constraints);
+
        /*
         * Stop issuing DRQ when we have room for less than 16 samples
         * in our TX FIFO
@@ -633,9 +649,7 @@ static struct snd_soc_dai_driver sun4i_codec_dai = {
                .channels_max   = 2,
                .rate_min       = 8000,
                .rate_max       = 192000,
-               .rates          = SNDRV_PCM_RATE_8000_48000 |
-                                 SNDRV_PCM_RATE_96000 |
-                                 SNDRV_PCM_RATE_192000,
+               .rates          = SNDRV_PCM_RATE_CONTINUOUS,
                .formats        = SNDRV_PCM_FMTBIT_S16_LE |
                                  SNDRV_PCM_FMTBIT_S32_LE,
                .sig_bits       = 24,
@@ -645,11 +659,8 @@ static struct snd_soc_dai_driver sun4i_codec_dai = {
                .channels_min   = 1,
                .channels_max   = 2,
                .rate_min       = 8000,
-               .rate_max       = 192000,
-               .rates          = SNDRV_PCM_RATE_8000_48000 |
-                                 SNDRV_PCM_RATE_96000 |
-                                 SNDRV_PCM_RATE_192000 |
-                                 SNDRV_PCM_RATE_KNOT,
+               .rate_max       = 48000,
+               .rates          = SNDRV_PCM_RATE_CONTINUOUS,
                .formats        = SNDRV_PCM_FMTBIT_S16_LE |
                                  SNDRV_PCM_FMTBIT_S32_LE,
                .sig_bits       = 24,
@@ -1128,7 +1139,7 @@ static const struct snd_soc_component_driver sun4i_codec_component = {
        .name = "sun4i-codec",
 };
 
-#define SUN4I_CODEC_RATES      SNDRV_PCM_RATE_8000_192000
+#define SUN4I_CODEC_RATES      SNDRV_PCM_RATE_CONTINUOUS
 #define SUN4I_CODEC_FORMATS    (SNDRV_PCM_FMTBIT_S16_LE | \
                                 SNDRV_PCM_FMTBIT_S32_LE)
 
index 04f9258..dca1143 100644
@@ -269,10 +269,11 @@ static bool sun4i_i2s_oversample_is_valid(unsigned int oversample)
        return false;
 }
 
-static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
+static int sun4i_i2s_set_clk_rate(struct snd_soc_dai *dai,
                                  unsigned int rate,
                                  unsigned int word_size)
 {
+       struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
        unsigned int oversample_rate, clk_rate;
        int bclk_div, mclk_div;
        int ret;
@@ -300,6 +301,7 @@ static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
                break;
 
        default:
+               dev_err(dai->dev, "Unsupported sample rate: %u\n", rate);
                return -EINVAL;
        }
 
@@ -308,18 +310,25 @@ static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
                return ret;
 
        oversample_rate = i2s->mclk_freq / rate;
-       if (!sun4i_i2s_oversample_is_valid(oversample_rate))
+       if (!sun4i_i2s_oversample_is_valid(oversample_rate)) {
+               dev_err(dai->dev, "Unsupported oversample rate: %d\n",
+                       oversample_rate);
                return -EINVAL;
+       }
 
        bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
                                          word_size);
-       if (bclk_div < 0)
+       if (bclk_div < 0) {
+               dev_err(dai->dev, "Unsupported BCLK divider: %d\n", bclk_div);
                return -EINVAL;
+       }
 
        mclk_div = sun4i_i2s_get_mclk_div(i2s, oversample_rate,
                                          clk_rate, rate);
-       if (mclk_div < 0)
+       if (mclk_div < 0) {
+               dev_err(dai->dev, "Unsupported MCLK divider: %d\n", mclk_div);
                return -EINVAL;
+       }
 
        /* Adjust the clock division values if needed */
        bclk_div += i2s->variant->bclk_offset;
@@ -349,8 +358,11 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
        u32 width;
 
        channels = params_channels(params);
-       if (channels != 2)
+       if (channels != 2) {
+               dev_err(dai->dev, "Unsupported number of channels: %d\n",
+                       channels);
                return -EINVAL;
+       }
 
        if (i2s->variant->has_chcfg) {
                regmap_update_bits(i2s->regmap, SUN8I_I2S_CHAN_CFG_REG,
@@ -382,6 +394,8 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
                width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                break;
        default:
+               dev_err(dai->dev, "Unsupported physical sample width: %d\n",
+                       params_physical_width(params));
                return -EINVAL;
        }
        i2s->playback_dma_data.addr_width = width;
@@ -393,6 +407,8 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
                break;
 
        default:
+               dev_err(dai->dev, "Unsupported sample width: %d\n",
+                       params_width(params));
                return -EINVAL;
        }
 
@@ -401,7 +417,7 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
        regmap_field_write(i2s->field_fmt_sr,
                           sr + i2s->variant->fmt_offset);
 
-       return sun4i_i2s_set_clk_rate(i2s, params_rate(params),
+       return sun4i_i2s_set_clk_rate(dai, params_rate(params),
                                      params_width(params));
 }
 
@@ -426,6 +442,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                val = SUN4I_I2S_FMT0_FMT_RIGHT_J;
                break;
        default:
+               dev_err(dai->dev, "Unsupported format: %d\n",
+                       fmt & SND_SOC_DAIFMT_FORMAT_MASK);
                return -EINVAL;
        }
 
@@ -464,6 +482,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        case SND_SOC_DAIFMT_NB_NF:
                break;
        default:
+               dev_err(dai->dev, "Unsupported clock polarity: %d\n",
+                       fmt & SND_SOC_DAIFMT_INV_MASK);
                return -EINVAL;
        }
 
@@ -482,6 +502,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                        val = SUN4I_I2S_CTRL_MODE_SLAVE;
                        break;
                default:
+                       dev_err(dai->dev, "Unsupported slave setting: %d\n",
+                               fmt & SND_SOC_DAIFMT_MASTER_MASK);
                        return -EINVAL;
                }
                regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
@@ -504,6 +526,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                        val = 0;
                        break;
                default:
+                       dev_err(dai->dev, "Unsupported slave setting: %d\n",
+                               fmt & SND_SOC_DAIFMT_MASTER_MASK);
                        return -EINVAL;
                }
                regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
@@ -897,6 +921,23 @@ static const struct sun4i_i2s_quirks sun6i_a31_i2s_quirks = {
        .field_rxchansel        = REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2),
 };
 
+static const struct sun4i_i2s_quirks sun8i_a83t_i2s_quirks = {
+       .has_reset              = true,
+       .reg_offset_txdata      = SUN8I_I2S_FIFO_TX_REG,
+       .sun4i_i2s_regmap       = &sun4i_i2s_regmap_config,
+       .field_clkdiv_mclk_en   = REG_FIELD(SUN4I_I2S_CLK_DIV_REG, 7, 7),
+       .field_fmt_wss          = REG_FIELD(SUN4I_I2S_FMT0_REG, 2, 3),
+       .field_fmt_sr           = REG_FIELD(SUN4I_I2S_FMT0_REG, 4, 5),
+       .field_fmt_bclk         = REG_FIELD(SUN4I_I2S_FMT0_REG, 6, 6),
+       .field_fmt_lrclk        = REG_FIELD(SUN4I_I2S_FMT0_REG, 7, 7),
+       .has_slave_select_bit   = true,
+       .field_fmt_mode         = REG_FIELD(SUN4I_I2S_FMT0_REG, 0, 1),
+       .field_txchanmap        = REG_FIELD(SUN4I_I2S_TX_CHAN_MAP_REG, 0, 31),
+       .field_rxchanmap        = REG_FIELD(SUN4I_I2S_RX_CHAN_MAP_REG, 0, 31),
+       .field_txchansel        = REG_FIELD(SUN4I_I2S_TX_CHAN_SEL_REG, 0, 2),
+       .field_rxchansel        = REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2),
+};
+
 static const struct sun4i_i2s_quirks sun8i_h3_i2s_quirks = {
        .has_reset              = true,
        .reg_offset_txdata      = SUN8I_I2S_FIFO_TX_REG,
@@ -1120,6 +1161,10 @@ static const struct of_device_id sun4i_i2s_match[] = {
                .compatible = "allwinner,sun6i-a31-i2s",
                .data = &sun6i_a31_i2s_quirks,
        },
+       {
+               .compatible = "allwinner,sun8i-a83t-i2s",
+               .data = &sun8i_a83t_i2s_quirks,
+       },
        {
                .compatible = "allwinner,sun8i-h3-i2s",
                .data = &sun8i_h3_i2s_quirks,
diff --git a/sound/soc/uniphier/Kconfig b/sound/soc/uniphier/Kconfig
new file mode 100644
index 0000000..02886a4
--- /dev/null
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+config SND_SOC_UNIPHIER
+       tristate "ASoC support for UniPhier"
+       depends on (ARCH_UNIPHIER || COMPILE_TEST)
+       help
+         Say Y or M if you want to add support for the Socionext
+         UniPhier SoC audio interfaces. You will also need to select
+         below the audio interfaces you want to support.
+         If unsure, select "N".
+
+config SND_SOC_UNIPHIER_EVEA_CODEC
+       tristate "UniPhier SoC internal audio codec"
+       depends on SND_SOC_UNIPHIER
+       select REGMAP_MMIO
+       help
+         This adds the codec driver for the Socionext UniPhier LD11/LD20
+         SoC internal DAC. This driver supports Line In/Out and Headphone.
+         Select Y if you use such a device.
+         If unsure, select "N".
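A minimal .config fragment that turns the new options on as modules might look like the sketch below; the surrounding ASoC prerequisites (CONFIG_SND, CONFIG_SND_SOC) and the ARCH_UNIPHIER platform dependency are assumed here and are not part of this patch:

    CONFIG_SND=m
    CONFIG_SND_SOC=m
    # depends on ARCH_UNIPHIER (or COMPILE_TEST)
    CONFIG_SND_SOC_UNIPHIER=m
    CONFIG_SND_SOC_UNIPHIER_EVEA_CODEC=m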
diff --git a/sound/soc/uniphier/Makefile b/sound/soc/uniphier/Makefile
new file mode 100644
index 0000000..3be00d7
--- /dev/null
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+snd-soc-uniphier-evea-objs := evea.o
+obj-$(CONFIG_SND_SOC_UNIPHIER_EVEA_CODEC) += snd-soc-uniphier-evea.o
diff --git a/sound/soc/uniphier/evea.c b/sound/soc/uniphier/evea.c
new file mode 100644
index 0000000..0cc9eff
--- /dev/null
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Socionext UniPhier EVEA ADC/DAC codec driver.
+ *
+ * Copyright (c) 2016-2017 Socionext Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#define DRV_NAME        "evea"
+#define EVEA_RATES      SNDRV_PCM_RATE_48000
+#define EVEA_FORMATS    SNDRV_PCM_FMTBIT_S32_LE
+
+#define AADCPOW(n)                           (0x0078 + 0x04 * (n))
+#define   AADCPOW_AADC_POWD                   BIT(0)
+#define AHPOUTPOW                            0x0098
+#define   AHPOUTPOW_HP_ON                     BIT(4)
+#define ALINEPOW                             0x009c
+#define   ALINEPOW_LIN2_POWD                  BIT(3)
+#define   ALINEPOW_LIN1_POWD                  BIT(4)
+#define ALO1OUTPOW                           0x00a8
+#define   ALO1OUTPOW_LO1_ON                   BIT(4)
+#define ALO2OUTPOW                           0x00ac
+#define   ALO2OUTPOW_ADAC2_MUTE               BIT(0)
+#define   ALO2OUTPOW_LO2_ON                   BIT(4)
+#define AANAPOW                              0x00b8
+#define   AANAPOW_A_POWD                      BIT(4)
+#define ADACSEQ1(n)                          (0x0144 + 0x40 * (n))
+#define   ADACSEQ1_MMUTE                      BIT(1)
+#define ADACSEQ2(n)                          (0x0160 + 0x40 * (n))
+#define   ADACSEQ2_ADACIN_FIX                 BIT(0)
+#define ADAC1ODC                             0x0200
+#define   ADAC1ODC_HP_DIS_RES_MASK            GENMASK(2, 1)
+#define   ADAC1ODC_HP_DIS_RES_OFF             (0x0 << 1)
+#define   ADAC1ODC_HP_DIS_RES_ON              (0x3 << 1)
+#define   ADAC1ODC_ADAC_RAMPCLT_MASK          GENMASK(8, 7)
+#define   ADAC1ODC_ADAC_RAMPCLT_NORMAL        (0x0 << 7)
+#define   ADAC1ODC_ADAC_RAMPCLT_REDUCE        (0x1 << 7)
+
+struct evea_priv {
+       struct clk *clk, *clk_exiv;
+       struct reset_control *rst, *rst_exiv, *rst_adamv;
+       struct regmap *regmap;
+
+       int switch_lin;
+       int switch_lo;
+       int switch_hp;
+};
+
+static const struct snd_soc_dapm_widget evea_widgets[] = {
+       SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_INPUT("LIN1_LP"),
+       SND_SOC_DAPM_INPUT("LIN1_RP"),
+       SND_SOC_DAPM_INPUT("LIN2_LP"),
+       SND_SOC_DAPM_INPUT("LIN2_RP"),
+       SND_SOC_DAPM_INPUT("LIN3_LP"),
+       SND_SOC_DAPM_INPUT("LIN3_RP"),
+
+       SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_OUTPUT("HP1_L"),
+       SND_SOC_DAPM_OUTPUT("HP1_R"),
+       SND_SOC_DAPM_OUTPUT("LO2_L"),
+       SND_SOC_DAPM_OUTPUT("LO2_R"),
+};
+
+static const struct snd_soc_dapm_route evea_routes[] = {
+       { "ADC", NULL, "LIN1_LP" },
+       { "ADC", NULL, "LIN1_RP" },
+       { "ADC", NULL, "LIN2_LP" },
+       { "ADC", NULL, "LIN2_RP" },
+       { "ADC", NULL, "LIN3_LP" },
+       { "ADC", NULL, "LIN3_RP" },
+
+       { "HP1_L", NULL, "DAC" },
+       { "HP1_R", NULL, "DAC" },
+       { "LO2_L", NULL, "DAC" },
+       { "LO2_R", NULL, "DAC" },
+};
+
+static void evea_set_power_state_on(struct evea_priv *evea)
+{
+       struct regmap *map = evea->regmap;
+
+       regmap_update_bits(map, AANAPOW, AANAPOW_A_POWD,
+                          AANAPOW_A_POWD);
+
+       regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+                          ADAC1ODC_HP_DIS_RES_ON);
+
+       regmap_update_bits(map, ADAC1ODC, ADAC1ODC_ADAC_RAMPCLT_MASK,
+                          ADAC1ODC_ADAC_RAMPCLT_REDUCE);
+
+       regmap_update_bits(map, ADACSEQ2(0), ADACSEQ2_ADACIN_FIX, 0);
+       regmap_update_bits(map, ADACSEQ2(1), ADACSEQ2_ADACIN_FIX, 0);
+       regmap_update_bits(map, ADACSEQ2(2), ADACSEQ2_ADACIN_FIX, 0);
+}
+
+static void evea_set_power_state_off(struct evea_priv *evea)
+{
+       struct regmap *map = evea->regmap;
+
+       regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+                          ADAC1ODC_HP_DIS_RES_ON);
+
+       regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
+                          ADACSEQ1_MMUTE);
+       regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
+                          ADACSEQ1_MMUTE);
+       regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
+                          ADACSEQ1_MMUTE);
+
+       regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
+       regmap_update_bits(map, ALO2OUTPOW, ALO2OUTPOW_LO2_ON, 0);
+       regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
+}
+
+static int evea_update_switch_lin(struct evea_priv *evea)
+{
+       struct regmap *map = evea->regmap;
+
+       if (evea->switch_lin) {
+               regmap_update_bits(map, ALINEPOW,
+                                  ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD,
+                                  ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD);
+
+               regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD,
+                                  AADCPOW_AADC_POWD);
+               regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD,
+                                  AADCPOW_AADC_POWD);
+       } else {
+               regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD, 0);
+               regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD, 0);
+
+               regmap_update_bits(map, ALINEPOW,
+                                  ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD, 0);
+       }
+
+       return 0;
+}
+
+static int evea_update_switch_lo(struct evea_priv *evea)
+{
+       struct regmap *map = evea->regmap;
+
+       if (evea->switch_lo) {
+               regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE, 0);
+               regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE, 0);
+
+               regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON,
+                                  ALO1OUTPOW_LO1_ON);
+               regmap_update_bits(map, ALO2OUTPOW,
+                                  ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON,
+                                  ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON);
+       } else {
+               regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
+                                  ADACSEQ1_MMUTE);
+               regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
+                                  ADACSEQ1_MMUTE);
+
+               regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
+               regmap_update_bits(map, ALO2OUTPOW,
+                                  ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON,
+                                  0);
+       }
+
+       return 0;
+}
+
+static int evea_update_switch_hp(struct evea_priv *evea)
+{
+       struct regmap *map = evea->regmap;
+
+       if (evea->switch_hp) {
+               regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE, 0);
+
+               regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON,
+                                  AHPOUTPOW_HP_ON);
+
+               regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+                                  ADAC1ODC_HP_DIS_RES_OFF);
+       } else {
+               regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+                                  ADAC1ODC_HP_DIS_RES_ON);
+
+               regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
+                                  ADACSEQ1_MMUTE);
+
+               regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
+       }
+
+       return 0;
+}
+
+static void evea_update_switch_all(struct evea_priv *evea)
+{
+       evea_update_switch_lin(evea);
+       evea_update_switch_lo(evea);
+       evea_update_switch_hp(evea);
+}
+
+static int evea_get_switch_lin(struct snd_kcontrol *kcontrol,
+                              struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+       struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+       ucontrol->value.integer.value[0] = evea->switch_lin;
+
+       return 0;
+}
+
+static int evea_set_switch_lin(struct snd_kcontrol *kcontrol,
+                              struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+       struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+       if (evea->switch_lin == ucontrol->value.integer.value[0])
+               return 0;
+
+       evea->switch_lin = ucontrol->value.integer.value[0];
+
+       return evea_update_switch_lin(evea);
+}
+
+static int evea_get_switch_lo(struct snd_kcontrol *kcontrol,
+                             struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+       struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+       ucontrol->value.integer.value[0] = evea->switch_lo;
+
+       return 0;
+}
+
+static int evea_set_switch_lo(struct snd_kcontrol *kcontrol,
+                             struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+       struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+       if (evea->switch_lo == ucontrol->value.integer.value[0])
+               return 0;
+
+       evea->switch_lo = ucontrol->value.integer.value[0];
+
+       return evea_update_switch_lo(evea);
+}
+
+static int evea_get_switch_hp(struct snd_kcontrol *kcontrol,
+                             struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+       struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+       ucontrol->value.integer.value[0] = evea->switch_hp;
+
+       return 0;
+}
+
+static int evea_set_switch_hp(struct snd_kcontrol *kcontrol,
+                             struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+       struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+       if (evea->switch_hp == ucontrol->value.integer.value[0])
+               return 0;
+
+       evea->switch_hp = ucontrol->value.integer.value[0];
+
+       return evea_update_switch_hp(evea);
+}
+
+static const struct snd_kcontrol_new eva_controls[] = {
+       SOC_SINGLE_BOOL_EXT("Line Capture Switch", 0,
+                           evea_get_switch_lin, evea_set_switch_lin),
+       SOC_SINGLE_BOOL_EXT("Line Playback Switch", 0,
+                           evea_get_switch_lo, evea_set_switch_lo),
+       SOC_SINGLE_BOOL_EXT("Headphone Playback Switch", 0,
+                           evea_get_switch_hp, evea_set_switch_hp),
+};
+
+static int evea_codec_probe(struct snd_soc_codec *codec)
+{
+       struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+       evea->switch_lin = 1;
+       evea->switch_lo = 1;
+       evea->switch_hp = 1;
+
+       evea_set_power_state_on(evea);
+       evea_update_switch_all(evea);
+
+       return 0;
+}
+
+static int evea_codec_suspend(struct snd_soc_codec *codec)
+{
+       struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+       evea_set_power_state_off(evea);
+
+       reset_control_assert(evea->rst_adamv);
+       reset_control_assert(evea->rst_exiv);
+       reset_control_assert(evea->rst);
+
+       clk_disable_unprepare(evea->clk_exiv);
+       clk_disable_unprepare(evea->clk);
+
+       return 0;
+}
+
+static int evea_codec_resume(struct snd_soc_codec *codec)
+{
+       struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       ret = clk_prepare_enable(evea->clk);
+       if (ret)
+               return ret;
+
+       ret = clk_prepare_enable(evea->clk_exiv);
+       if (ret)
+               goto err_out_clock;
+
+       ret = reset_control_deassert(evea->rst);
+       if (ret)
+               goto err_out_clock_exiv;
+
+       ret = reset_control_deassert(evea->rst_exiv);
+       if (ret)
+               goto err_out_reset;
+
+       ret = reset_control_deassert(evea->rst_adamv);
+       if (ret)
+               goto err_out_reset_exiv;
+
+       evea_set_power_state_on(evea);
+       evea_update_switch_all(evea);
+
+       return 0;
+
+err_out_reset_exiv:
+       reset_control_assert(evea->rst_exiv);
+
+err_out_reset:
+       reset_control_assert(evea->rst);
+
+err_out_clock_exiv:
+       clk_disable_unprepare(evea->clk_exiv);
+
+err_out_clock:
+       clk_disable_unprepare(evea->clk);
+
+       return ret;
+}
+
+static struct snd_soc_codec_driver soc_codec_evea = {
+       .probe   = evea_codec_probe,
+       .suspend = evea_codec_suspend,
+       .resume  = evea_codec_resume,
+
+       .component_driver = {
+               .dapm_widgets = evea_widgets,
+               .num_dapm_widgets = ARRAY_SIZE(evea_widgets),
+               .dapm_routes = evea_routes,
+               .num_dapm_routes = ARRAY_SIZE(evea_routes),
+               .controls = eva_controls,
+               .num_controls = ARRAY_SIZE(eva_controls),
+       },
+};
+
+static struct snd_soc_dai_driver soc_dai_evea[] = {
+       {
+               .name     = DRV_NAME "-line1",
+               .playback = {
+                       .stream_name  = "Line Out 1",
+                       .formats      = EVEA_FORMATS,
+                       .rates        = EVEA_RATES,
+                       .channels_min = 2,
+                       .channels_max = 2,
+               },
+               .capture = {
+                       .stream_name  = "Line In 1",
+                       .formats      = EVEA_FORMATS,
+                       .rates        = EVEA_RATES,
+                       .channels_min = 2,
+                       .channels_max = 2,
+               },
+       },
+       {
+               .name     = DRV_NAME "-hp1",
+               .playback = {
+                       .stream_name  = "Headphone 1",
+                       .formats      = EVEA_FORMATS,
+                       .rates        = EVEA_RATES,
+                       .channels_min = 2,
+                       .channels_max = 2,
+               },
+       },
+       {
+               .name     = DRV_NAME "-lo2",
+               .playback = {
+                       .stream_name  = "Line Out 2",
+                       .formats      = EVEA_FORMATS,
+                       .rates        = EVEA_RATES,
+                       .channels_min = 2,
+                       .channels_max = 2,
+               },
+       },
+};
+
+static const struct regmap_config evea_regmap_config = {
+       .reg_bits      = 32,
+       .reg_stride    = 4,
+       .val_bits      = 32,
+       .max_register  = 0xffc,
+       .cache_type    = REGCACHE_NONE,
+};
+
+static int evea_probe(struct platform_device *pdev)
+{
+       struct evea_priv *evea;
+       struct resource *res;
+       void __iomem *preg;
+       int ret;
+
+       evea = devm_kzalloc(&pdev->dev, sizeof(struct evea_priv), GFP_KERNEL);
+       if (!evea)
+               return -ENOMEM;
+
+       evea->clk = devm_clk_get(&pdev->dev, "evea");
+       if (IS_ERR(evea->clk))
+               return PTR_ERR(evea->clk);
+
+       evea->clk_exiv = devm_clk_get(&pdev->dev, "exiv");
+       if (IS_ERR(evea->clk_exiv))
+               return PTR_ERR(evea->clk_exiv);
+
+       evea->rst = devm_reset_control_get_shared(&pdev->dev, "evea");
+       if (IS_ERR(evea->rst))
+               return PTR_ERR(evea->rst);
+
+       evea->rst_exiv = devm_reset_control_get_shared(&pdev->dev, "exiv");
+       if (IS_ERR(evea->rst_exiv))
+               return PTR_ERR(evea->rst_exiv);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       preg = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(preg))
+               return PTR_ERR(preg);
+
+       evea->regmap = devm_regmap_init_mmio(&pdev->dev, preg,
+                                            &evea_regmap_config);
+       if (IS_ERR(evea->regmap))
+               return PTR_ERR(evea->regmap);
+
+       ret = clk_prepare_enable(evea->clk);
+       if (ret)
+               return ret;
+
+       ret = clk_prepare_enable(evea->clk_exiv);
+       if (ret)
+               goto err_out_clock;
+
+       ret = reset_control_deassert(evea->rst);
+       if (ret)
+               goto err_out_clock_exiv;
+
+       ret = reset_control_deassert(evea->rst_exiv);
+       if (ret)
+               goto err_out_reset;
+
+       /* ADAMV will hang if the EXIV reset is asserted */
+       evea->rst_adamv = devm_reset_control_get_shared(&pdev->dev, "adamv");
+       if (IS_ERR(evea->rst_adamv)) {
+               ret = PTR_ERR(evea->rst_adamv);
+               goto err_out_reset_exiv;
+       }
+
+       ret = reset_control_deassert(evea->rst_adamv);
+       if (ret)
+               goto err_out_reset_exiv;
+
+       platform_set_drvdata(pdev, evea);
+
+       ret = snd_soc_register_codec(&pdev->dev, &soc_codec_evea,
+                                    soc_dai_evea, ARRAY_SIZE(soc_dai_evea));
+       if (ret)
+               goto err_out_reset_adamv;
+
+       return 0;
+
+err_out_reset_adamv:
+       reset_control_assert(evea->rst_adamv);
+
+err_out_reset_exiv:
+       reset_control_assert(evea->rst_exiv);
+
+err_out_reset:
+       reset_control_assert(evea->rst);
+
+err_out_clock_exiv:
+       clk_disable_unprepare(evea->clk_exiv);
+
+err_out_clock:
+       clk_disable_unprepare(evea->clk);
+
+       return ret;
+}
+
+static int evea_remove(struct platform_device *pdev)
+{
+       struct evea_priv *evea = platform_get_drvdata(pdev);
+
+       snd_soc_unregister_codec(&pdev->dev);
+
+       reset_control_assert(evea->rst_adamv);
+       reset_control_assert(evea->rst_exiv);
+       reset_control_assert(evea->rst);
+
+       clk_disable_unprepare(evea->clk_exiv);
+       clk_disable_unprepare(evea->clk);
+
+       return 0;
+}
+
+static const struct of_device_id evea_of_match[] = {
+       { .compatible = "socionext,uniphier-evea", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, evea_of_match);
+
+static struct platform_driver evea_codec_driver = {
+       .driver = {
+               .name = DRV_NAME,
+               .of_match_table = of_match_ptr(evea_of_match),
+       },
+       .probe  = evea_probe,
+       .remove = evea_remove,
+};
+module_platform_driver(evea_codec_driver);
+
+MODULE_AUTHOR("Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier EVEA codec driver");
+MODULE_LICENSE("GPL v2");
index 070a688..c60a577 100644
@@ -163,3 +163,7 @@ static struct platform_driver snd_soc_mop500_driver = {
 };
 
 module_platform_driver(snd_soc_mop500_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ASoC MOP500 board driver");
+MODULE_AUTHOR("Ola Lilja");
index f12c01d..d35ba77 100644
@@ -165,3 +165,8 @@ int ux500_pcm_unregister_platform(struct platform_device *pdev)
        return 0;
 }
 EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform);
+
+MODULE_AUTHOR("Ola Lilja");
+MODULE_AUTHOR("Roger Nilsson");
+MODULE_DESCRIPTION("ASoC UX500 driver");
+MODULE_LICENSE("GPL v2");
index 99b73c6..b4efb22 100644
@@ -119,13 +119,6 @@ struct sound_unit
        char name[32];
 };
 
-#ifdef CONFIG_SOUND_MSNDCLAS
-extern int msnd_classic_init(void);
-#endif
-#ifdef CONFIG_SOUND_MSNDPIN
-extern int msnd_pinnacle_init(void);
-#endif
-
 /*
  * By default, OSS sound_core claims full legacy minor range (0-255)
  * of SOUND_MAJOR to trap open attempts to any sound minor and
@@ -452,26 +445,6 @@ int register_sound_mixer(const struct file_operations *fops, int dev)
 
 EXPORT_SYMBOL(register_sound_mixer);
 
-/**
- *     register_sound_midi - register a midi device
- *     @fops: File operations for the driver
- *     @dev: Unit number to allocate
- *
- *     Allocate a midi device. Unit is the number of the midi device requested.
- *     Pass -1 to request the next free midi unit.
- *
- *     Return: On success, the allocated number is returned. On failure,
- *     a negative error code is returned.
- */
-
-int register_sound_midi(const struct file_operations *fops, int dev)
-{
-       return sound_insert_unit(&chains[2], fops, dev, 2, 130,
-                                "midi", S_IRUSR | S_IWUSR, NULL);
-}
-
-EXPORT_SYMBOL(register_sound_midi);
-
 /*
  *     DSP's are registered as a triple. Register only one and cheat
  *     in open - see below.
@@ -532,21 +505,6 @@ void unregister_sound_mixer(int unit)
 
 EXPORT_SYMBOL(unregister_sound_mixer);
 
-/**
- *     unregister_sound_midi - unregister a midi device
- *     @unit: unit number to allocate
- *
- *     Release a sound device that was allocated with register_sound_midi().
- *     The unit passed is the return value from the register function.
- */
-
-void unregister_sound_midi(int unit)
-{
-       sound_remove_unit(&chains[2], unit);
-}
-
-EXPORT_SYMBOL(unregister_sound_midi);
-
 /**
  *     unregister_sound_dsp - unregister a DSP device
  *     @unit: unit number to allocate
index 23d1d23..8018d56 100644
@@ -585,15 +585,24 @@ static int usb_audio_probe(struct usb_interface *intf,
                 * now look for an empty slot and create a new card instance
                 */
                for (i = 0; i < SNDRV_CARDS; i++)
-                       if (enable[i] && ! usb_chip[i] &&
+                       if (!usb_chip[i] &&
                            (vid[i] == -1 || vid[i] == USB_ID_VENDOR(id)) &&
                            (pid[i] == -1 || pid[i] == USB_ID_PRODUCT(id))) {
-                               err = snd_usb_audio_create(intf, dev, i, quirk,
-                                                          id, &chip);
-                               if (err < 0)
+                               if (enable[i]) {
+                                       err = snd_usb_audio_create(intf, dev, i, quirk,
+                                                                  id, &chip);
+                                       if (err < 0)
+                                               goto __error;
+                                       chip->pm_intf = intf;
+                                       break;
+                               } else if (vid[i] != -1 || pid[i] != -1) {
+                                       dev_info(&dev->dev,
+                                                "device (%04x:%04x) is disabled\n",
+                                                USB_ID_VENDOR(id),
+                                                USB_ID_PRODUCT(id));
+                                       err = -ENOENT;
                                        goto __error;
-                               chip->pm_intf = intf;
-                               break;
+                               }
                        }
                if (!chip) {
                        dev_err(&dev->dev, "no available usb audio device\n");
index 0537c63..9afb8ab 100644
@@ -204,6 +204,10 @@ static int snd_usb_copy_string_desc(struct mixer_build *state,
                                    int index, char *buf, int maxlen)
 {
        int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
+
+       if (len < 0)
+               return 0;
+
        buf[len] = 0;
        return len;
 }
@@ -652,10 +656,14 @@ static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm
                         unsigned char *name, int maxlen, int term_only)
 {
        struct iterm_name_combo *names;
+       int len;
 
-       if (iterm->name)
-               return snd_usb_copy_string_desc(state, iterm->name,
+       if (iterm->name) {
+               len = snd_usb_copy_string_desc(state, iterm->name,
                                                name, maxlen);
+               if (len)
+                       return len;
+       }
 
        /* virtual type - not a real terminal */
        if (iterm->type >> 16) {
@@ -1476,9 +1484,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
                        return -EINVAL;
                }
                csize = hdr->bControlSize;
-               if (csize <= 1) {
+               if (!csize) {
                        usb_audio_dbg(state->chip,
-                                     "unit %u: invalid bControlSize <= 1\n",
+                                     "unit %u: invalid bControlSize == 0\n",
                                      unitid);
                        return -EINVAL;
                }
@@ -2169,19 +2177,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
        kctl->private_value = (unsigned long)namelist;
        kctl->private_free = usb_mixer_selector_elem_free;
 
-       nameid = uac_selector_unit_iSelector(desc);
+       /* check the static mapping table at first */
        len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
-       if (len)
-               ;
-       else if (nameid)
-               snd_usb_copy_string_desc(state, nameid, kctl->id.name,
-                                        sizeof(kctl->id.name));
-       else {
-               len = get_term_name(state, &state->oterm,
+       if (!len) {
+               /* no mapping ? */
+               /* if iSelector is given, use it */
+               nameid = uac_selector_unit_iSelector(desc);
+               if (nameid)
+                       len = snd_usb_copy_string_desc(state, nameid,
+                                                      kctl->id.name,
+                                                      sizeof(kctl->id.name));
+               /* ... or pick up the terminal name at next */
+               if (!len)
+                       len = get_term_name(state, &state->oterm,
                                    kctl->id.name, sizeof(kctl->id.name), 0);
+               /* ... or use the fixed string "USB" as the last resort */
                if (!len)
                        strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
 
+               /* and add the proper suffix */
                if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
                        append_ctl_name(kctl, " Clock Source");
                else if ((state->oterm.type & 0xff00) == 0x0100)
index e1e7ce9..e6359d3 100644
@@ -27,6 +27,7 @@
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
+#include <linux/hid.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/usb.h>
@@ -1721,6 +1722,83 @@ static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
        return 0;
 }
 
+/* Creative Sound Blaster E1 */
+
+static int snd_soundblaster_e1_switch_get(struct snd_kcontrol *kcontrol,
+                                         struct snd_ctl_elem_value *ucontrol)
+{
+       ucontrol->value.integer.value[0] = kcontrol->private_value;
+       return 0;
+}
+
+static int snd_soundblaster_e1_switch_update(struct usb_mixer_interface *mixer,
+                                            unsigned char state)
+{
+       struct snd_usb_audio *chip = mixer->chip;
+       int err;
+       unsigned char buff[2];
+
+       buff[0] = 0x02;
+       buff[1] = state ? 0x02 : 0x00;
+
+       err = snd_usb_lock_shutdown(chip);
+       if (err < 0)
+               return err;
+       err = snd_usb_ctl_msg(chip->dev,
+                       usb_sndctrlpipe(chip->dev, 0), HID_REQ_SET_REPORT,
+                       USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+                       0x0202, 3, buff, 2);
+       snd_usb_unlock_shutdown(chip);
+       return err;
+}
+
+static int snd_soundblaster_e1_switch_put(struct snd_kcontrol *kcontrol,
+                                         struct snd_ctl_elem_value *ucontrol)
+{
+       struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+       unsigned char value = !!ucontrol->value.integer.value[0];
+       int err;
+
+       if (kcontrol->private_value == value)
+               return 0;
+       kcontrol->private_value = value;
+       err = snd_soundblaster_e1_switch_update(list->mixer, value);
+       return err < 0 ? err : 1;
+}
+
+static int snd_soundblaster_e1_switch_resume(struct usb_mixer_elem_list *list)
+{
+       return snd_soundblaster_e1_switch_update(list->mixer,
+                                                list->kctl->private_value);
+}
+
+static int snd_soundblaster_e1_switch_info(struct snd_kcontrol *kcontrol,
+                                          struct snd_ctl_elem_info *uinfo)
+{
+       static const char *const texts[2] = {
+               "Mic", "Aux"
+       };
+
+       return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
+}
+
+static struct snd_kcontrol_new snd_soundblaster_e1_input_switch = {
+       .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+       .name = "Input Source",
+       .info = snd_soundblaster_e1_switch_info,
+       .get = snd_soundblaster_e1_switch_get,
+       .put = snd_soundblaster_e1_switch_put,
+       .private_value = 0,
+};
+
+static int snd_soundblaster_e1_switch_create(struct usb_mixer_interface *mixer)
+{
+       return add_single_ctl_with_resume(mixer, 0,
+                                         snd_soundblaster_e1_switch_resume,
+                                         &snd_soundblaster_e1_input_switch,
+                                         NULL);
+}
+
 int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
 {
        int err = 0;
@@ -1802,6 +1880,10 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
        case USB_ID(0x1235, 0x800c): /* Focusrite Scarlett 18i20 */
                err = snd_scarlett_controls_create(mixer);
                break;
+
+       case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
+               err = snd_soundblaster_e1_switch_create(mixer);
+               break;
        }
 
        return err;
index 8a59d47..5025204 100644
@@ -3277,4 +3277,52 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
        }
 },
 
+{
+       /*
+        * Nura's first gen headphones use Cambridge Silicon Radio's vendor
+        * ID, but it looks like the product ID actually is only for Nura.
+        * The capture interface does not work at all (even on Windows),
+        * and only the 48 kHz sample rate works for the playback interface.
+        */
+       USB_DEVICE(0x0a12, 0x1243),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
+                       },
+                       /* Capture */
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_IGNORE_INTERFACE,
+                       },
+                       /* Playback */
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 2,
+                                       .iface = 2,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .attributes = UAC_EP_CS_ATTR_FILL_MAX |
+                                               UAC_EP_CS_ATTR_SAMPLE_RATE,
+                                       .endpoint = 0x03,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC,
+                                       .rates = SNDRV_PCM_RATE_48000,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) {
+                                               48000
+                                       }
+                               }
+                       },
+               }
+       }
+},
+
 #undef USB_DEVICE_VENDOR_SPEC
index 77eecaa..a66ef57 100644
@@ -1166,10 +1166,11 @@ static bool is_marantz_denon_dac(unsigned int id)
 /* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
  * between PCM/DOP and native DSD mode
  */
-static bool is_teac_50X_dac(unsigned int id)
+static bool is_teac_dsd_dac(unsigned int id)
 {
        switch (id) {
        case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+       case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
                return true;
        }
        return false;
@@ -1202,7 +1203,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
                        break;
                }
                mdelay(20);
-       } else if (is_teac_50X_dac(subs->stream->chip->usb_id)) {
+       } else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
                /* Vendor mode switch cmd is required. */
                switch (fmt->altsetting) {
                case 3: /* DSD mode (DSD_U32) requested */
@@ -1392,7 +1393,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        }
 
        /* TEAC devices with USB DAC functionality */
-       if (is_teac_50X_dac(chip->usb_id)) {
+       if (is_teac_dsd_dac(chip->usb_id)) {
                if (fp->altsetting == 3)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
        }
index 1f57bbe..6edd177 100644
@@ -152,6 +152,12 @@ struct kvm_arch_memory_slot {
        (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
 #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
 
+/* PL1 Physical Timer Registers */
+#define KVM_REG_ARM_PTIMER_CTL         ARM_CP15_REG32(0, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CNT         ARM_CP15_REG64(0, 14)
+#define KVM_REG_ARM_PTIMER_CVAL                ARM_CP15_REG64(2, 14)
+
+/* Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL          ARM_CP15_REG32(0, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT          ARM_CP15_REG64(1, 14)
 #define KVM_REG_ARM_TIMER_CVAL         ARM_CP15_REG64(3, 14)
@@ -216,6 +222,7 @@ struct kvm_arch_memory_slot {
 #define   KVM_DEV_ARM_ITS_SAVE_TABLES          1
 #define   KVM_DEV_ARM_ITS_RESTORE_TABLES       2
 #define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define   KVM_DEV_ARM_ITS_CTRL_RESET           4
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT         24
diff --git a/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 0000000..b551b74
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index 51149ec..9abbf30 100644
@@ -196,6 +196,12 @@ struct kvm_arch_memory_slot {
 
 #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
 
+/* Physical Timer EL0 Registers */
+#define KVM_REG_ARM_PTIMER_CTL         ARM64_SYS_REG(3, 3, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CVAL                ARM64_SYS_REG(3, 3, 14, 2, 2)
+#define KVM_REG_ARM_PTIMER_CNT         ARM64_SYS_REG(3, 3, 14, 0, 1)
+
+/* EL0 Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL          ARM64_SYS_REG(3, 3, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT          ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL         ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -228,6 +234,7 @@ struct kvm_arch_memory_slot {
 #define   KVM_DEV_ARM_ITS_SAVE_TABLES           1
 #define   KVM_DEV_ARM_ITS_RESTORE_TABLES        2
 #define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define   KVM_DEV_ARM_ITS_CTRL_RESET           4
 
 /* Device Control API on vcpu fd */
 #define KVM_ARM_VCPU_PMU_V3_CTRL       0
diff --git a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 0000000..0a8e37a
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include "ptrace.h"
+
+typedef user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index 9ad172d..38535a5 100644
@@ -6,10 +6,6 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
index c36c97f..84606b8 100644
@@ -4,10 +4,6 @@
  *
  * Copyright 2014 IBM Corp.
  * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef __LINUX_KVM_PERF_S390_H
diff --git a/tools/arch/s390/include/uapi/asm/perf_regs.h b/tools/arch/s390/include/uapi/asm/perf_regs.h
new file mode 100644
index 0000000..d17dd9e
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_S390_PERF_REGS_H
+#define _ASM_S390_PERF_REGS_H
+
+enum perf_event_s390_regs {
+       PERF_REG_S390_R0,
+       PERF_REG_S390_R1,
+       PERF_REG_S390_R2,
+       PERF_REG_S390_R3,
+       PERF_REG_S390_R4,
+       PERF_REG_S390_R5,
+       PERF_REG_S390_R6,
+       PERF_REG_S390_R7,
+       PERF_REG_S390_R8,
+       PERF_REG_S390_R9,
+       PERF_REG_S390_R10,
+       PERF_REG_S390_R11,
+       PERF_REG_S390_R12,
+       PERF_REG_S390_R13,
+       PERF_REG_S390_R14,
+       PERF_REG_S390_R15,
+       PERF_REG_S390_FP0,
+       PERF_REG_S390_FP1,
+       PERF_REG_S390_FP2,
+       PERF_REG_S390_FP3,
+       PERF_REG_S390_FP4,
+       PERF_REG_S390_FP5,
+       PERF_REG_S390_FP6,
+       PERF_REG_S390_FP7,
+       PERF_REG_S390_FP8,
+       PERF_REG_S390_FP9,
+       PERF_REG_S390_FP10,
+       PERF_REG_S390_FP11,
+       PERF_REG_S390_FP12,
+       PERF_REG_S390_FP13,
+       PERF_REG_S390_FP14,
+       PERF_REG_S390_FP15,
+       PERF_REG_S390_MASK,
+       PERF_REG_S390_PC,
+
+       PERF_REG_S390_MAX
+};
+
+#endif /* _ASM_S390_PERF_REGS_H */
diff --git a/tools/arch/s390/include/uapi/asm/ptrace.h b/tools/arch/s390/include/uapi/asm/ptrace.h
new file mode 100644
index 0000000..543dd70
--- /dev/null
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *  S390 version
+ *    Copyright IBM Corp. 1999, 2000
+ *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#ifndef _UAPI_S390_PTRACE_H
+#define _UAPI_S390_PTRACE_H
+
+/*
+ * Offsets in the user_regs_struct. They are used for the ptrace
+ * system call and in entry.S
+ */
+#ifndef __s390x__
+
+#define PT_PSWMASK  0x00
+#define PT_PSWADDR  0x04
+#define PT_GPR0     0x08
+#define PT_GPR1     0x0C
+#define PT_GPR2     0x10
+#define PT_GPR3     0x14
+#define PT_GPR4     0x18
+#define PT_GPR5     0x1C
+#define PT_GPR6     0x20
+#define PT_GPR7     0x24
+#define PT_GPR8     0x28
+#define PT_GPR9     0x2C
+#define PT_GPR10    0x30
+#define PT_GPR11    0x34
+#define PT_GPR12    0x38
+#define PT_GPR13    0x3C
+#define PT_GPR14    0x40
+#define PT_GPR15    0x44
+#define PT_ACR0     0x48
+#define PT_ACR1     0x4C
+#define PT_ACR2     0x50
+#define PT_ACR3     0x54
+#define PT_ACR4            0x58
+#define PT_ACR5            0x5C
+#define PT_ACR6            0x60
+#define PT_ACR7            0x64
+#define PT_ACR8            0x68
+#define PT_ACR9            0x6C
+#define PT_ACR10    0x70
+#define PT_ACR11    0x74
+#define PT_ACR12    0x78
+#define PT_ACR13    0x7C
+#define PT_ACR14    0x80
+#define PT_ACR15    0x84
+#define PT_ORIGGPR2 0x88
+#define PT_FPC     0x90
+/*
+ * A nasty fact of life is that the ptrace api
+ * only supports passing of longs.
+ */
+#define PT_FPR0_HI  0x98
+#define PT_FPR0_LO  0x9C
+#define PT_FPR1_HI  0xA0
+#define PT_FPR1_LO  0xA4
+#define PT_FPR2_HI  0xA8
+#define PT_FPR2_LO  0xAC
+#define PT_FPR3_HI  0xB0
+#define PT_FPR3_LO  0xB4
+#define PT_FPR4_HI  0xB8
+#define PT_FPR4_LO  0xBC
+#define PT_FPR5_HI  0xC0
+#define PT_FPR5_LO  0xC4
+#define PT_FPR6_HI  0xC8
+#define PT_FPR6_LO  0xCC
+#define PT_FPR7_HI  0xD0
+#define PT_FPR7_LO  0xD4
+#define PT_FPR8_HI  0xD8
+#define PT_FPR8_LO  0XDC
+#define PT_FPR9_HI  0xE0
+#define PT_FPR9_LO  0xE4
+#define PT_FPR10_HI 0xE8
+#define PT_FPR10_LO 0xEC
+#define PT_FPR11_HI 0xF0
+#define PT_FPR11_LO 0xF4
+#define PT_FPR12_HI 0xF8
+#define PT_FPR12_LO 0xFC
+#define PT_FPR13_HI 0x100
+#define PT_FPR13_LO 0x104
+#define PT_FPR14_HI 0x108
+#define PT_FPR14_LO 0x10C
+#define PT_FPR15_HI 0x110
+#define PT_FPR15_LO 0x114
+#define PT_CR_9            0x118
+#define PT_CR_10    0x11C
+#define PT_CR_11    0x120
+#define PT_IEEE_IP  0x13C
+#define PT_LASTOFF  PT_IEEE_IP
+#define PT_ENDREGS  0x140-1
+
+#define GPR_SIZE       4
+#define CR_SIZE                4
+
+#define STACK_FRAME_OVERHEAD   96      /* size of minimum stack frame */
+
+#else /* __s390x__ */
+
+#define PT_PSWMASK  0x00
+#define PT_PSWADDR  0x08
+#define PT_GPR0     0x10
+#define PT_GPR1     0x18
+#define PT_GPR2     0x20
+#define PT_GPR3     0x28
+#define PT_GPR4     0x30
+#define PT_GPR5     0x38
+#define PT_GPR6     0x40
+#define PT_GPR7     0x48
+#define PT_GPR8     0x50
+#define PT_GPR9     0x58
+#define PT_GPR10    0x60
+#define PT_GPR11    0x68
+#define PT_GPR12    0x70
+#define PT_GPR13    0x78
+#define PT_GPR14    0x80
+#define PT_GPR15    0x88
+#define PT_ACR0     0x90
+#define PT_ACR1     0x94
+#define PT_ACR2     0x98
+#define PT_ACR3     0x9C
+#define PT_ACR4            0xA0
+#define PT_ACR5            0xA4
+#define PT_ACR6            0xA8
+#define PT_ACR7            0xAC
+#define PT_ACR8            0xB0
+#define PT_ACR9            0xB4
+#define PT_ACR10    0xB8
+#define PT_ACR11    0xBC
+#define PT_ACR12    0xC0
+#define PT_ACR13    0xC4
+#define PT_ACR14    0xC8
+#define PT_ACR15    0xCC
+#define PT_ORIGGPR2 0xD0
+#define PT_FPC     0xD8
+#define PT_FPR0     0xE0
+#define PT_FPR1     0xE8
+#define PT_FPR2     0xF0
+#define PT_FPR3     0xF8
+#define PT_FPR4     0x100
+#define PT_FPR5     0x108
+#define PT_FPR6     0x110
+#define PT_FPR7     0x118
+#define PT_FPR8     0x120
+#define PT_FPR9     0x128
+#define PT_FPR10    0x130
+#define PT_FPR11    0x138
+#define PT_FPR12    0x140
+#define PT_FPR13    0x148
+#define PT_FPR14    0x150
+#define PT_FPR15    0x158
+#define PT_CR_9     0x160
+#define PT_CR_10    0x168
+#define PT_CR_11    0x170
+#define PT_IEEE_IP  0x1A8
+#define PT_LASTOFF  PT_IEEE_IP
+#define PT_ENDREGS  0x1B0-1
+
+#define GPR_SIZE       8
+#define CR_SIZE                8
+
+#define STACK_FRAME_OVERHEAD   160      /* size of minimum stack frame */
+
+#endif /* __s390x__ */
+
+#define NUM_GPRS       16
+#define NUM_FPRS       16
+#define NUM_CRS                16
+#define NUM_ACRS       16
+
+#define NUM_CR_WORDS   3
+
+#define FPR_SIZE       8
+#define FPC_SIZE       4
+#define FPC_PAD_SIZE   4 /* gcc insists on aligning the fpregs */
+#define ACR_SIZE       4
+
+
+#define PTRACE_OLDSETOPTIONS        21
+
+#ifndef __ASSEMBLY__
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+typedef union {
+       float   f;
+       double  d;
+       __u64   ui;
+       struct
+       {
+               __u32 hi;
+               __u32 lo;
+       } fp;
+} freg_t;
+
+typedef struct {
+       __u32   fpc;
+       __u32   pad;
+       freg_t  fprs[NUM_FPRS];
+} s390_fp_regs;
+
+#define FPC_EXCEPTION_MASK     0xF8000000
+#define FPC_FLAGS_MASK         0x00F80000
+#define FPC_DXC_MASK           0x0000FF00
+#define FPC_RM_MASK            0x00000003
+
+/* this typedef defines what a Program Status Word looks like */
+typedef struct {
+       unsigned long mask;
+       unsigned long addr;
+} __attribute__ ((aligned(8))) psw_t;
+
+#ifndef __s390x__
+
+#define PSW_MASK_PER           0x40000000UL
+#define PSW_MASK_DAT           0x04000000UL
+#define PSW_MASK_IO            0x02000000UL
+#define PSW_MASK_EXT           0x01000000UL
+#define PSW_MASK_KEY           0x00F00000UL
+#define PSW_MASK_BASE          0x00080000UL    /* always one */
+#define PSW_MASK_MCHECK                0x00040000UL
+#define PSW_MASK_WAIT          0x00020000UL
+#define PSW_MASK_PSTATE                0x00010000UL
+#define PSW_MASK_ASC           0x0000C000UL
+#define PSW_MASK_CC            0x00003000UL
+#define PSW_MASK_PM            0x00000F00UL
+#define PSW_MASK_RI            0x00000000UL
+#define PSW_MASK_EA            0x00000000UL
+#define PSW_MASK_BA            0x00000000UL
+
+#define PSW_MASK_USER          0x0000FF00UL
+
+#define PSW_ADDR_AMODE         0x80000000UL
+#define PSW_ADDR_INSN          0x7FFFFFFFUL
+
+#define PSW_DEFAULT_KEY                (((unsigned long) PAGE_DEFAULT_ACC) << 20)
+
+#define PSW_ASC_PRIMARY                0x00000000UL
+#define PSW_ASC_ACCREG         0x00004000UL
+#define PSW_ASC_SECONDARY      0x00008000UL
+#define PSW_ASC_HOME           0x0000C000UL
+
+#else /* __s390x__ */
+
+#define PSW_MASK_PER           0x4000000000000000UL
+#define PSW_MASK_DAT           0x0400000000000000UL
+#define PSW_MASK_IO            0x0200000000000000UL
+#define PSW_MASK_EXT           0x0100000000000000UL
+#define PSW_MASK_BASE          0x0000000000000000UL
+#define PSW_MASK_KEY           0x00F0000000000000UL
+#define PSW_MASK_MCHECK                0x0004000000000000UL
+#define PSW_MASK_WAIT          0x0002000000000000UL
+#define PSW_MASK_PSTATE                0x0001000000000000UL
+#define PSW_MASK_ASC           0x0000C00000000000UL
+#define PSW_MASK_CC            0x0000300000000000UL
+#define PSW_MASK_PM            0x00000F0000000000UL
+#define PSW_MASK_RI            0x0000008000000000UL
+#define PSW_MASK_EA            0x0000000100000000UL
+#define PSW_MASK_BA            0x0000000080000000UL
+
+#define PSW_MASK_USER          0x0000FF0180000000UL
+
+#define PSW_ADDR_AMODE         0x0000000000000000UL
+#define PSW_ADDR_INSN          0xFFFFFFFFFFFFFFFFUL
+
+#define PSW_DEFAULT_KEY                (((unsigned long) PAGE_DEFAULT_ACC) << 52)
+
+#define PSW_ASC_PRIMARY                0x0000000000000000UL
+#define PSW_ASC_ACCREG         0x0000400000000000UL
+#define PSW_ASC_SECONDARY      0x0000800000000000UL
+#define PSW_ASC_HOME           0x0000C00000000000UL
+
+#endif /* __s390x__ */
+
+
+/*
+ * The s390_regs structure is used to define the elf_gregset_t.
+ */
+typedef struct {
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+       unsigned int  acrs[NUM_ACRS];
+       unsigned long orig_gpr2;
+} s390_regs;
+
+/*
+ * The user_pt_regs structure exports the beginning of
+ * the in-kernel pt_regs structure to user space.
+ */
+typedef struct {
+       unsigned long args[1];
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+} user_pt_regs;
+
+/*
+ * Now for the user space program event recording (trace) definitions.
+ * The following structures are used only for the ptrace interface; don't
+ * touch or even look at them unless you want to modify the user-space
+ * ptrace interface. In particular, stay away from them for in-kernel PER.
+ */
+typedef struct {
+       unsigned long cr[NUM_CR_WORDS];
+} per_cr_words;
+
+#define PER_EM_MASK 0xE8000000UL
+
+typedef struct {
+#ifdef __s390x__
+       unsigned                       : 32;
+#endif /* __s390x__ */
+       unsigned em_branching          : 1;
+       unsigned em_instruction_fetch  : 1;
+       /*
+        * Switching on storage alteration automatically fixes
+        * the storage alteration event bit in the users std.
+        */
+       unsigned em_storage_alteration : 1;
+       unsigned em_gpr_alt_unused     : 1;
+       unsigned em_store_real_address : 1;
+       unsigned                       : 3;
+       unsigned branch_addr_ctl       : 1;
+       unsigned                       : 1;
+       unsigned storage_alt_space_ctl : 1;
+       unsigned                       : 21;
+       unsigned long starting_addr;
+       unsigned long ending_addr;
+} per_cr_bits;
+
+typedef struct {
+       unsigned short perc_atmid;
+       unsigned long address;
+       unsigned char access_id;
+} per_lowcore_words;
+
+typedef struct {
+       unsigned perc_branching          : 1;
+       unsigned perc_instruction_fetch  : 1;
+       unsigned perc_storage_alteration : 1;
+       unsigned perc_gpr_alt_unused     : 1;
+       unsigned perc_store_real_address : 1;
+       unsigned                         : 3;
+       unsigned atmid_psw_bit_31        : 1;
+       unsigned atmid_validity_bit      : 1;
+       unsigned atmid_psw_bit_32        : 1;
+       unsigned atmid_psw_bit_5         : 1;
+       unsigned atmid_psw_bit_16        : 1;
+       unsigned atmid_psw_bit_17        : 1;
+       unsigned si                      : 2;
+       unsigned long address;
+       unsigned                         : 4;
+       unsigned access_id               : 4;
+} per_lowcore_bits;
+
+typedef struct {
+       union {
+               per_cr_words   words;
+               per_cr_bits    bits;
+       } control_regs;
+       /*
+        * The single_step and instruction_fetch bits are obsolete;
+        * the kernel always sets them to zero. To enable single
+        * stepping, use ptrace(PTRACE_SINGLESTEP) instead.
+        */
+       unsigned  single_step       : 1;
+       unsigned  instruction_fetch : 1;
+       unsigned                    : 30;
+       /*
+        * These addresses are copied into cr10 & cr11 if single
+        * stepping is switched off
+        */
+       unsigned long starting_addr;
+       unsigned long ending_addr;
+       union {
+               per_lowcore_words words;
+               per_lowcore_bits  bits;
+       } lowcore;
+} per_struct;
+
+typedef struct {
+       unsigned int  len;
+       unsigned long kernel_addr;
+       unsigned long process_addr;
+} ptrace_area;
+
+/*
+ * S/390-specific non-POSIX ptrace requests. I chose unusual values so
+ * they are unlikely to clash with future ptrace definitions.
+ */
+#define PTRACE_PEEKUSR_AREA          0x5000
+#define PTRACE_POKEUSR_AREA          0x5001
+#define PTRACE_PEEKTEXT_AREA         0x5002
+#define PTRACE_PEEKDATA_AREA         0x5003
+#define PTRACE_POKETEXT_AREA         0x5004
+#define PTRACE_POKEDATA_AREA         0x5005
+#define PTRACE_GET_LAST_BREAK        0x5006
+#define PTRACE_PEEK_SYSTEM_CALL       0x5007
+#define PTRACE_POKE_SYSTEM_CALL              0x5008
+#define PTRACE_ENABLE_TE             0x5009
+#define PTRACE_DISABLE_TE            0x5010
+#define PTRACE_TE_ABORT_RAND         0x5011
+
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the numbers assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK     12      /* resume execution until next branch */
+
+/*
+ * The PT_PROT definition is loosely based on the hppa BSD definition in
+ * gdb/hppab-nat.c
+ */
+#define PTRACE_PROT                      21
+
+typedef enum {
+       ptprot_set_access_watchpoint,
+       ptprot_set_write_watchpoint,
+       ptprot_disable_watchpoint
+} ptprot_flags;
+
+typedef struct {
+       unsigned long lowaddr;
+       unsigned long hiaddr;
+       ptprot_flags prot;
+} ptprot_area;
+
+/* Sequence of bytes for breakpoint illegal instruction.  */
+#define S390_BREAKPOINT     {0x0,0x1}
+#define S390_BREAKPOINT_U16 ((__u16)0x0001)
+#define S390_SYSCALL_OPCODE ((__u16)0x0a00)
+#define S390_SYSCALL_SIZE   2
+
+/*
+ * The user_regs_struct defines the way the user registers are
+ * stored on the stack for signal handling.
+ */
+struct user_regs_struct {
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+       unsigned int  acrs[NUM_ACRS];
+       unsigned long orig_gpr2;
+       s390_fp_regs fp_regs;
+       /*
+        * These PER registers are in here so that gdb can modify them
+        * itself, as there is no "official" ptrace interface for hardware
+        * watchpoints. This is the way Intel does it.
+        */
+       per_struct per_info;
+       unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_S390_PTRACE_H */
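The PT_* offsets and the ptrace_area layout added above are what an s390 tracer uses with PTRACE_PEEKUSR_AREA to read a slice of the tracee's user area in one call. A minimal, hedged sketch of that usage (illustrative only: it assumes an s390 target and a tracee already stopped under ptrace, and goes through the raw syscall so that only the UAPI header is needed):

#include <asm/ptrace.h>         /* PT_GPR2, PTRACE_PEEKUSR_AREA, ptrace_area */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

/* Read the stopped tracee's gpr2 through PTRACE_PEEKUSR_AREA. */
static unsigned long peek_gpr2(pid_t pid)
{
        unsigned long value = 0;
        ptrace_area parea = {
                .len          = sizeof(value),          /* bytes to copy */
                .kernel_addr  = PT_GPR2,                /* offset into the user area */
                .process_addr = (unsigned long)&value,  /* destination buffer in the tracer */
        };

        if (syscall(SYS_ptrace, PTRACE_PEEKUSR_AREA, pid, &parea, 0L) < 0)
                perror("PTRACE_PEEKUSR_AREA");
        return value;
}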
index 793690f..800104c 100644 (file)
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS       18      /* N 32-bit words worth of info */
-#define NBUGINTS       1       /* N 32-bit bug flags */
+#define NCAPINTS                       18         /* N 32-bit words worth of info */
+#define NBUGINTS                       1          /* N 32-bit bug flags */
 
 /*
  * Note: If the comment begins with a quoted string, that string is used
  * in /proc/cpuinfo instead of the macro name.  If the string is "",
  * this feature bit is not displayed in /proc/cpuinfo at all.
+ *
+ * When adding new features here that depend on other features,
+ * please update the table in kernel/cpu/cpuid-deps.c as well.
  */
 
-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
-#define X86_FEATURE_FPU                ( 0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME                ( 0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE         ( 0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE                ( 0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC                ( 0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR                ( 0*32+ 5) /* Model-Specific Registers */
-#define X86_FEATURE_PAE                ( 0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE                ( 0*32+ 7) /* Machine Check Exception */
-#define X86_FEATURE_CX8                ( 0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC       ( 0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP                ( 0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR       ( 0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE                ( 0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA                ( 0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV       ( 0*32+15) /* CMOV instructions */
-                                         /* (plus FCMOVcc, FCOMI with FPU) */
-#define X86_FEATURE_PAT                ( 0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36      ( 0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN         ( 0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLUSH    ( 0*32+19) /* CLFLUSH instruction */
-#define X86_FEATURE_DS         ( 0*32+21) /* "dts" Debug Store */
-#define X86_FEATURE_ACPI       ( 0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX                ( 0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR       ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
-#define X86_FEATURE_XMM                ( 0*32+25) /* "sse" */
-#define X86_FEATURE_XMM2       ( 0*32+26) /* "sse2" */
-#define X86_FEATURE_SELFSNOOP  ( 0*32+27) /* "ss" CPU self snoop */
-#define X86_FEATURE_HT         ( 0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC                ( 0*32+29) /* "tm" Automatic clock control */
-#define X86_FEATURE_IA64       ( 0*32+30) /* IA-64 processor */
-#define X86_FEATURE_PBE                ( 0*32+31) /* Pending Break Enable */
+/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
+#define X86_FEATURE_FPU                        ( 0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME                        ( 0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE                 ( 0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE                        ( 0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC                        ( 0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR                        ( 0*32+ 5) /* Model-Specific Registers */
+#define X86_FEATURE_PAE                        ( 0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE                        ( 0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_CX8                        ( 0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC               ( 0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP                        ( 0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR               ( 0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE                        ( 0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA                        ( 0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV               ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT                        ( 0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36              ( 0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN                 ( 0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLUSH            ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_DS                 ( 0*32+21) /* "dts" Debug Store */
+#define X86_FEATURE_ACPI               ( 0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX                        ( 0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR               ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM                        ( 0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2               ( 0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP          ( 0*32+27) /* "ss" CPU self snoop */
+#define X86_FEATURE_HT                 ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC                        ( 0*32+29) /* "tm" Automatic clock control */
+#define X86_FEATURE_IA64               ( 0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE                        ( 0*32+31) /* Pending Break Enable */
 
 /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
 /* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL    ( 1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MP         ( 1*32+19) /* MP Capable. */
-#define X86_FEATURE_NX         ( 1*32+20) /* Execute Disable */
-#define X86_FEATURE_MMXEXT     ( 1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_FXSR_OPT   ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
-#define X86_FEATURE_GBPAGES    ( 1*32+26) /* "pdpe1gb" GB pages */
-#define X86_FEATURE_RDTSCP     ( 1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM         ( 1*32+29) /* Long Mode (x86-64) */
-#define X86_FEATURE_3DNOWEXT   ( 1*32+30) /* AMD 3DNow! extensions */
-#define X86_FEATURE_3DNOW      ( 1*32+31) /* 3DNow! */
+#define X86_FEATURE_SYSCALL            ( 1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP                 ( 1*32+19) /* MP Capable */
+#define X86_FEATURE_NX                 ( 1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT             ( 1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT           ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES            ( 1*32+26) /* "pdpe1gb" GB pages */
+#define X86_FEATURE_RDTSCP             ( 1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM                 ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
+#define X86_FEATURE_3DNOWEXT           ( 1*32+30) /* AMD 3DNow extensions */
+#define X86_FEATURE_3DNOW              ( 1*32+31) /* 3DNow */
 
 /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY   ( 2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN    ( 2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI       ( 2*32+ 3) /* LongRun table interface */
+#define X86_FEATURE_RECOVERY           ( 2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN            ( 2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI               ( 2*32+ 3) /* LongRun table interface */
 
 /* Other features, Linux-defined mapping, word 3 */
 /* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX      ( 3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR    ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR  ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR        ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-/* cpu types for specific tunings: */
-#define X86_FEATURE_K8         ( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_K7         ( 3*32+ 5) /* "" Athlon */
-#define X86_FEATURE_P3         ( 3*32+ 6) /* "" P3 */
-#define X86_FEATURE_P4         ( 3*32+ 7) /* "" P4 */
-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
-#define X86_FEATURE_UP         ( 3*32+ 9) /* smp kernel running on up */
-#define X86_FEATURE_ART                ( 3*32+10) /* Platform has always running timer (ART) */
-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS       ( 3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS                ( 3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32  ( 3*32+14) /* "" syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
-#define X86_FEATURE_REP_GOOD   ( 3*32+16) /* rep microcode works well */
-#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
-#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
-#define X86_FEATURE_ACC_POWER  ( 3*32+19) /* AMD Accumulated Power Mechanism */
-#define X86_FEATURE_NOPL       ( 3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_ALWAYS     ( 3*32+21) /* "" Always-present feature */
-#define X86_FEATURE_XTOPOLOGY  ( 3*32+22) /* cpu topology enum extensions */
-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
-#define X86_FEATURE_NONSTOP_TSC        ( 3*32+24) /* TSC does not stop in C states */
-#define X86_FEATURE_CPUID      ( 3*32+25) /* CPU has CPUID instruction itself */
-#define X86_FEATURE_EXTD_APICID        ( 3*32+26) /* has extended APICID (8 bits) */
-#define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
-#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
+#define X86_FEATURE_CXMMX              ( 3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR            ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR          ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR                ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+
+/* CPU types for specific tunings: */
+#define X86_FEATURE_K8                 ( 3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7                 ( 3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3                 ( 3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4                 ( 3*32+ 7) /* "" P4 */
+#define X86_FEATURE_CONSTANT_TSC       ( 3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP                 ( 3*32+ 9) /* SMP kernel running on UP */
+#define X86_FEATURE_ART                        ( 3*32+10) /* Always running timer (ART) */
+#define X86_FEATURE_ARCH_PERFMON       ( 3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS               ( 3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS                        ( 3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32          ( 3*32+14) /* "" syscall in IA32 userspace */
+#define X86_FEATURE_SYSENTER32         ( 3*32+15) /* "" sysenter in IA32 userspace */
+#define X86_FEATURE_REP_GOOD           ( 3*32+16) /* REP microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC       ( 3*32+17) /* "" MFENCE synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC       ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
+#define X86_FEATURE_ACC_POWER          ( 3*32+19) /* AMD Accumulated Power Mechanism */
+#define X86_FEATURE_NOPL               ( 3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS             ( 3*32+21) /* "" Always-present feature */
+#define X86_FEATURE_XTOPOLOGY          ( 3*32+22) /* CPU topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE       ( 3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC                ( 3*32+24) /* TSC does not stop in C states */
+#define X86_FEATURE_CPUID              ( 3*32+25) /* CPU has CPUID instruction itself */
+#define X86_FEATURE_EXTD_APICID                ( 3*32+26) /* Extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM            ( 3*32+27) /* AMD multi-node processor */
+#define X86_FEATURE_APERFMPERF         ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
+#define X86_FEATURE_NONSTOP_TSC_S3     ( 3*32+30) /* TSC doesn't stop in S3 state */
+#define X86_FEATURE_TSC_KNOWN_FREQ     ( 3*32+31) /* TSC has known frequency */
 
-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3       ( 4*32+ 0) /* "pni" SSE-3 */
-#define X86_FEATURE_PCLMULQDQ  ( 4*32+ 1) /* PCLMULQDQ instruction */
-#define X86_FEATURE_DTES64     ( 4*32+ 2) /* 64-bit Debug Store */
-#define X86_FEATURE_MWAIT      ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
-#define X86_FEATURE_DSCPL      ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
-#define X86_FEATURE_VMX                ( 4*32+ 5) /* Hardware virtualization */
-#define X86_FEATURE_SMX                ( 4*32+ 6) /* Safer mode */
-#define X86_FEATURE_EST                ( 4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2                ( 4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_SSSE3      ( 4*32+ 9) /* Supplemental SSE-3 */
-#define X86_FEATURE_CID                ( 4*32+10) /* Context ID */
-#define X86_FEATURE_SDBG       ( 4*32+11) /* Silicon Debug */
-#define X86_FEATURE_FMA                ( 4*32+12) /* Fused multiply-add */
-#define X86_FEATURE_CX16       ( 4*32+13) /* CMPXCHG16B */
-#define X86_FEATURE_XTPR       ( 4*32+14) /* Send Task Priority Messages */
-#define X86_FEATURE_PDCM       ( 4*32+15) /* Performance Capabilities */
-#define X86_FEATURE_PCID       ( 4*32+17) /* Process Context Identifiers */
-#define X86_FEATURE_DCA                ( 4*32+18) /* Direct Cache Access */
-#define X86_FEATURE_XMM4_1     ( 4*32+19) /* "sse4_1" SSE-4.1 */
-#define X86_FEATURE_XMM4_2     ( 4*32+20) /* "sse4_2" SSE-4.2 */
-#define X86_FEATURE_X2APIC     ( 4*32+21) /* x2APIC */
-#define X86_FEATURE_MOVBE      ( 4*32+22) /* MOVBE instruction */
-#define X86_FEATURE_POPCNT      ( 4*32+23) /* POPCNT instruction */
-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
-#define X86_FEATURE_AES                ( 4*32+25) /* AES instructions */
-#define X86_FEATURE_XSAVE      ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
-#define X86_FEATURE_OSXSAVE    ( 4*32+27) /* "" XSAVE enabled in the OS */
-#define X86_FEATURE_AVX                ( 4*32+28) /* Advanced Vector Extensions */
-#define X86_FEATURE_F16C       ( 4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRAND     ( 4*32+30) /* The RDRAND instruction */
-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
+#define X86_FEATURE_XMM3               ( 4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ          ( 4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64             ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT              ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
+#define X86_FEATURE_DSCPL              ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
+#define X86_FEATURE_VMX                        ( 4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX                        ( 4*32+ 6) /* Safer Mode eXtensions */
+#define X86_FEATURE_EST                        ( 4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2                        ( 4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3              ( 4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_CID                        ( 4*32+10) /* Context ID */
+#define X86_FEATURE_SDBG               ( 4*32+11) /* Silicon Debug */
+#define X86_FEATURE_FMA                        ( 4*32+12) /* Fused multiply-add */
+#define X86_FEATURE_CX16               ( 4*32+13) /* CMPXCHG16B instruction */
+#define X86_FEATURE_XTPR               ( 4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM               ( 4*32+15) /* Perf/Debug Capabilities MSR */
+#define X86_FEATURE_PCID               ( 4*32+17) /* Process Context Identifiers */
+#define X86_FEATURE_DCA                        ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_1             ( 4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2             ( 4*32+20) /* "sse4_2" SSE-4.2 */
+#define X86_FEATURE_X2APIC             ( 4*32+21) /* X2APIC */
+#define X86_FEATURE_MOVBE              ( 4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_POPCNT             ( 4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
+#define X86_FEATURE_AES                        ( 4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE              ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
+#define X86_FEATURE_OSXSAVE            ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
+#define X86_FEATURE_AVX                        ( 4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C               ( 4*32+29) /* 16-bit FP conversions */
+#define X86_FEATURE_RDRAND             ( 4*32+30) /* RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR         ( 4*32+31) /* Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE     ( 5*32+ 2) /* "rng" RNG present (xstore) */
-#define X86_FEATURE_XSTORE_EN  ( 5*32+ 3) /* "rng_en" RNG enabled */
-#define X86_FEATURE_XCRYPT     ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
-#define X86_FEATURE_XCRYPT_EN  ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
-#define X86_FEATURE_ACE2       ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN    ( 5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE                ( 5*32+10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN     ( 5*32+11) /* PHE enabled */
-#define X86_FEATURE_PMM                ( 5*32+12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN     ( 5*32+13) /* PMM enabled */
+#define X86_FEATURE_XSTORE             ( 5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN          ( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT             ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN          ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
+#define X86_FEATURE_ACE2               ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN            ( 5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE                        ( 5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN             ( 5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM                        ( 5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN             ( 5*32+13) /* PMM enabled */
 
-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
-#define X86_FEATURE_LAHF_LM    ( 6*32+ 0) /* LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
-#define X86_FEATURE_SVM                ( 6*32+ 2) /* Secure virtual machine */
-#define X86_FEATURE_EXTAPIC    ( 6*32+ 3) /* Extended APIC space */
-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
-#define X86_FEATURE_ABM                ( 6*32+ 5) /* Advanced bit manipulation */
-#define X86_FEATURE_SSE4A      ( 6*32+ 6) /* SSE-4A */
-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
-#define X86_FEATURE_OSVW       ( 6*32+ 9) /* OS Visible Workaround */
-#define X86_FEATURE_IBS                ( 6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_XOP                ( 6*32+11) /* extended AVX instructions */
-#define X86_FEATURE_SKINIT     ( 6*32+12) /* SKINIT/STGI instructions */
-#define X86_FEATURE_WDT                ( 6*32+13) /* Watchdog timer */
-#define X86_FEATURE_LWP                ( 6*32+15) /* Light Weight Profiling */
-#define X86_FEATURE_FMA4       ( 6*32+16) /* 4 operands MAC instructions */
-#define X86_FEATURE_TCE                ( 6*32+17) /* translation cache extension */
-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
-#define X86_FEATURE_TBM                ( 6*32+21) /* trailing bit manipulations */
-#define X86_FEATURE_TOPOEXT    ( 6*32+22) /* topology extensions CPUID leafs */
-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
-#define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */
-#define X86_FEATURE_BPEXT      (6*32+26) /* data breakpoint extension */
-#define X86_FEATURE_PTSC       ( 6*32+27) /* performance time-stamp counter */
-#define X86_FEATURE_PERFCTR_LLC        ( 6*32+28) /* Last Level Cache performance counter extensions */
-#define X86_FEATURE_MWAITX     ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
+/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
+#define X86_FEATURE_LAHF_LM            ( 6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY         ( 6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_SVM                        ( 6*32+ 2) /* Secure Virtual Machine */
+#define X86_FEATURE_EXTAPIC            ( 6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY         ( 6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM                        ( 6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A              ( 6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE                ( 6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH      ( 6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW               ( 6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS                        ( 6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_XOP                        ( 6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SKINIT             ( 6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT                        ( 6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP                        ( 6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4               ( 6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE                        ( 6*32+17) /* Translation Cache Extension */
+#define X86_FEATURE_NODEID_MSR         ( 6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM                        ( 6*32+21) /* Trailing Bit Manipulations */
+#define X86_FEATURE_TOPOEXT            ( 6*32+22) /* Topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE       ( 6*32+23) /* Core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB         ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT              ( 6*32+26) /* Data breakpoint extension */
+#define X86_FEATURE_PTSC               ( 6*32+27) /* Performance time-stamp counter */
+#define X86_FEATURE_PERFCTR_LLC                ( 6*32+28) /* Last Level Cache performance counter extensions */
+#define X86_FEATURE_MWAITX             ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
  *
  * Reuse free bits when adding new feature flags!
  */
-#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */
-#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
-#define X86_FEATURE_CPB                ( 7*32+ 2) /* AMD Core Performance Boost */
-#define X86_FEATURE_EPB                ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
-#define X86_FEATURE_CAT_L3     ( 7*32+ 4) /* Cache Allocation Technology L3 */
-#define X86_FEATURE_CAT_L2     ( 7*32+ 5) /* Cache Allocation Technology L2 */
-#define X86_FEATURE_CDP_L3     ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_RING3MWAIT         ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
+#define X86_FEATURE_CPUID_FAULT                ( 7*32+ 1) /* Intel CPUID faulting */
+#define X86_FEATURE_CPB                        ( 7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB                        ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_CAT_L3             ( 7*32+ 4) /* Cache Allocation Technology L3 */
+#define X86_FEATURE_CAT_L2             ( 7*32+ 5) /* Cache Allocation Technology L2 */
+#define X86_FEATURE_CDP_L3             ( 7*32+ 6) /* Code and Data Prioritization L3 */
 
-#define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_SME                ( 7*32+10) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_HW_PSTATE          ( 7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK      ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_SME                        ( 7*32+10) /* AMD Secure Memory Encryption */
 
-#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
-#define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
-#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_INTEL_PPIN         ( 7*32+14) /* Intel Processor Inventory Number */
+#define X86_FEATURE_INTEL_PT           ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AVX512_4VNNIW      ( 7*32+16) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS      ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
-#define X86_FEATURE_MBA         ( 7*32+18) /* Memory Bandwidth Allocation */
+#define X86_FEATURE_MBA                        ( 7*32+18) /* Memory Bandwidth Allocation */
 
 /* Virtualization flags: Linux defined, word 8 */
-#define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
-#define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
-#define X86_FEATURE_EPT         ( 8*32+ 3) /* Intel Extended Page Table */
-#define X86_FEATURE_VPID        ( 8*32+ 4) /* Intel Virtual Processor ID */
+#define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI               ( 8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY       ( 8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT                        ( 8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID               ( 8*32+ 4) /* Intel Virtual Processor ID */
 
-#define X86_FEATURE_VMMCALL     ( 8*32+15) /* Prefer vmmcall to vmcall */
-#define X86_FEATURE_XENPV       ( 8*32+16) /* "" Xen paravirtual guest */
+#define X86_FEATURE_VMMCALL            ( 8*32+15) /* Prefer VMMCALL to VMCALL */
+#define X86_FEATURE_XENPV              ( 8*32+16) /* "" Xen paravirtual guest */
 
 
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
-#define X86_FEATURE_FSGSBASE   ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
-#define X86_FEATURE_BMI1       ( 9*32+ 3) /* 1st group bit manipulation extensions */
-#define X86_FEATURE_HLE                ( 9*32+ 4) /* Hardware Lock Elision */
-#define X86_FEATURE_AVX2       ( 9*32+ 5) /* AVX2 instructions */
-#define X86_FEATURE_SMEP       ( 9*32+ 7) /* Supervisor Mode Execution Protection */
-#define X86_FEATURE_BMI2       ( 9*32+ 8) /* 2nd group bit manipulation extensions */
-#define X86_FEATURE_ERMS       ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
-#define X86_FEATURE_INVPCID    ( 9*32+10) /* Invalidate Processor Context ID */
-#define X86_FEATURE_RTM                ( 9*32+11) /* Restricted Transactional Memory */
-#define X86_FEATURE_CQM                ( 9*32+12) /* Cache QoS Monitoring */
-#define X86_FEATURE_MPX                ( 9*32+14) /* Memory Protection Extension */
-#define X86_FEATURE_RDT_A      ( 9*32+15) /* Resource Director Technology Allocation */
-#define X86_FEATURE_AVX512F    ( 9*32+16) /* AVX-512 Foundation */
-#define X86_FEATURE_AVX512DQ   ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
-#define X86_FEATURE_RDSEED     ( 9*32+18) /* The RDSEED instruction */
-#define X86_FEATURE_ADX                ( 9*32+19) /* The ADCX and ADOX instructions */
-#define X86_FEATURE_SMAP       ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_AVX512IFMA  ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
-#define X86_FEATURE_CLWB       ( 9*32+24) /* CLWB instruction */
-#define X86_FEATURE_AVX512PF   ( 9*32+26) /* AVX-512 Prefetch */
-#define X86_FEATURE_AVX512ER   ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
-#define X86_FEATURE_AVX512CD   ( 9*32+28) /* AVX-512 Conflict Detection */
-#define X86_FEATURE_SHA_NI     ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
-#define X86_FEATURE_AVX512BW   ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
-#define X86_FEATURE_AVX512VL   ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
+#define X86_FEATURE_FSGSBASE           ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
+#define X86_FEATURE_TSC_ADJUST         ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
+#define X86_FEATURE_BMI1               ( 9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE                        ( 9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_AVX2               ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_SMEP               ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2               ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS               ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
+#define X86_FEATURE_INVPCID            ( 9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM                        ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM                        ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_MPX                        ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_RDT_A              ( 9*32+15) /* Resource Director Technology Allocation */
+#define X86_FEATURE_AVX512F            ( 9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_AVX512DQ           ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
+#define X86_FEATURE_RDSEED             ( 9*32+18) /* RDSEED instruction */
+#define X86_FEATURE_ADX                        ( 9*32+19) /* ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP               ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_AVX512IFMA         ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
+#define X86_FEATURE_CLFLUSHOPT         ( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB               ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_AVX512PF           ( 9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER           ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD           ( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI             ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
+#define X86_FEATURE_AVX512BW           ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
+#define X86_FEATURE_AVX512VL           ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
 
-/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
-#define X86_FEATURE_XSAVEOPT   (10*32+ 0) /* XSAVEOPT */
-#define X86_FEATURE_XSAVEC     (10*32+ 1) /* XSAVEC */
-#define X86_FEATURE_XGETBV1    (10*32+ 2) /* XGETBV with ECX = 1 */
-#define X86_FEATURE_XSAVES     (10*32+ 3) /* XSAVES/XRSTORS */
+/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
+#define X86_FEATURE_XSAVEOPT           (10*32+ 0) /* XSAVEOPT instruction */
+#define X86_FEATURE_XSAVEC             (10*32+ 1) /* XSAVEC instruction */
+#define X86_FEATURE_XGETBV1            (10*32+ 2) /* XGETBV with ECX = 1 instruction */
+#define X86_FEATURE_XSAVES             (10*32+ 3) /* XSAVES/XRSTORS instructions */
 
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
-#define X86_FEATURE_CQM_LLC    (11*32+ 1) /* LLC QoS if 1 */
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
+#define X86_FEATURE_CQM_LLC            (11*32+ 1) /* LLC QoS if 1 */
 
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
-#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
-#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC      (12*32+ 0) /* LLC occupancy monitoring */
+#define X86_FEATURE_CQM_MBM_TOTAL      (12*32+ 1) /* LLC Total MBM monitoring */
+#define X86_FEATURE_CQM_MBM_LOCAL      (12*32+ 2) /* LLC Local MBM monitoring */
 
-/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
-#define X86_FEATURE_CLZERO     (13*32+0) /* CLZERO instruction */
-#define X86_FEATURE_IRPERF     (13*32+1) /* Instructions Retired Count */
+/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
+#define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
+#define X86_FEATURE_IRPERF             (13*32+ 1) /* Instructions Retired Count */
+#define X86_FEATURE_XSAVEERPTR         (13*32+ 2) /* Always save/restore FP error pointers */
 
-/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
-#define X86_FEATURE_DTHERM     (14*32+ 0) /* Digital Thermal Sensor */
-#define X86_FEATURE_IDA                (14*32+ 1) /* Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT       (14*32+ 2) /* Always Running APIC Timer */
-#define X86_FEATURE_PLN                (14*32+ 4) /* Intel Power Limit Notification */
-#define X86_FEATURE_PTS                (14*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_HWP                (14*32+ 7) /* Intel Hardware P-states */
-#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
-#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
-#define X86_FEATURE_HWP_EPP    (14*32+10) /* HWP Energy Perf. Preference */
-#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
+#define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA                        (14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT               (14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN                        (14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS                        (14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP                        (14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY         (14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW     (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP            (14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ                (14*32+11) /* HWP Package Level Request */
 
-/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
-#define X86_FEATURE_NPT                (15*32+ 0) /* Nested Page Table support */
-#define X86_FEATURE_LBRV       (15*32+ 1) /* LBR Virtualization support */
-#define X86_FEATURE_SVML       (15*32+ 2) /* "svm_lock" SVM locking MSR */
-#define X86_FEATURE_NRIPS      (15*32+ 3) /* "nrip_save" SVM next_rip save */
-#define X86_FEATURE_TSCRATEMSR  (15*32+ 4) /* "tsc_scale" TSC scaling support */
-#define X86_FEATURE_VMCBCLEAN   (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
-#define X86_FEATURE_AVIC       (15*32+13) /* Virtual Interrupt Controller */
-#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
-#define X86_FEATURE_VGIF       (15*32+16) /* Virtual GIF */
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
+#define X86_FEATURE_NPT                        (15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV               (15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML               (15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS              (15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR         (15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN          (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID                (15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS      (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER                (15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD                (15*32+12) /* pause filter threshold */
+#define X86_FEATURE_AVIC               (15*32+13) /* Virtual Interrupt Controller */
+#define X86_FEATURE_V_VMSAVE_VMLOAD    (15*32+15) /* Virtual VMSAVE VMLOAD */
+#define X86_FEATURE_VGIF               (15*32+16) /* Virtual GIF */
 
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
-#define X86_FEATURE_AVX512VBMI  (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
-#define X86_FEATURE_PKU                (16*32+ 3) /* Protection Keys for Userspace */
-#define X86_FEATURE_OSPKE      (16*32+ 4) /* OS Protection Keys Enable */
-#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
-#define X86_FEATURE_LA57       (16*32+16) /* 5-level page tables */
-#define X86_FEATURE_RDPID      (16*32+22) /* RDPID instruction */
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
+#define X86_FEATURE_AVX512VBMI         (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
+#define X86_FEATURE_UMIP               (16*32+ 2) /* User Mode Instruction Protection */
+#define X86_FEATURE_PKU                        (16*32+ 3) /* Protection Keys for Userspace */
+#define X86_FEATURE_OSPKE              (16*32+ 4) /* OS Protection Keys Enable */
+#define X86_FEATURE_AVX512_VBMI2       (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
+#define X86_FEATURE_GFNI               (16*32+ 8) /* Galois Field New Instructions */
+#define X86_FEATURE_VAES               (16*32+ 9) /* Vector AES */
+#define X86_FEATURE_VPCLMULQDQ         (16*32+10) /* Carry-Less Multiplication Double Quadword */
+#define X86_FEATURE_AVX512_VNNI                (16*32+11) /* Vector Neural Network Instructions */
+#define X86_FEATURE_AVX512_BITALG      (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_AVX512_VPOPCNTDQ   (16*32+14) /* POPCNT for vectors of DW/QW */
+#define X86_FEATURE_LA57               (16*32+16) /* 5-level page tables */
+#define X86_FEATURE_RDPID              (16*32+22) /* RDPID instruction */
 
-/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
-#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
-#define X86_FEATURE_SUCCOR     (17*32+1) /* Uncorrectable error containment and recovery */
-#define X86_FEATURE_SMCA       (17*32+3) /* Scalable MCA */
+/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
+#define X86_FEATURE_OVERFLOW_RECOV     (17*32+ 0) /* MCA overflow recovery support */
+#define X86_FEATURE_SUCCOR             (17*32+ 1) /* Uncorrectable error containment and recovery */
+#define X86_FEATURE_SMCA               (17*32+ 3) /* Scalable MCA */
 
 /*
  * BUG word(s)
  */
-#define X86_BUG(x)             (NCAPINTS*32 + (x))
+#define X86_BUG(x)                     (NCAPINTS*32 + (x))
 
-#define X86_BUG_F00F           X86_BUG(0) /* Intel F00F */
-#define X86_BUG_FDIV           X86_BUG(1) /* FPU FDIV */
-#define X86_BUG_COMA           X86_BUG(2) /* Cyrix 6x86 coma */
-#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
-#define X86_BUG_AMD_APIC_C1E   X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
-#define X86_BUG_11AP           X86_BUG(5) /* Bad local APIC aka 11AP */
-#define X86_BUG_FXSAVE_LEAK    X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
-#define X86_BUG_CLFLUSH_MONITOR        X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
-#define X86_BUG_SYSRET_SS_ATTRS        X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#define X86_BUG_F00F                   X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV                   X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA                   X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH         X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E           X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
+#define X86_BUG_11AP                   X86_BUG(5) /* Bad local APIC aka 11AP */
+#define X86_BUG_FXSAVE_LEAK            X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_BUG_CLFLUSH_MONITOR                X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS                X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
 #ifdef CONFIG_X86_32
 /*
  * 64-bit kernels don't use X86_BUG_ESPFIX.  Make the define conditional
  * to avoid confusion.
  */
-#define X86_BUG_ESPFIX         X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
+#define X86_BUG_ESPFIX                 X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
 #endif
-#define X86_BUG_NULL_SEG       X86_BUG(10) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE   X86_BUG(11) /* SWAPGS without input dep on GS */
-#define X86_BUG_MONITOR                X86_BUG(12) /* IPI required to wake up remote CPU */
-#define X86_BUG_AMD_E400       X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_NULL_SEG               X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE           X86_BUG(11) /* SWAPGS without input dep on GS */
+#define X86_BUG_MONITOR                        X86_BUG(12) /* IPI required to wake up remote CPU */
+#define X86_BUG_AMD_E400               X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+
 #endif /* _ASM_X86_CPUFEATURES_H */
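Every X86_FEATURE_* value in this header packs a capability-word index and a bit position into a single integer (word*32 + bit); the kernel's cpu_has()-style helpers split that back apart to index the per-CPU capability words filled from CPUID. A small standalone sketch of the decoding (the x86_capability array and the check below are stand-ins for illustration, not the kernel's real data structures):

#include <stdio.h>

#define X86_FEATURE_UMIP   (16*32 + 2)     /* word 16, bit 2, as defined above */

/* Stand-in for the kernel's NCAPINTS (18) per-CPU capability words. */
static unsigned int x86_capability[18];

static int has_feature(unsigned int feature)
{
        unsigned int word = feature / 32;  /* which 32-bit capability word */
        unsigned int bit  = feature % 32;  /* which bit inside that word   */

        return (x86_capability[word] >> bit) & 1;
}

int main(void)
{
        x86_capability[16] |= 1u << 2;     /* pretend CPUID leaf 7 reported UMIP */
        printf("umip: %d\n", has_feature(X86_FEATURE_UMIP));
        return 0;
}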
index c10c912..14d6d50 100644 (file)
 # define DISABLE_MPX   (1<<(X86_FEATURE_MPX & 31))
 #endif
 
+#ifdef CONFIG_X86_INTEL_UMIP
+# define DISABLE_UMIP  0
+#else
+# define DISABLE_UMIP  (1<<(X86_FEATURE_UMIP & 31))
+#endif
+
 #ifdef CONFIG_X86_64
 # define DISABLE_VME           (1<<(X86_FEATURE_VME & 31))
 # define DISABLE_K6_MTRR       (1<<(X86_FEATURE_K6_MTRR & 31))
@@ -63,7 +69,7 @@
 #define DISABLED_MASK13        0
 #define DISABLED_MASK14        0
 #define DISABLED_MASK15        0
-#define DISABLED_MASK16        (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
+#define DISABLED_MASK16        (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
 #define DISABLED_MASK17        0
 #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
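The new DISABLE_UMIP definition above is plain bit arithmetic: with CONFIG_X86_INTEL_UMIP disabled, bit (X86_FEATURE_UMIP & 31) is folded into DISABLED_MASK16, so the compile-time feature check can discard UMIP without any runtime test. A hedged, userspace-compilable sketch of that masking (the real check lives in the kernel's cpu_feature_enabled() machinery; the names below only mirror the header for illustration):

#include <stdio.h>

#define X86_FEATURE_UMIP   (16*32 + 2)

/* Mirror the !CONFIG_X86_INTEL_UMIP case: the feature is compiled out. */
#define DISABLE_UMIP       (1u << (X86_FEATURE_UMIP & 31))
#define DISABLED_MASK16    (DISABLE_UMIP)  /* |DISABLE_PKU|... in the real header */

static int umip_compiled_out(void)
{
        return !!(DISABLED_MASK16 & (1u << (X86_FEATURE_UMIP & 31)));
}

int main(void)
{
        printf("UMIP compiled out: %d\n", umip_compiled_out());
        return 0;
}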
 
index bde77d7..37292bb 100644 (file)
@@ -6,7 +6,7 @@ RM ?= rm -f
 
 # Make the path relative to DESTDIR, not prefix
 ifndef DESTDIR
-prefix?=$(HOME)
+prefix ?= /usr/local
 endif
 mandir ?= $(prefix)/share/man
 man8dir = $(mandir)/man8
index 813826c..ec3052c 100644 (file)
@@ -45,8 +45,8 @@ $(LIBBPF)-clean:
        $(call QUIET_CLEAN, libbpf)
        $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null
 
-prefix = /usr
-bash_compdir ?= $(prefix)/share/bash-completion/completions
+prefix = /usr/local
+bash_compdir ?= /usr/share/bash-completion/completions
 
 CC = gcc
 
@@ -76,6 +76,7 @@ clean: $(LIBBPF)-clean
        $(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
 
 install:
+       install -m 0755 -d $(prefix)/sbin
        install $(OUTPUT)bpftool $(prefix)/sbin/bpftool
        install -m 0755 -d $(bash_compdir)
        install -m 0644 bash-completion/bpftool $(bash_compdir)
@@ -88,5 +89,5 @@ doc-install:
 
 FORCE:
 
-.PHONY: all clean FORCE
+.PHONY: all clean FORCE install doc doc-install
 .DEFAULT_GOAL := all
index d6e4762..d294bc8 100644 (file)
@@ -58,11 +58,19 @@ bool show_pinned;
 struct pinned_obj_table prog_table;
 struct pinned_obj_table map_table;
 
+static void __noreturn clean_and_exit(int i)
+{
+       if (json_output)
+               jsonw_destroy(&json_wtr);
+
+       exit(i);
+}
+
 void usage(void)
 {
        last_do_help(last_argc - 1, last_argv + 1);
 
-       exit(-1);
+       clean_and_exit(-1);
 }
 
 static int do_help(int argc, char **argv)
@@ -280,6 +288,7 @@ int main(int argc, char **argv)
        hash_init(prog_table.table);
        hash_init(map_table.table);
 
+       opterr = 0;
        while ((opt = getopt_long(argc, argv, "Vhpjf",
                                  options, NULL)) >= 0) {
                switch (opt) {
@@ -291,13 +300,25 @@ int main(int argc, char **argv)
                        pretty_output = true;
                        /* fall through */
                case 'j':
-                       json_output = true;
+                       if (!json_output) {
+                               json_wtr = jsonw_new(stdout);
+                               if (!json_wtr) {
+                                       p_err("failed to create JSON writer");
+                                       return -1;
+                               }
+                               json_output = true;
+                       }
+                       jsonw_pretty(json_wtr, pretty_output);
                        break;
                case 'f':
                        show_pinned = true;
                        break;
                default:
-                       usage();
+                       p_err("unrecognized option '%s'", argv[optind - 1]);
+                       if (json_output)
+                               clean_and_exit(-1);
+                       else
+                               usage();
                }
        }
 
@@ -306,15 +327,6 @@ int main(int argc, char **argv)
        if (argc < 0)
                usage();
 
-       if (json_output) {
-               json_wtr = jsonw_new(stdout);
-               if (!json_wtr) {
-                       p_err("failed to create JSON writer");
-                       return -1;
-               }
-               jsonw_pretty(json_wtr, pretty_output);
-       }
-
        bfd_init();
 
        ret = cmd_select(cmds, argc, argv, do_help);
index 9c191e2..bff330b 100644 (file)
@@ -41,6 +41,7 @@
 #include <stdbool.h>
 #include <stdio.h>
 #include <linux/bpf.h>
+#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/hashtable.h>
 
@@ -50,7 +51,7 @@
 
 #define NEXT_ARG()     ({ argc--; argv++; if (argc < 0) usage(); })
 #define NEXT_ARGP()    ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
-#define BAD_ARG()      ({ p_err("what is '%s'?\n", *argv); -1; })
+#define BAD_ARG()      ({ p_err("what is '%s'?", *argv); -1; })
 
 #define ERR_MAX_LEN    1024
 
@@ -80,7 +81,7 @@ void p_info(const char *fmt, ...);
 
 bool is_prefix(const char *pfx, const char *str);
 void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
-void usage(void) __attribute__((noreturn));
+void usage(void) __noreturn;
 
 struct pinned_obj_table {
        DECLARE_HASHTABLE(table, 16);
index e2450c8..a8c3a33 100644 (file)
@@ -523,21 +523,23 @@ static int do_show(int argc, char **argv)
                                break;
                        p_err("can't get next map: %s%s", strerror(errno),
                              errno == EINVAL ? " -- kernel too old?" : "");
-                       return -1;
+                       break;
                }
 
                fd = bpf_map_get_fd_by_id(id);
                if (fd < 0) {
+                       if (errno == ENOENT)
+                               continue;
                        p_err("can't get map by id (%u): %s",
                              id, strerror(errno));
-                       return -1;
+                       break;
                }
 
                err = bpf_obj_get_info_by_fd(fd, &info, &len);
                if (err) {
                        p_err("can't get map info: %s", strerror(errno));
                        close(fd);
-                       return -1;
+                       break;
                }
 
                if (json_output)
index ad619b9..dded773 100644 (file)
@@ -382,6 +382,8 @@ static int do_show(int argc, char **argv)
 
                fd = bpf_prog_get_fd_by_id(id);
                if (fd < 0) {
+                       if (errno == ENOENT)
+                               continue;
                        p_err("can't get prog by id (%u): %s",
                              id, strerror(errno));
                        err = -1;
index eaa3bec..4c99c57 100644 (file)
@@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool)
        for (;;) {
                readp = &record[records_read];
                records_read += fread(readp, sizeof(struct kvp_record),
-                                       ENTRIES_PER_BLOCK * num_blocks,
-                                       filep);
+                               ENTRIES_PER_BLOCK * num_blocks - records_read,
+                               filep);
 
                if (ferror(filep)) {
-                       syslog(LOG_ERR, "Failed to read file, pool: %d", pool);
+                       syslog(LOG_ERR,
+                               "Failed to read file, pool: %d; error: %d %s",
+                                pool, errno, strerror(errno));
+                       kvp_release_lock(pool);
                        exit(EXIT_FAILURE);
                }
 
@@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool)
 
                        if (record == NULL) {
                                syslog(LOG_ERR, "malloc failed");
+                               kvp_release_lock(pool);
                                exit(EXIT_FAILURE);
                        }
                        continue;
@@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool)
        fclose(filep);
        kvp_release_lock(pool);
 }
+
 static int kvp_file_init(void)
 {
        int  fd;
-       FILE *filep;
-       size_t records_read;
        char *fname;
-       struct kvp_record *record;
-       struct kvp_record *readp;
-       int num_blocks;
        int i;
        int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
 
@@ -246,61 +246,19 @@ static int kvp_file_init(void)
 
        for (i = 0; i < KVP_POOL_COUNT; i++) {
                fname = kvp_file_info[i].fname;
-               records_read = 0;
-               num_blocks = 1;
                sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
                fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
 
                if (fd == -1)
                        return 1;
 
-
-               filep = fopen(fname, "re");
-               if (!filep) {
-                       close(fd);
-                       return 1;
-               }
-
-               record = malloc(alloc_unit * num_blocks);
-               if (record == NULL) {
-                       fclose(filep);
-                       close(fd);
-                       return 1;
-               }
-               for (;;) {
-                       readp = &record[records_read];
-                       records_read += fread(readp, sizeof(struct kvp_record),
-                                       ENTRIES_PER_BLOCK,
-                                       filep);
-
-                       if (ferror(filep)) {
-                               syslog(LOG_ERR, "Failed to read file, pool: %d",
-                                      i);
-                               exit(EXIT_FAILURE);
-                       }
-
-                       if (!feof(filep)) {
-                               /*
-                                * We have more data to read.
-                                */
-                               num_blocks++;
-                               record = realloc(record, alloc_unit *
-                                               num_blocks);
-                               if (record == NULL) {
-                                       fclose(filep);
-                                       close(fd);
-                                       return 1;
-                               }
-                               continue;
-                       }
-                       break;
-               }
                kvp_file_info[i].fd = fd;
-               kvp_file_info[i].num_blocks = num_blocks;
-               kvp_file_info[i].records = record;
-               kvp_file_info[i].num_records = records_read;
-               fclose(filep);
-
+               kvp_file_info[i].num_blocks = 1;
+               kvp_file_info[i].records = malloc(alloc_unit);
+               if (kvp_file_info[i].records == NULL)
+                       return 1;
+               kvp_file_info[i].num_records = 0;
+               kvp_update_mem_state(i);
        }
 
        return 0;
index 07fd03c..04e32f9 100644 (file)
@@ -84,8 +84,6 @@
 
 #define uninitialized_var(x) x = *(&(x))
 
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
-
 #include <linux/types.h>
 
 /*
@@ -135,20 +133,19 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- * compiler is aware of some particular ordering.  One way to make the
- * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
  *
- * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- * data types like structs or unions. If the size of the accessed data
- * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
- * compile-time warning.
+ * These two macros will also work on aggregate data types like structs or
+ * unions. If the size of the accessed data type exceeds the word size of
+ * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
+ * fall back to memcpy and print a compile-time warning.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
  * mutilate accesses that either do not require ordering or that interact
  * with an explicit memory barrier or atomic instruction that provides the
  * required ordering.
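
As a rough userspace sketch of the READ_ONCE()/WRITE_ONCE() contract the
comment above describes (scalar case only; the two macros below are
simplified assumptions for the example, not the tools/include versions):

  #include <pthread.h>
  #include <stdio.h>

  #define READ_ONCE(x)      (*(const volatile typeof(x) *)&(x))
  #define WRITE_ONCE(x, v)  (*(volatile typeof(x) *)&(x) = (v))

  static int flag;

  static void *producer(void *arg)
  {
          (void)arg;
          WRITE_ONCE(flag, 1);            /* exactly one store, never merged */
          return NULL;
  }

  int main(void)
  {
          pthread_t t;

          pthread_create(&t, NULL, producer, NULL);
          while (!READ_ONCE(flag))        /* re-read on every iteration */
                  ;                       /* ordering beyond this needs barriers */
          pthread_join(t, NULL);
          puts("flag observed");
          return 0;
  }
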
diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
index 940c1b0..6b0c36a 100644 (file)
@@ -48,6 +48,7 @@ static inline int debug_locks_off(void)
 #define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
 #define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 #define pr_warn pr_err
+#define pr_cont pr_err
 
 #define list_del_rcu list_del
 
diff --git a/tools/include/uapi/asm-generic/bpf_perf_event.h b/tools/include/uapi/asm-generic/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..53815d2
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+
+#include <linux/ptrace.h>
+
+/* Export kernel pt_regs structure */
+typedef struct pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */
index 2dffcbf..653687d 100644 (file)
@@ -13,6 +13,7 @@
 #define MAP_NONBLOCK   0x10000         /* do not block on IO */
 #define MAP_STACK      0x20000         /* give out an address that is best suited for process/thread stacks */
 #define MAP_HUGETLB    0x40000         /* create a huge page mapping */
+#define MAP_SYNC       0x80000         /* perform synchronous page faults for the mapping */
 
 /* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
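
A hedged usage sketch for the new MAP_SYNC flag: it is meant for DAX-backed
files and needs MAP_SHARED_VALIDATE so that a kernel without support fails
the mmap() instead of silently ignoring the flag. The file path is an
assumption for the example.

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/mman.h>

  #ifndef MAP_SYNC
  # define MAP_SYNC            0x80000
  #endif
  #ifndef MAP_SHARED_VALIDATE
  # define MAP_SHARED_VALIDATE 0x03
  #endif

  int main(void)
  {
          int fd = open("/mnt/pmem/data", O_RDWR);   /* example DAX file */
          void *p;

          if (fd < 0)
                  return 1;
          p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
          if (p == MAP_FAILED) {
                  perror("mmap MAP_SYNC");
                  return 1;
          }
          puts("synchronous page faults enabled for this mapping");
          return 0;
  }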
 
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..13a5853
--- /dev/null
@@ -0,0 +1,7 @@
+#if defined(__aarch64__)
+#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
+#elif defined(__s390__)
+#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
+#else
+#include <uapi/asm-generic/bpf_perf_event.h>
+#endif
index 97677cd..6fdff59 100644 (file)
@@ -737,6 +737,28 @@ struct drm_syncobj_array {
        __u32 pad;
 };
 
+/* Query current scanout sequence number */
+struct drm_crtc_get_sequence {
+       __u32 crtc_id;          /* requested crtc_id */
+       __u32 active;           /* return: crtc output is active */
+       __u64 sequence;         /* return: most recent vblank sequence */
+       __s64 sequence_ns;      /* return: most recent time of first pixel out */
+};
+
+/* Queue event to be delivered at specified sequence. Time stamp marks
+ * when the first pixel of the refresh cycle leaves the display engine
+ * for the display
+ */
+#define DRM_CRTC_SEQUENCE_RELATIVE             0x00000001      /* sequence is relative to current */
+#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS         0x00000002      /* Use next sequence if we've missed */
+
+struct drm_crtc_queue_sequence {
+       __u32 crtc_id;
+       __u32 flags;
+       __u64 sequence;         /* on input, target sequence. on output, actual sequence */
+       __u64 user_data;        /* user data passed to event */
+};
+
 #if defined(__cplusplus)
 }
 #endif
@@ -819,6 +841,9 @@ extern "C" {
 
 #define DRM_IOCTL_WAIT_VBLANK          DRM_IOWR(0x3a, union drm_wait_vblank)
 
+#define DRM_IOCTL_CRTC_GET_SEQUENCE    DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
+#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE  DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
+
 #define DRM_IOCTL_UPDATE_DRAW          DRM_IOW(0x3f, struct drm_update_draw)
 
 #define DRM_IOCTL_MODE_GETRESOURCES    DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -863,6 +888,11 @@ extern "C" {
 #define DRM_IOCTL_SYNCOBJ_RESET                DRM_IOWR(0xC4, struct drm_syncobj_array)
 #define DRM_IOCTL_SYNCOBJ_SIGNAL       DRM_IOWR(0xC5, struct drm_syncobj_array)
 
+#define DRM_IOCTL_MODE_CREATE_LEASE    DRM_IOWR(0xC6, struct drm_mode_create_lease)
+#define DRM_IOCTL_MODE_LIST_LESSEES    DRM_IOWR(0xC7, struct drm_mode_list_lessees)
+#define DRM_IOCTL_MODE_GET_LEASE       DRM_IOWR(0xC8, struct drm_mode_get_lease)
+#define DRM_IOCTL_MODE_REVOKE_LEASE    DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
@@ -893,6 +923,7 @@ struct drm_event {
 
 #define DRM_EVENT_VBLANK 0x01
 #define DRM_EVENT_FLIP_COMPLETE 0x02
+#define DRM_EVENT_CRTC_SEQUENCE        0x03
 
 struct drm_event_vblank {
        struct drm_event base;
@@ -903,6 +934,16 @@ struct drm_event_vblank {
        __u32 crtc_id; /* 0 on older kernels that do not support this */
 };
 
+/* Event delivered at sequence. Time stamp marks when the first pixel
+ * of the refresh cycle leaves the display engine for the display
+ */
+struct drm_event_crtc_sequence {
+       struct drm_event        base;
+       __u64                   user_data;
+       __s64                   time_ns;
+       __u64                   sequence;
+};
+
 /* typedef area */
 #ifndef __KERNEL__
 typedef struct drm_clip_rect drm_clip_rect_t;
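
A sketch of how userspace might query the new per-CRTC sequence counter
(device node, crtc_id and the uapi header path are assumptions; error
handling is minimal):

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <drm/drm.h>            /* uapi copy that has the new ioctl/struct */

  int main(void)
  {
          struct drm_crtc_get_sequence seq = { .crtc_id = 42 /* example id */ };
          int fd = open("/dev/dri/card0", O_RDWR);

          if (fd < 0)
                  return 1;
          if (ioctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &seq) == 0)
                  printf("active=%u sequence=%llu first-pixel-out=%lld ns\n",
                         seq.active,
                         (unsigned long long)seq.sequence,
                         (long long)seq.sequence_ns);
          return 0;
  }
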
index 9816590..ac3c650 100644 (file)
@@ -397,10 +397,20 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_MIN_EU_IN_POOL       39
 #define I915_PARAM_MMAP_GTT_VERSION     40
 
-/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
  * priorities and the driver will attempt to execute batches in priority order.
+ * The param returns a capability bitmask, nonzero implies that the scheduler
+ * is enabled, with different features present according to the mask.
+ *
+ * The initial priority for each batch is supplied by the context and is
+ * controlled via I915_CONTEXT_PARAM_PRIORITY.
  */
 #define I915_PARAM_HAS_SCHEDULER        41
+#define   I915_SCHEDULER_CAP_ENABLED   (1ul << 0)
+#define   I915_SCHEDULER_CAP_PRIORITY  (1ul << 1)
+#define   I915_SCHEDULER_CAP_PREEMPTION        (1ul << 2)
+
 #define I915_PARAM_HUC_STATUS           42
 
 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -1309,14 +1319,16 @@ struct drm_i915_reg_read {
         * be specified
         */
        __u64 offset;
+#define I915_REG_READ_8B_WA (1ul << 0)
+
        __u64 val; /* Return value */
 };
 /* Known registers:
  *
  * Render engine timestamp - 0x2358 + 64bit - gen7+
  * - Note this register returns an invalid value if using the default
- *   single instruction 8byte read, in order to workaround that use
- *   offset (0x2538 | 1) instead.
+ *   single instruction 8byte read, in order to workaround that pass
+ *   flag I915_REG_READ_8B_WA in offset field.
  *
  */
 
@@ -1359,6 +1371,10 @@ struct drm_i915_gem_context_param {
 #define I915_CONTEXT_PARAM_GTT_SIZE    0x3
 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE    0x4
 #define I915_CONTEXT_PARAM_BANNABLE    0x5
+#define I915_CONTEXT_PARAM_PRIORITY    0x6
+#define   I915_CONTEXT_MAX_USER_PRIORITY       1023 /* inclusive */
+#define   I915_CONTEXT_DEFAULT_PRIORITY                0
+#define   I915_CONTEXT_MIN_USER_PRIORITY       -1023 /* inclusive */
        __u64 value;
 };
 
@@ -1510,9 +1526,14 @@ struct drm_i915_perf_oa_config {
        __u32 n_boolean_regs;
        __u32 n_flex_regs;
 
-       __u64 __user mux_regs_ptr;
-       __u64 __user boolean_regs_ptr;
-       __u64 __user flex_regs_ptr;
+       /*
+        * These fields are pointers to tuples of u32 values (register
+        * address, value). For example the expected length of the buffer
+        * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+        */
+       __u64 mux_regs_ptr;
+       __u64 boolean_regs_ptr;
+       __u64 flex_regs_ptr;
 };
 
 #if defined(__cplusplus)
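
A sketch of raising a context's scheduling priority through the new
I915_CONTEXT_PARAM_PRIORITY (the render node path and the use of the
default context are assumptions; going above the default priority normally
requires CAP_SYS_NICE):

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <drm/i915_drm.h>       /* header path depends on the libdrm install */

  #ifndef I915_CONTEXT_PARAM_PRIORITY
  # define I915_CONTEXT_PARAM_PRIORITY 0x6
  #endif

  int main(void)
  {
          struct drm_i915_gem_context_param p = {
                  .ctx_id = 0,                     /* default context (assumption) */
                  .param  = I915_CONTEXT_PARAM_PRIORITY,
                  .value  = 512,                   /* must stay within +/-1023 */
          };
          int fd = open("/dev/dri/renderD128", O_RDWR);

          if (fd < 0)
                  return 1;
          return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p) ? 1 : 0;
  }
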
index 0674272..8f95303 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /* Copyright (c) 2016 Facebook
  *
  * This program is free software; you can redistribute it and/or
@@ -7,11 +8,10 @@
 #ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
 #define _UAPI__LINUX_BPF_PERF_EVENT_H__
 
-#include <linux/types.h>
-#include <linux/ptrace.h>
+#include <asm/bpf_perf_event.h>
 
 struct bpf_perf_event_data {
-       struct pt_regs regs;
+       bpf_user_pt_regs_t regs;
        __u64 sample_period;
 };
 
index 481e103..ef13050 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _UAPI_LINUX_KCMP_H
 #define _UAPI_LINUX_KCMP_H
 
index 7e99999..496e59a 100644 (file)
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
 
 struct kvm_s390_irq_state {
        __u64 buf;
-       __u32 flags;
+       __u32 flags;        /* will stay unused for compatibility reasons */
        __u32 len;
-       __u32 reserved[4];
+       __u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
 /* for KVM_SET_GUEST_DEBUG */
@@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PPC_SMT_POSSIBLE 147
 #define KVM_CAP_HYPERV_SYNIC2 148
 #define KVM_CAP_HYPERV_VP_INDEX 149
+#define KVM_CAP_S390_AIS_MIGRATION 150
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 362493a..b9a4953 100644 (file)
@@ -942,6 +942,7 @@ enum perf_callchain_context {
 #define PERF_AUX_FLAG_TRUNCATED                0x01    /* record was truncated to fit */
 #define PERF_AUX_FLAG_OVERWRITE                0x02    /* snapshot from overwrite mode */
 #define PERF_AUX_FLAG_PARTIAL          0x04    /* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION                0x08    /* sample collided with another */
 
 #define PERF_FLAG_FD_NO_GROUP          (1UL << 0)
 #define PERF_FLAG_FD_OUTPUT            (1UL << 1)
index a8d0759..af5f8c2 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _LINUX_PRCTL_H
 #define _LINUX_PRCTL_H
 
@@ -197,4 +198,13 @@ struct prctl_mm_map {
 # define PR_CAP_AMBIENT_LOWER          3
 # define PR_CAP_AMBIENT_CLEAR_ALL      4
 
+/* arm64 Scalable Vector Extension controls */
+/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
+#define PR_SVE_SET_VL                  50      /* set task vector length */
+# define PR_SVE_SET_VL_ONEXEC          (1 << 18) /* defer effect until exec */
+#define PR_SVE_GET_VL                  51      /* get task vector length */
+/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
+# define PR_SVE_VL_LEN_MASK            0xffff
+# define PR_SVE_VL_INHERIT             (1 << 17) /* inherit across exec */
+
 #endif /* _LINUX_PRCTL_H */
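
A quick sketch of the new SVE prctls (an arm64 kernel with SVE is assumed;
the requested vector length is an example value in bytes):

  #include <stdio.h>
  #include <sys/prctl.h>

  #ifndef PR_SVE_SET_VL
  # define PR_SVE_SET_VL      50
  # define PR_SVE_GET_VL      51
  # define PR_SVE_VL_LEN_MASK 0xffff
  #endif

  int main(void)
  {
          int vl = prctl(PR_SVE_GET_VL);

          if (vl < 0) {
                  perror("PR_SVE_GET_VL");
                  return 1;
          }
          printf("current vector length: %d bytes\n", vl & PR_SVE_VL_LEN_MASK);

          /* ask for a 32-byte vector length for this task (example value) */
          if (prctl(PR_SVE_SET_VL, 32) < 0)
                  perror("PR_SVE_SET_VL");
          return 0;
  }
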
index 217cf6f..a5684d0 100755 (executable)
@@ -478,7 +478,7 @@ class Provider(object):
     @staticmethod
     def is_field_wanted(fields_filter, field):
         """Indicate whether field is valid according to fields_filter."""
-        if not fields_filter or fields_filter == "help":
+        if not fields_filter:
             return True
         return re.match(fields_filter, field) is not None
 
@@ -549,8 +549,8 @@ class TracepointProvider(Provider):
 
     def update_fields(self, fields_filter):
         """Refresh fields, applying fields_filter"""
-        self._fields = [field for field in self.get_available_fields()
-                        if self.is_field_wanted(fields_filter, field)]
+        self.fields = [field for field in self.get_available_fields()
+                       if self.is_field_wanted(fields_filter, field)]
 
     @staticmethod
     def get_online_cpus():
@@ -950,7 +950,8 @@ class Tui(object):
             curses.nocbreak()
             curses.endwin()
 
-    def get_all_gnames(self):
+    @staticmethod
+    def get_all_gnames():
         """Returns a list of (pid, gname) tuples of all running guests"""
         res = []
         try:
@@ -963,7 +964,7 @@ class Tui(object):
             # perform a sanity check before calling the more expensive
             # function to possibly extract the guest name
             if ' -name ' in line[1]:
-                res.append((line[0], self.get_gname_from_pid(line[0])))
+                res.append((line[0], Tui.get_gname_from_pid(line[0])))
         child.stdout.close()
 
         return res
@@ -984,7 +985,8 @@ class Tui(object):
         except Exception:
             self.screen.addstr(row + 1, 2, 'Not available')
 
-    def get_pid_from_gname(self, gname):
+    @staticmethod
+    def get_pid_from_gname(gname):
         """Fuzzy function to convert guest name to QEMU process pid.
 
         Returns a list of potential pids, can be empty if no match found.
@@ -992,7 +994,7 @@ class Tui(object):
 
         """
         pids = []
-        for line in self.get_all_gnames():
+        for line in Tui.get_all_gnames():
             if gname == line[1]:
                 pids.append(int(line[0]))
 
@@ -1090,15 +1092,16 @@ class Tui(object):
             # sort by totals
             return (0, -stats[x][0])
         total = 0.
-        for val in stats.values():
-            total += val[0]
+        for key in stats.keys():
+            if key.find('(') is -1:
+                total += stats[key][0]
         if self._sorting == SORT_DEFAULT:
             sortkey = sortCurAvg
         else:
             sortkey = sortTotal
+        tavg = 0
         for key in sorted(stats.keys(), key=sortkey):
-
-            if row >= self.screen.getmaxyx()[0]:
+            if row >= self.screen.getmaxyx()[0] - 1:
                 break
             values = stats[key]
             if not values[0] and not values[1]:
@@ -1110,9 +1113,15 @@ class Tui(object):
                 self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
                                    (key, values[0], values[0] * 100 / total,
                                     cur))
+                if cur is not '' and key.find('(') is -1:
+                    tavg += cur
             row += 1
         if row == 3:
             self.screen.addstr(4, 1, 'No matching events reported yet')
+        else:
+            self.screen.addstr(row, 1, '%-40s %10d        %8s' %
+                               ('Total', total, tavg if tavg else ''),
+                               curses.A_BOLD)
         self.screen.refresh()
 
     def show_msg(self, text):
@@ -1358,7 +1367,7 @@ class Tui(object):
                 if char == 'x':
                     self.update_drilldown()
                     # prevents display of current values on next refresh
-                    self.stats.get()
+                    self.stats.get(self._display_guests)
             except KeyboardInterrupt:
                 break
             except curses.error:
@@ -1451,16 +1460,13 @@ Press any other key to refresh statistics immediately.
         try:
             pids = Tui.get_pid_from_gname(val)
         except:
-            raise optparse.OptionValueError('Error while searching for guest '
-                                            '"{}", use "-p" to specify a pid '
-                                            'instead'.format(val))
+            sys.exit('Error while searching for guest "{}". Use "-p" to '
+                     'specify a pid instead?'.format(val))
         if len(pids) == 0:
-            raise optparse.OptionValueError('No guest by the name "{}" '
-                                            'found'.format(val))
+            sys.exit('Error: No guest by the name "{}" found'.format(val))
         if len(pids) > 1:
-            raise optparse.OptionValueError('Multiple processes found (pids: '
-                                            '{}) - use "-p" to specify a pid '
-                                            'instead'.format(" ".join(pids)))
+            sys.exit('Error: Multiple processes found (pids: {}). Use "-p" '
+                     'to specify the desired pid'.format(" ".join(pids)))
         parser.values.pid = pids[0]
 
     optparser = optparse.OptionParser(description=description_text,
@@ -1518,7 +1524,16 @@ Press any other key to refresh statistics immediately.
                          help='restrict statistics to guest by name',
                          callback=cb_guest_to_pid,
                          )
-    (options, _) = optparser.parse_args(sys.argv)
+    options, unkn = optparser.parse_args(sys.argv)
+    if len(unkn) != 1:
+        sys.exit('Error: Extra argument(s): ' + ' '.join(unkn[1:]))
+    try:
+        # verify that we were passed a valid regex up front
+        re.compile(options.fields)
+    except re.error:
+        sys.exit('Error: "' + options.fields + '" is not a valid regular '
+                 'expression')
+
     return options
 
 
@@ -1564,16 +1579,13 @@ def main():
 
     stats = Stats(options)
 
-    if options.fields == "help":
-        event_list = "\n"
-        s = stats.get()
-        for key in s.keys():
-            if key.find('(') != -1:
-                key = key[0:key.find('(')]
-            if event_list.find('\n' + key + '\n') == -1:
-                event_list += key + '\n'
-        sys.stdout.write(event_list)
-        return ""
+    if options.fields == 'help':
+        stats.fields_filter = None
+        event_list = []
+        for key in stats.get().keys():
+            event_list.append(key.split('(', 1)[0])
+        sys.stdout.write('  ' + '\n  '.join(sorted(set(event_list))) + '\n')
+        sys.exit(0)
 
     if options.log:
         log(stats)
index e5cf836..b5b3810 100644 (file)
@@ -50,6 +50,8 @@ INTERACTIVE COMMANDS
 *s*::   set update interval
 
 *x*::  toggle reporting of stats for child trace events
+ ::     *Note*: The stats for the parents summarize the respective child trace
+                events
 
 Press any other key to refresh statistics immediately.
 
@@ -86,7 +88,7 @@ OPTIONS
 
 -f<fields>::
 --fields=<fields>::
-       fields to display (regex)
+       fields to display (regex), "-f help" for a list of available events
 
 -h::
 --help::
index 0f94af3..e6acc28 100644 (file)
@@ -7,9 +7,11 @@ ARCH := x86
 endif
 
 # always use the host compiler
-CC = gcc
-LD = ld
-AR = ar
+HOSTCC ?= gcc
+HOSTLD ?= ld
+CC      = $(HOSTCC)
+LD      = $(HOSTLD)
+AR      = ar
 
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -44,7 +46,7 @@ $(OBJTOOL_IN): fixdep FORCE
        @$(MAKE) $(build)=objtool
 
 $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
-       @./sync-check.sh
+       @$(CONFIG_SHELL) ./sync-check.sh
        $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
 
 
index 8acfc47..540a209 100644 (file)
@@ -138,7 +138,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
                        *type = INSN_STACK;
                        op->src.type = OP_SRC_ADD;
                        op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
-                       op->dest.type = OP_SRC_REG;
+                       op->dest.type = OP_DEST_REG;
                        op->dest.reg = CFI_SP;
                }
                break;
index 12e3771..e0b8593 100644 (file)
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
 fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
 fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
 fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
+ff: UD0
 EndTable
 
 Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
 7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
 80: INVEPT Gy,Mdq (66)
-81: INVPID Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
 82: INVPCID Gy,Mdq (66)
 83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
 88: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
 
 GrpTable: Grp3_1
 0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
 2: NOT Eb
 3: NEG Eb
 4: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
 EndTable
 
 GrpTable: Grp10
+# all are UD1
+0: UD1
+1: UD1
+2: UD1
+3: UD1
+4: UD1
+5: UD1
+6: UD1
+7: UD1
 EndTable
 
 # Grp11A and Grp11B are expressed as Grp11 in Intel SDM
index 4c6b5c9..91e8e19 100644 (file)
@@ -44,6 +44,9 @@ int cmd_orc(int argc, const char **argv)
        const char *objname;
 
        argc--; argv++;
+       if (argc <= 0)
+               usage_with_options(orc_usage, check_options);
+
        if (!strncmp(argv[0], "gen", 3)) {
                argc = parse_options(argc, argv, check_options, orc_usage, 0);
                if (argc != 1)
@@ -52,7 +55,6 @@ int cmd_orc(int argc, const char **argv)
                objname = argv[0];
 
                return check(objname, no_fp, no_unreachable, true);
-
        }
 
        if (!strcmp(argv[0], "dump")) {
index 9b34158..f40d46e 100644 (file)
@@ -427,6 +427,40 @@ static void add_ignores(struct objtool_file *file)
        }
 }
 
+/*
+ * FIXME: For now, just ignore any alternatives which add retpolines.  This is
+ * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
+ * But it at least allows objtool to understand the control flow *around* the
+ * retpoline.
+ */
+static int add_nospec_ignores(struct objtool_file *file)
+{
+       struct section *sec;
+       struct rela *rela;
+       struct instruction *insn;
+
+       sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+       if (!sec)
+               return 0;
+
+       list_for_each_entry(rela, &sec->rela_list, list) {
+               if (rela->sym->type != STT_SECTION) {
+                       WARN("unexpected relocation symbol type in %s", sec->name);
+                       return -1;
+               }
+
+               insn = find_insn(file, rela->sym->sec, rela->addend);
+               if (!insn) {
+                       WARN("bad .discard.nospec entry");
+                       return -1;
+               }
+
+               insn->ignore_alts = true;
+       }
+
+       return 0;
+}
+
 /*
  * Find the destination instructions for all jumps.
  */
@@ -456,6 +490,13 @@ static int add_jump_destinations(struct objtool_file *file)
                } else if (rela->sym->sec->idx) {
                        dest_sec = rela->sym->sec;
                        dest_off = rela->sym->sym.st_value + rela->addend + 4;
+               } else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+                       /*
+                        * Retpoline jumps are really dynamic jumps in
+                        * disguise, so convert them accordingly.
+                        */
+                       insn->type = INSN_JUMP_DYNAMIC;
+                       continue;
                } else {
                        /* sibling call */
                        insn->jump_dest = 0;
@@ -502,11 +543,18 @@ static int add_call_destinations(struct objtool_file *file)
                        dest_off = insn->offset + insn->len + insn->immediate;
                        insn->call_dest = find_symbol_by_offset(insn->sec,
                                                                dest_off);
+                       /*
+                        * FIXME: Thanks to retpolines, it's now considered
+                        * normal for a function to call within itself.  So
+                        * disable this warning for now.
+                        */
+#if 0
                        if (!insn->call_dest) {
                                WARN_FUNC("can't find call dest symbol at offset 0x%lx",
                                          insn->sec, insn->offset, dest_off);
                                return -1;
                        }
+#endif
                } else if (rela->sym->type == STT_SECTION) {
                        insn->call_dest = find_symbol_by_offset(rela->sym->sec,
                                                                rela->addend+4);
@@ -671,12 +719,6 @@ static int add_special_section_alts(struct objtool_file *file)
                return ret;
 
        list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
-               alt = malloc(sizeof(*alt));
-               if (!alt) {
-                       WARN("malloc failed");
-                       ret = -1;
-                       goto out;
-               }
 
                orig_insn = find_insn(file, special_alt->orig_sec,
                                      special_alt->orig_off);
@@ -687,6 +729,10 @@ static int add_special_section_alts(struct objtool_file *file)
                        goto out;
                }
 
+               /* Ignore retpoline alternatives. */
+               if (orig_insn->ignore_alts)
+                       continue;
+
                new_insn = NULL;
                if (!special_alt->group || special_alt->new_len) {
                        new_insn = find_insn(file, special_alt->new_sec,
@@ -712,6 +758,13 @@ static int add_special_section_alts(struct objtool_file *file)
                                goto out;
                }
 
+               alt = malloc(sizeof(*alt));
+               if (!alt) {
+                       WARN("malloc failed");
+                       ret = -1;
+                       goto out;
+               }
+
                alt->insn = new_insn;
                list_add_tail(&alt->list, &orig_insn->alts);
 
@@ -1028,6 +1081,10 @@ static int decode_sections(struct objtool_file *file)
 
        add_ignores(file);
 
+       ret = add_nospec_ignores(file);
+       if (ret)
+               return ret;
+
        ret = add_jump_destinations(file);
        if (ret)
                return ret;
index 47d9ea7..dbadb30 100644 (file)
@@ -44,7 +44,7 @@ struct instruction {
        unsigned int len;
        unsigned char type;
        unsigned long immediate;
-       bool alt_group, visited, dead_end, ignore, hint, save, restore;
+       bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
        struct symbol *call_dest;
        struct instruction *jump_dest;
        struct list_head alts;
index 2446015..c1c3386 100644 (file)
@@ -26,6 +26,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
+#include <errno.h>
 
 #include "elf.h"
 #include "warn.h"
@@ -358,7 +359,8 @@ struct elf *elf_open(const char *name, int flags)
 
        elf->fd = open(name, flags);
        if (elf->fd == -1) {
-               perror("open");
+               fprintf(stderr, "objtool: Can't open '%s': %s\n",
+                       name, strerror(errno));
                goto err;
        }
 
index 36c5bf6..c334382 100644 (file)
@@ -76,7 +76,8 @@ int orc_dump(const char *_objname)
        int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
        struct orc_entry *orc = NULL;
        char *name;
-       unsigned long nr_sections, orc_ip_addr = 0;
+       size_t nr_sections;
+       Elf64_Addr orc_ip_addr = 0;
        size_t shstrtab_idx;
        Elf *elf;
        Elf_Scn *scn;
@@ -187,10 +188,10 @@ int orc_dump(const char *_objname)
                                return -1;
                        }
 
-                       printf("%s+%lx:", name, rela.r_addend);
+                       printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
 
                } else {
-                       printf("%lx:", orc_ip_addr + (i * sizeof(int)) + orc_ip[i]);
+                       printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
                }
 
 
index e5ca314..e61fe70 100644 (file)
@@ -165,6 +165,8 @@ int create_orc_sections(struct objtool_file *file)
 
        /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
        sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
+       if (!sec)
+               return -1;
 
        ip_relasec = elf_create_rela_section(file->elf, sec);
        if (!ip_relasec)
index ed65e82..0294bfb 100644 (file)
@@ -188,9 +188,7 @@ ifdef PYTHON_CONFIG
   PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
   PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
   PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
-  ifeq ($(CC_NO_CLANG), 1)
-    PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
-  endif
+  PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
   FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
 endif
 
@@ -576,14 +574,15 @@ ifndef NO_GTK2
   endif
 endif
 
-
 ifdef NO_LIBPERL
   CFLAGS += -DNO_LIBPERL
 else
   PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
   PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
   PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
-  PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+  PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
+  PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
+  PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
   FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
 
   ifneq ($(feature-libperl), 1)
index 21322e0..09ba923 100644 (file)
@@ -2,3 +2,4 @@ ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 endif
 HAVE_KVM_STAT_SUPPORT := 1
+PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
index d2df54a..bcfbaed 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <stdlib.h>
 #include <linux/types.h>
-#include <../../../../arch/s390/include/uapi/asm/perf_regs.h>
+#include <asm/perf_regs.h>
 
 void perf_regs_load(u64 *regs);
 
index f47576c..a8ace5c 100644 (file)
@@ -2,17 +2,43 @@
 /*
  * Mapping of DWARF debug register numbers into register names.
  *
- *    Copyright IBM Corp. 2010
- *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ * Copyright IBM Corp. 2010, 2017
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *           Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  *
  */
 
+#include <errno.h>
 #include <stddef.h>
-#include <dwarf-regs.h>
+#include <stdlib.h>
 #include <linux/kernel.h>
+#include <asm/ptrace.h>
+#include <string.h>
+#include <dwarf-regs.h>
 #include "dwarf-regs-table.h"
 
 const char *get_arch_regstr(unsigned int n)
 {
        return (n >= ARRAY_SIZE(s390_dwarf_regs)) ? NULL : s390_dwarf_regs[n];
 }
+
+/*
+ * Convert the register name into an offset to struct pt_regs (kernel).
+ * This is required by the BPF prologue generator.  The BPF
+ * program is called in the BPF overflow handler in the perf
+ * core.
+ */
+int regs_query_register_offset(const char *name)
+{
+       unsigned long gpr;
+
+       if (!name || strncmp(name, "%r", 2))
+               return -EINVAL;
+
+       errno = 0;
+       gpr = strtoul(name + 2, NULL, 10);
+       if (errno || gpr >= 16)
+               return -EINVAL;
+
+       return offsetof(user_pt_regs, gprs) + 8 * gpr;
+}
index d95fdcc..944070e 100644 (file)
@@ -216,6 +216,47 @@ static const char * const numa_usage[] = {
        NULL
 };
 
+/*
+ * To get number of numa nodes present.
+ */
+static int nr_numa_nodes(void)
+{
+       int i, nr_nodes = 0;
+
+       for (i = 0; i < g->p.nr_nodes; i++) {
+               if (numa_bitmask_isbitset(numa_nodes_ptr, i))
+                       nr_nodes++;
+       }
+
+       return nr_nodes;
+}
+
+/*
+ * To check if given numa node is present.
+ */
+static int is_node_present(int node)
+{
+       return numa_bitmask_isbitset(numa_nodes_ptr, node);
+}
+
+/*
+ * To check given numa node has cpus.
+ */
+static bool node_has_cpus(int node)
+{
+       struct bitmask *cpu = numa_allocate_cpumask();
+       unsigned int i;
+
+       if (cpu && !numa_node_to_cpus(node, cpu)) {
+               for (i = 0; i < cpu->size; i++) {
+                       if (numa_bitmask_isbitset(cpu, i))
+                               return true;
+               }
+       }
+
+       return false; /* let's fall back to nocpus safely */
+}
+
 static cpu_set_t bind_to_cpu(int target_cpu)
 {
        cpu_set_t orig_mask, mask;
@@ -244,12 +285,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)
 
 static cpu_set_t bind_to_node(int target_node)
 {
-       int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
+       int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
        cpu_set_t orig_mask, mask;
        int cpu;
        int ret;
 
-       BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
+       BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
        BUG_ON(!cpus_per_node);
 
        ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
@@ -649,7 +690,7 @@ static int parse_setup_node_list(void)
                        int i;
 
                        for (i = 0; i < mul; i++) {
-                               if (t >= g->p.nr_tasks) {
+                               if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
                                        printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
                                        goto out;
                                }
@@ -964,6 +1005,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
        sum = 0;
 
        for (node = 0; node < g->p.nr_nodes; node++) {
+               if (!is_node_present(node))
+                       continue;
                nr = nodes[node];
                nr_min = min(nr, nr_min);
                nr_max = max(nr, nr_max);
@@ -984,8 +1027,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
        process_groups = 0;
 
        for (node = 0; node < g->p.nr_nodes; node++) {
-               int processes = count_node_processes(node);
+               int processes;
 
+               if (!is_node_present(node))
+                       continue;
+               processes = count_node_processes(node);
                nr = nodes[node];
                tprintf(" %2d/%-2d", nr, processes);
 
@@ -1291,7 +1337,7 @@ static void print_summary(void)
 
        printf("\n ###\n");
        printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
-               g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
+               g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
        printf(" #      %5dx %5ldMB global  shared mem operations\n",
                        g->p.nr_loops, g->p.bytes_global/1024/1024);
        printf(" #      %5dx %5ldMB process shared mem operations\n",
index bd1fede..a0f7ed2 100644 (file)
@@ -284,7 +284,7 @@ static int perf_help_config(const char *var, const char *value, void *cb)
                add_man_viewer(value);
                return 0;
        }
-       if (!strstarts(var, "man."))
+       if (strstarts(var, "man."))
                return add_man_viewer_info(var, value);
 
        return 0;
@@ -314,7 +314,7 @@ static const char *cmd_to_page(const char *perf_cmd)
 
        if (!perf_cmd)
                return "perf";
-       else if (!strstarts(perf_cmd, "perf"))
+       else if (strstarts(perf_cmd, "perf"))
                return perf_cmd;
 
        return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s;
index 3d7f33e..0032559 100644 (file)
@@ -339,6 +339,22 @@ static int record__open(struct record *rec)
        struct perf_evsel_config_term *err_term;
        int rc = 0;
 
+       /*
+        * For initial_delay we need to add a dummy event so that we can track
+        * PERF_RECORD_MMAP while we wait for the initial delay to enable the
+        * real events, the ones asked by the user.
+        */
+       if (opts->initial_delay) {
+               if (perf_evlist__add_dummy(evlist))
+                       return -ENOMEM;
+
+               pos = perf_evlist__first(evlist);
+               pos->tracking = 0;
+               pos = perf_evlist__last(evlist);
+               pos->tracking = 1;
+               pos->attr.enable_on_exec = 1;
+       }
+
        perf_evlist__config(evlist, opts, &callchain_param);
 
        evlist__for_each_entry(evlist, pos) {
@@ -749,17 +765,19 @@ static int record__synthesize(struct record *rec, bool tail)
                        goto out;
        }
 
-       err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-                                                machine);
-       WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
-                          "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
-                          "Check /proc/kallsyms permission or run as root.\n");
-
-       err = perf_event__synthesize_modules(tool, process_synthesized_event,
-                                            machine);
-       WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
-                          "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
-                          "Check /proc/modules permission or run as root.\n");
+       if (!perf_evlist__exclude_kernel(rec->evlist)) {
+               err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+                                                        machine);
+               WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
+                                  "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+                                  "Check /proc/kallsyms permission or run as root.\n");
+
+               err = perf_event__synthesize_modules(tool, process_synthesized_event,
+                                                    machine);
+               WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
+                                  "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+                                  "Check /proc/modules permission or run as root.\n");
+       }
 
        if (perf_guest) {
                machines__process_guests(&session->machines,
@@ -1693,7 +1711,7 @@ int cmd_record(int argc, const char **argv)
 
        err = -ENOMEM;
 
-       if (symbol_conf.kptr_restrict)
+       if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
                pr_warning(
 "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
 "check /proc/sys/kernel/kptr_restrict.\n\n"
index 1394cd8..af5dd03 100644 (file)
@@ -441,6 +441,9 @@ static void report__warn_kptr_restrict(const struct report *rep)
        struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
        struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
 
+       if (perf_evlist__exclude_kernel(rep->session->evlist))
+               return;
+
        if (kernel_map == NULL ||
            (kernel_map->dso->hit &&
             (kernel_kmap->ref_reloc_sym == NULL ||
index 68f36dc..9b43bda 100644 (file)
@@ -1955,6 +1955,16 @@ static int perf_script__fopen_per_event_dump(struct perf_script *script)
        struct perf_evsel *evsel;
 
        evlist__for_each_entry(script->session->evlist, evsel) {
+               /*
+                * Already set up? I.e. we may be called twice in cases like
+                * Intel PT, one for the intel_pt// and dummy events, then
+                * for the evsels synthesized from the auxtrace info.
+                *
+                * See perf_script__process_auxtrace_info.
+                */
+               if (evsel->priv != NULL)
+                       continue;
+
                evsel->priv = perf_evsel_script__new(evsel, script->session->data);
                if (evsel->priv == NULL)
                        goto out_err_fclose;
@@ -2838,6 +2848,25 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
        return set_maps(script);
 }
 
+#ifdef HAVE_AUXTRACE_SUPPORT
+static int perf_script__process_auxtrace_info(struct perf_tool *tool,
+                                             union perf_event *event,
+                                             struct perf_session *session)
+{
+       int ret = perf_event__process_auxtrace_info(tool, event, session);
+
+       if (ret == 0) {
+               struct perf_script *script = container_of(tool, struct perf_script, tool);
+
+               ret = perf_script__setup_per_event_dump(script);
+       }
+
+       return ret;
+}
+#else
+#define perf_script__process_auxtrace_info 0
+#endif
+
 int cmd_script(int argc, const char **argv)
 {
        bool show_full_info = false;
@@ -2866,7 +2895,7 @@ int cmd_script(int argc, const char **argv)
                        .feature         = perf_event__process_feature,
                        .build_id        = perf_event__process_build_id,
                        .id_index        = perf_event__process_id_index,
-                       .auxtrace_info   = perf_event__process_auxtrace_info,
+                       .auxtrace_info   = perf_script__process_auxtrace_info,
                        .auxtrace        = perf_event__process_auxtrace,
                        .auxtrace_error  = perf_event__process_auxtrace_error,
                        .stat            = perf_event__process_stat_event,
index 477a869..9e0d264 100644 (file)
@@ -77,6 +77,7 @@
 #include "sane_ctype.h"
 
 static volatile int done;
+static volatile int resize;
 
 #define HEADER_LINE_NR  5
 
@@ -85,11 +86,13 @@ static void perf_top__update_print_entries(struct perf_top *top)
        top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
 }
 
-static void perf_top__sig_winch(int sig __maybe_unused,
-                               siginfo_t *info __maybe_unused, void *arg)
+static void winch_sig(int sig __maybe_unused)
 {
-       struct perf_top *top = arg;
+       resize = 1;
+}
 
+static void perf_top__resize(struct perf_top *top)
+{
        get_term_dimensions(&top->winsize);
        perf_top__update_print_entries(top);
 }
@@ -473,12 +476,8 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
                case 'e':
                        prompt_integer(&top->print_entries, "Enter display entries (lines)");
                        if (top->print_entries == 0) {
-                               struct sigaction act = {
-                                       .sa_sigaction = perf_top__sig_winch,
-                                       .sa_flags     = SA_SIGINFO,
-                               };
-                               perf_top__sig_winch(SIGWINCH, NULL, top);
-                               sigaction(SIGWINCH, &act, NULL);
+                               perf_top__resize(top);
+                               signal(SIGWINCH, winch_sig);
                        } else {
                                signal(SIGWINCH, SIG_DFL);
                        }
@@ -732,14 +731,16 @@ static void perf_event__process_sample(struct perf_tool *tool,
        if (!machine->kptr_restrict_warned &&
            symbol_conf.kptr_restrict &&
            al.cpumode == PERF_RECORD_MISC_KERNEL) {
-               ui__warning(
+               if (!perf_evlist__exclude_kernel(top->session->evlist)) {
+                       ui__warning(
 "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
 "Check /proc/sys/kernel/kptr_restrict.\n\n"
 "Kernel%s samples will not be resolved.\n",
                          al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
                          " modules" : "");
-               if (use_browser <= 0)
-                       sleep(5);
+                       if (use_browser <= 0)
+                               sleep(5);
+               }
                machine->kptr_restrict_warned = true;
        }
 
@@ -1030,6 +1031,11 @@ static int __cmd_top(struct perf_top *top)
 
                if (hits == top->samples)
                        ret = perf_evlist__poll(top->evlist, 100);
+
+               if (resize) {
+                       perf_top__resize(top);
+                       resize = 0;
+               }
        }
 
        ret = 0;
@@ -1352,12 +1358,8 @@ int cmd_top(int argc, const char **argv)
 
        get_term_dimensions(&top.winsize);
        if (top.print_entries == 0) {
-               struct sigaction act = {
-                       .sa_sigaction = perf_top__sig_winch,
-                       .sa_flags     = SA_SIGINFO,
-               };
                perf_top__update_print_entries(&top);
-               sigaction(SIGWINCH, &act, NULL);
+               signal(SIGWINCH, winch_sig);
        }
 
        status = __cmd_top(&top);
index f2757d3..84debdb 100644 (file)
@@ -1152,12 +1152,14 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
        if (trace->host == NULL)
                return -ENOMEM;
 
-       if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
-               return -errno;
+       err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
+       if (err < 0)
+               goto out;
 
        err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
                                            evlist->threads, trace__tool_process, false,
                                            trace->opts.proc_map_timeout, 1);
+out:
        if (err)
                symbol__exit();
 
index 77406d2..3e64f10 100755 (executable)
@@ -21,6 +21,7 @@ arch/x86/include/asm/cpufeatures.h
 arch/arm/include/uapi/asm/perf_regs.h
 arch/arm64/include/uapi/asm/perf_regs.h
 arch/powerpc/include/uapi/asm/perf_regs.h
+arch/s390/include/uapi/asm/perf_regs.h
 arch/x86/include/uapi/asm/perf_regs.h
 arch/x86/include/uapi/asm/kvm.h
 arch/x86/include/uapi/asm/kvm_perf.h
@@ -30,6 +31,7 @@ arch/x86/include/uapi/asm/vmx.h
 arch/powerpc/include/uapi/asm/kvm.h
 arch/s390/include/uapi/asm/kvm.h
 arch/s390/include/uapi/asm/kvm_perf.h
+arch/s390/include/uapi/asm/ptrace.h
 arch/s390/include/uapi/asm/sie.h
 arch/arm/include/uapi/asm/kvm.h
 arch/arm64/include/uapi/asm/kvm.h
index cf36de7..0c6d100 100644 (file)
@@ -384,13 +384,13 @@ jvmti_write_code(void *agent, char const *sym,
 }
 
 int
-jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
-                      jvmti_line_info_t *li, int nr_lines)
+jvmti_write_debug_info(void *agent, uint64_t code,
+    int nr_lines, jvmti_line_info_t *li,
+    const char * const * file_names)
 {
        struct jr_code_debug_info rec;
-       size_t sret, len, size, flen;
+       size_t sret, len, size, flen = 0;
        uint64_t addr;
-       const char *fn = file;
        FILE *fp = agent;
        int i;
 
@@ -405,7 +405,9 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
                return -1;
        }
 
-       flen = strlen(file) + 1;
+       for (i = 0; i < nr_lines; ++i) {
+           flen += strlen(file_names[i]) + 1;
+       }
 
        rec.p.id        = JIT_CODE_DEBUG_INFO;
        size            = sizeof(rec);
@@ -421,7 +423,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
         * file[]   : source file name
         */
        size += nr_lines * sizeof(struct debug_entry);
-       size += flen * nr_lines;
+       size += flen;
        rec.p.total_size = size;
 
        /*
@@ -452,7 +454,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
                if (sret != 1)
                        goto error;
 
-               sret = fwrite_unlocked(fn, flen, 1, fp);
+               sret = fwrite_unlocked(file_names[i], strlen(file_names[i]) + 1, 1, fp);
                if (sret != 1)
                        goto error;
        }
index fe32d83..6ed82f6 100644 (file)
@@ -14,6 +14,7 @@ typedef struct {
        unsigned long   pc;
        int             line_number;
        int             discrim; /* discriminator -- 0 for now */
+       jmethodID       methodID;
 } jvmti_line_info_t;
 
 void *jvmti_open(void);
@@ -22,11 +23,9 @@ int   jvmti_write_code(void *agent, char const *symbol_name,
                       uint64_t vma, void const *code,
                       const unsigned int code_size);
 
-int   jvmti_write_debug_info(void *agent,
-                            uint64_t code,
-                            const char *file,
+int   jvmti_write_debug_info(void *agent, uint64_t code, int nr_lines,
                             jvmti_line_info_t *li,
-                            int nr_lines);
+                            const char * const * file_names);
 
 #if defined(__cplusplus)
 }
index c62c9fc..6add3e9 100644 (file)
@@ -47,6 +47,7 @@ do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
                        tab[lines].pc = (unsigned long)pc;
                        tab[lines].line_number = loc_tab[i].line_number;
                        tab[lines].discrim = 0; /* not yet used */
+                       tab[lines].methodID = m;
                        lines++;
                } else {
                        break;
@@ -125,6 +126,99 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
        return JVMTI_ERROR_NONE;
 }
 
+static void
+copy_class_filename(const char * class_sign, const char * file_name, char * result, size_t max_length)
+{
+       /*
+        * Assume the path name mirrors the class hierarchy, as is common in Java programs.
+        */
+       if (*class_sign == 'L') {
+               int j, i = 0;
+               char *p = strrchr(class_sign, '/');
+               if (p) {
+                       /* drop the 'L' prefix and copy up to the final '/' */
+                       for (i = 0; i < (p - class_sign); i++)
+                               result[i] = class_sign[i+1];
+               }
+               /*
+                * Append the file name; use loops rather than string ops to
+                * avoid modifying class_sign, which is used later for the symbol name.
+                */
+               for (j = 0; i < (max_length - 1) && file_name && j < strlen(file_name); j++, i++)
+                       result[i] = file_name[j];
+
+               result[i] = '\0';
+       } else {
+               /* fallback case */
+               size_t file_name_len = strlen(file_name);
+               strncpy(result, file_name, file_name_len < max_length ? file_name_len : max_length);
+       }
+}
+
+static jvmtiError
+get_source_filename(jvmtiEnv *jvmti, jmethodID methodID, char ** buffer)
+{
+       jvmtiError ret;
+       jclass decl_class;
+       char *file_name = NULL;
+       char *class_sign = NULL;
+       char fn[PATH_MAX];
+       size_t len;
+
+       ret = (*jvmti)->GetMethodDeclaringClass(jvmti, methodID, &decl_class);
+       if (ret != JVMTI_ERROR_NONE) {
+               print_error(jvmti, "GetMethodDeclaringClass", ret);
+               return ret;
+       }
+
+       ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
+       if (ret != JVMTI_ERROR_NONE) {
+               print_error(jvmti, "GetSourceFileName", ret);
+               return ret;
+       }
+
+       ret = (*jvmti)->GetClassSignature(jvmti, decl_class, &class_sign, NULL);
+       if (ret != JVMTI_ERROR_NONE) {
+               print_error(jvmti, "GetClassSignature", ret);
+               goto free_file_name_error;
+       }
+
+       copy_class_filename(class_sign, file_name, fn, PATH_MAX);
+       len = strlen(fn);
+       *buffer = malloc((len + 1) * sizeof(char));
+       if (!*buffer) {
+               print_error(jvmti, "GetClassSignature", ret);
+               ret = JVMTI_ERROR_OUT_OF_MEMORY;
+               goto free_class_sign_error;
+       }
+       strcpy(*buffer, fn);
+       ret = JVMTI_ERROR_NONE;
+
+free_class_sign_error:
+       (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
+free_file_name_error:
+       (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
+
+       return ret;
+}
+
+static jvmtiError
+fill_source_filenames(jvmtiEnv *jvmti, int nr_lines,
+                     const jvmti_line_info_t * line_tab,
+                     char ** file_names)
+{
+       int index;
+       jvmtiError ret;
+
+       for (index = 0; index < nr_lines; ++index) {
+               ret = get_source_filename(jvmti, line_tab[index].methodID, &(file_names[index]));
+               if (ret != JVMTI_ERROR_NONE)
+                       return ret;
+       }
+
+       return JVMTI_ERROR_NONE;
+}
+
 static void JNICALL
 compiled_method_load_cb(jvmtiEnv *jvmti,
                        jmethodID method,
@@ -135,16 +229,18 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
                        const void *compile_info)
 {
        jvmti_line_info_t *line_tab = NULL;
+       char ** line_file_names = NULL;
        jclass decl_class;
        char *class_sign = NULL;
        char *func_name = NULL;
        char *func_sign = NULL;
-       char *file_name= NULL;
+       char *file_name = NULL;
        char fn[PATH_MAX];
        uint64_t addr = (uint64_t)(uintptr_t)code_addr;
        jvmtiError ret;
        int nr_lines = 0; /* in line_tab[] */
        size_t len;
+       int output_debug_info = 0;
 
        ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method,
                                                &decl_class);
@@ -158,6 +254,19 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
                if (ret != JVMTI_ERROR_NONE) {
                        warnx("jvmti: cannot get line table for method");
                        nr_lines = 0;
+               } else if (nr_lines > 0) {
+                       line_file_names = malloc(sizeof(char*) * nr_lines);
+                       if (!line_file_names) {
+                               warnx("jvmti: cannot allocate space for line table method names");
+                       } else {
+                               memset(line_file_names, 0, sizeof(char*) * nr_lines);
+                               ret = fill_source_filenames(jvmti, nr_lines, line_tab, line_file_names);
+                               if (ret != JVMTI_ERROR_NONE) {
+                                       warnx("jvmti: fill_source_filenames failed");
+                               } else {
+                                       output_debug_info = 1;
+                               }
+                       }
                }
        }
 
@@ -181,33 +290,14 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
                goto error;
        }
 
-       /*
-        * Assume path name is class hierarchy, this is a common practice with Java programs
-        */
-       if (*class_sign == 'L') {
-               int j, i = 0;
-               char *p = strrchr(class_sign, '/');
-               if (p) {
-                       /* drop the 'L' prefix and copy up to the final '/' */
-                       for (i = 0; i < (p - class_sign); i++)
-                               fn[i] = class_sign[i+1];
-               }
-               /*
-                * append file name, we use loops and not string ops to avoid modifying
-                * class_sign which is used later for the symbol name
-                */
-               for (j = 0; i < (PATH_MAX - 1) && file_name && j < strlen(file_name); j++, i++)
-                       fn[i] = file_name[j];
-               fn[i] = '\0';
-       } else {
-               /* fallback case */
-               strcpy(fn, file_name);
-       }
+       copy_class_filename(class_sign, file_name, fn, PATH_MAX);
+
        /*
         * write source line info record if we have it
         */
-       if (jvmti_write_debug_info(jvmti_agent, addr, fn, line_tab, nr_lines))
-               warnx("jvmti: write_debug_info() failed");
+       if (output_debug_info)
+               if (jvmti_write_debug_info(jvmti_agent, addr, nr_lines, line_tab, (const char * const *) line_file_names))
+                       warnx("jvmti: write_debug_info() failed");
 
        len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2;
        {
@@ -223,6 +313,13 @@ error:
        (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
        (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
        free(line_tab);
+       while (line_file_names && (nr_lines > 0)) {
+           if (line_file_names[nr_lines - 1]) {
+               free(line_file_names[nr_lines - 1]);
+           }
+           nr_lines -= 1;
+       }
+       free(line_file_names);
 }
 
 static void JNICALL
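The copy_class_filename() helper factored out above builds a source path from a JVM class signature plus the file name reported by GetSourceFileName(). A standalone sketch of the same transformation follows; the function name and inputs are invented for the example (e.g. "Ljava/util/HashMap;" plus "HashMap.java" yields "java/util/HashMap.java"):

#include <stdio.h>
#include <string.h>

/* Strip the 'L' prefix of the class signature, keep everything up to and
 * including the final '/', then append the reported source file name. */
static void class_sign_to_path(const char *class_sign, const char *file_name,
			       char *result, size_t max_length)
{
	size_t i = 0;

	if (*class_sign == 'L') {
		const char *p = strrchr(class_sign, '/');
		size_t j;

		if (p) {
			/* copy "<package>/" without the leading 'L' */
			for (i = 0; i < (size_t)(p - class_sign); i++)
				result[i] = class_sign[i + 1];
		}
		for (j = 0; i < max_length - 1 && file_name[j] != '\0'; j++, i++)
			result[i] = file_name[j];
		result[i] = '\0';
	} else {
		/* fallback: no package hierarchy in the signature */
		snprintf(result, max_length, "%s", file_name);
	}
}

int main(void)
{
	char fn[256];

	class_sign_to_path("Ljava/util/HashMap;", "HashMap.java", fn, sizeof(fn));
	printf("%s\n", fn);	/* prints java/util/HashMap.java */
	return 0;
}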
index 7a84d73..8b3da21 100755 (executable)
@@ -10,8 +10,8 @@
 
 . $(dirname $0)/lib/probe.sh
 
-ld=$(realpath /lib64/ld*.so.* | uniq)
-libc=$(echo $ld | sed 's/ld/libc/g')
+libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
+nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254
 
 trace_libc_inet_pton_backtrace() {
        idx=0
@@ -37,6 +37,9 @@ trace_libc_inet_pton_backtrace() {
        done
 }
 
+# Check for IPv6 interface existence
+ip a sh lo | fgrep -q inet6 || exit 2
+
 skip_if_no_perf_probe && \
 perf probe -q $libc inet_pton && \
 trace_libc_inet_pton_backtrace
index 2e68c5f..2a9ef08 100755 (executable)
@@ -17,8 +17,10 @@ skip_if_no_perf_probe || exit 2
 file=$(mktemp /tmp/temporary_file.XXXXX)
 
 trace_open_vfs_getname() {
-       perf trace -e open touch $file 2>&1 | \
-       egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open\(filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
+       test "$(uname -m)" = s390x && { svc="openat"; txt="dfd: +CWD, +"; }
+
+       perf trace -e ${svc:-open} touch $file 2>&1 | \
+       egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ ${svc:-open}\(${txt}filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
 }
 
 
index bc4a734..89c8e16 100644 (file)
@@ -84,7 +84,11 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 
        evsel = perf_evlist__first(evlist);
        evsel->attr.task = 1;
+#ifdef __s390x__
+       evsel->attr.sample_freq = 1000000;
+#else
        evsel->attr.sample_freq = 1;
+#endif
        evsel->attr.inherit = 0;
        evsel->attr.watermark = 0;
        evsel->attr.wakeup_events = 1;
index 9e1668b..417e3ec 100644 (file)
@@ -62,6 +62,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
        P_MMAP_FLAG(POPULATE);
        P_MMAP_FLAG(STACK);
        P_MMAP_FLAG(UNINITIALIZED);
+#ifdef MAP_SYNC
+       P_MMAP_FLAG(SYNC);
+#endif
 #undef P_MMAP_FLAG
 
        if (flags)
index da1c4c4..3369c78 100644 (file)
@@ -165,7 +165,7 @@ static void ins__delete(struct ins_operands *ops)
 static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
                              struct ins_operands *ops)
 {
-       return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
+       return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
 }
 
 int ins__scnprintf(struct ins *ins, char *bf, size_t size,
@@ -230,12 +230,12 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,
                           struct ins_operands *ops)
 {
        if (ops->target.name)
-               return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
+               return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
 
        if (ops->target.addr == 0)
                return ins__raw_scnprintf(ins, bf, size, ops);
 
-       return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
+       return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
 }
 
 static struct ins_ops call_ops = {
@@ -299,7 +299,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
                        c++;
        }
 
-       return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
+       return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
                         ins->name, c ? c - ops->raw : 0, ops->raw,
                         ops->target.offset);
 }
@@ -372,7 +372,7 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
        if (ops->locked.ins.ops == NULL)
                return ins__raw_scnprintf(ins, bf, size, ops);
 
-       printed = scnprintf(bf, size, "%-6.6s ", ins->name);
+       printed = scnprintf(bf, size, "%-6s ", ins->name);
        return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
                                        size - printed, ops->locked.ops);
 }
@@ -448,7 +448,7 @@ out_free_source:
 static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
                           struct ins_operands *ops)
 {
-       return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
+       return scnprintf(bf, size, "%-6s %s,%s", ins->name,
                         ops->source.name ?: ops->source.raw,
                         ops->target.name ?: ops->target.raw);
 }
@@ -488,7 +488,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
 static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
                           struct ins_operands *ops)
 {
-       return scnprintf(bf, size, "%-6.6s %s", ins->name,
+       return scnprintf(bf, size, "%-6s %s", ins->name,
                         ops->target.name ?: ops->target.raw);
 }
 
@@ -500,7 +500,7 @@ static struct ins_ops dec_ops = {
 static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
                          struct ins_operands *ops __maybe_unused)
 {
-       return scnprintf(bf, size, "%-6.6s", "nop");
+       return scnprintf(bf, size, "%-6s", "nop");
 }
 
 static struct ins_ops nop_ops = {
@@ -924,7 +924,7 @@ void disasm_line__free(struct disasm_line *dl)
 int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
 {
        if (raw || !dl->ins.ops)
-               return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw);
+               return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
 
        return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
 }
index c6c891e..b62e523 100644 (file)
@@ -257,7 +257,7 @@ int perf_evlist__add_dummy(struct perf_evlist *evlist)
                .config = PERF_COUNT_SW_DUMMY,
                .size   = sizeof(attr), /* to capture ABI version */
        };
-       struct perf_evsel *evsel = perf_evsel__new(&attr);
+       struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
 
        if (evsel == NULL)
                return -ENOMEM;
@@ -1786,3 +1786,15 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
 state_err:
        return;
 }
+
+bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               if (!evsel->attr.exclude_kernel)
+                       return false;
+       }
+
+       return true;
+}
index e72ae64..491f695 100644 (file)
@@ -312,4 +312,6 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
 
 struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
                                            union perf_event *event);
+
+bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
 #endif /* __PERF_EVLIST_H */
index f894893..d5fbcf8 100644 (file)
@@ -733,12 +733,16 @@ static void apply_config_terms(struct perf_evsel *evsel,
        list_for_each_entry(term, config_terms, list) {
                switch (term->type) {
                case PERF_EVSEL__CONFIG_TERM_PERIOD:
-                       attr->sample_period = term->val.period;
-                       attr->freq = 0;
+                       if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
+                               attr->sample_period = term->val.period;
+                               attr->freq = 0;
+                       }
                        break;
                case PERF_EVSEL__CONFIG_TERM_FREQ:
-                       attr->sample_freq = term->val.freq;
-                       attr->freq = 1;
+                       if (!(term->weak && opts->user_freq != UINT_MAX)) {
+                               attr->sample_freq = term->val.freq;
+                               attr->freq = 1;
+                       }
                        break;
                case PERF_EVSEL__CONFIG_TERM_TIME:
                        if (term->val.time)
@@ -1371,7 +1375,7 @@ perf_evsel__process_group_data(struct perf_evsel *leader,
 static int
 perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
 {
-       struct perf_stat_evsel *ps = leader->priv;
+       struct perf_stat_evsel *ps = leader->stats;
        u64 read_format = leader->attr.read_format;
        int size = perf_evsel__read_size(leader);
        u64 *data = ps->group_data;
index 9277df9..157f49e 100644 (file)
@@ -67,6 +67,7 @@ struct perf_evsel_config_term {
                bool    overwrite;
                char    *branch;
        } val;
+       bool weak;
 };
 
 struct perf_stat_evsel;
index 125ecd2..52dc8d9 100644 (file)
 #define INAT_MAKE_GROUP(grp)   ((grp << INAT_GRP_OFFS) | INAT_MODRM)
 #define INAT_MAKE_IMM(imm)     (imm << INAT_IMM_OFFS)
 
+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE    0
+#define INAT_SEG_REG_DEFAULT   1
+#define INAT_SEG_REG_CS                2
+#define INAT_SEG_REG_SS                3
+#define INAT_SEG_REG_DS                4
+#define INAT_SEG_REG_ES                5
+#define INAT_SEG_REG_FS                6
+#define INAT_SEG_REG_GS                7
+
 /* Attribute search APIs */
 extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
 extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
index 12e3771..e0b8593 100644 (file)
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
 fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
 fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
 fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
+ff: UD0
 EndTable
 
 Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
 7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
 80: INVEPT Gy,Mdq (66)
-81: INVPID Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
 82: INVPCID Gy,Mdq (66)
 83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
 88: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
 
 GrpTable: Grp3_1
 0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
 2: NOT Eb
 3: NEG Eb
 4: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
 EndTable
 
 GrpTable: Grp10
+# all are UD1
+0: UD1
+1: UD1
+2: UD1
+3: UD1
+4: UD1
+5: UD1
+6: UD1
+7: UD1
 EndTable
 
 # Grp11A and Grp11B are expressed as Grp11 in Intel SDM
index 6a8d03c..270f322 100644 (file)
@@ -172,6 +172,9 @@ void machine__exit(struct machine *machine)
 {
        int i;
 
+       if (machine == NULL)
+               return;
+
        machine__destroy_kernel_maps(machine);
        map_groups__exit(&machine->kmaps);
        dsos__exit(&machine->dsos);
index efd78b8..3a5cb5a 100644 (file)
@@ -70,7 +70,7 @@ void perf_mmap__read_catchup(struct perf_mmap *md);
 static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
 {
        struct perf_event_mmap_page *pc = mm->base;
-       u64 head = ACCESS_ONCE(pc->data_head);
+       u64 head = READ_ONCE(pc->data_head);
        rmb();
        return head;
 }
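The hunk above replaces ACCESS_ONCE() with READ_ONCE() when sampling data_head; together with the rmb() that follows, the reader gets a single, ordered load of the head before touching ring data. A rough user-space sketch of that contract, using C11 atomics as a stand-in (the struct below is a stub, not the real perf_event_mmap_page layout):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct mmap_page_stub {
	_Atomic uint64_t data_head;	/* advanced by the kernel */
	uint64_t data_tail;		/* advanced by the reader */
};

/* An acquire load gives both properties the READ_ONCE()+rmb() pair provides:
 * data_head is read exactly once, and later reads of ring data cannot be
 * reordered before it. */
static uint64_t read_head(struct mmap_page_stub *pc)
{
	return atomic_load_explicit(&pc->data_head, memory_order_acquire);
}

int main(void)
{
	struct mmap_page_stub page = { .data_head = 64, .data_tail = 0 };

	printf("head=%llu\n", (unsigned long long)read_head(&page));
	return 0;
}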
index a7fcd95..1703167 100644 (file)
@@ -1116,6 +1116,7 @@ do {                                                              \
        INIT_LIST_HEAD(&__t->list);                             \
        __t->type       = PERF_EVSEL__CONFIG_TERM_ ## __type;   \
        __t->val.__name = __val;                                \
+       __t->weak       = term->weak;                           \
        list_add_tail(&__t->list, head_terms);                  \
 } while (0)
 
@@ -2410,6 +2411,7 @@ static int new_term(struct parse_events_term **_term,
 
        *term = *temp;
        INIT_LIST_HEAD(&term->list);
+       term->weak = false;
 
        switch (term->type_val) {
        case PARSE_EVENTS__TERM_TYPE_NUM:
index be337c2..88108cd 100644 (file)
@@ -101,6 +101,9 @@ struct parse_events_term {
        /* error string indexes for within parsed string */
        int err_term;
        int err_val;
+
+       /* Coming from implicit alias */
+       bool weak;
 };
 
 struct parse_events_error {
index 07cb2ac..80fb159 100644 (file)
@@ -405,6 +405,11 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
                        parse_events_terms__purge(&list);
                        return ret;
                }
+               /*
+                * Weak terms do not override command line options; that is
+                * exactly the behaviour we want for implicit terms in aliases.
+                */
+               cloned->weak = true;
                list_add_tail(&cloned->list, &list);
        }
        list_splice(&list, terms);
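Terms cloned from a PMU alias are marked weak here, and the apply_config_terms() hunk in evsel.c earlier in this diff skips a weak period/frequency when the user already passed one on the command line. A hypothetical distillation of that precedence rule, with an invented helper name:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* A weak (alias-implied) sample period is only applied when the user did
 * not pass an explicit one, i.e. when opts->user_interval is still
 * ULLONG_MAX. */
static bool period_term_applies(bool weak, unsigned long long user_interval)
{
	return !(weak && user_interval != ULLONG_MAX);
}

int main(void)
{
	/* alias term vs. an explicit "perf record -c 1000": -c wins */
	printf("weak term, -c given:   %d\n", period_term_applies(true, 1000));
	/* alias term and no -c on the command line: the alias period applies */
	printf("weak term, no -c:      %d\n", period_term_applies(true, ULLONG_MAX));
	/* term written explicitly in the event spec always applies */
	printf("strong term, -c given: %d\n", period_term_applies(false, 1000));
	return 0;
}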
index 22c3b4e..be418fb 100644 (file)
@@ -79,7 +79,7 @@ struct ap_dump_action action_table[AP_MAX_ACTIONS];
 u32 current_action = 0;
 
 #define AP_UTILITY_NAME             "ACPI Binary Table Dump Utility"
-#define AP_SUPPORTED_OPTIONS        "?a:bc:f:hn:o:r:svxz"
+#define AP_SUPPORTED_OPTIONS        "?a:bc:f:hn:o:r:sv^xz"
 
 /******************************************************************************
  *
@@ -100,6 +100,7 @@ static void ap_display_usage(void)
        ACPI_OPTION("-r <Address>", "Dump tables from specified RSDP");
        ACPI_OPTION("-s", "Print table summaries only");
        ACPI_OPTION("-v", "Display version information");
+       ACPI_OPTION("-vd", "Display build date and time");
        ACPI_OPTION("-z", "Verbose mode");
 
        ACPI_USAGE_TEXT("\nTable Options:\n");
@@ -231,10 +232,29 @@ static int ap_do_options(int argc, char **argv)
                        }
                        continue;
 
-               case 'v':       /* Revision/version */
+               case 'v':       /* -v: (Version): signon already emitted, just exit */
 
-                       acpi_os_printf(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
-                       return (1);
+                       switch (acpi_gbl_optarg[0]) {
+                       case '^':       /* -v: (Version) */
+
+                               fprintf(stderr,
+                                       ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+                               return (1);
+
+                       case 'd':
+
+                               fprintf(stderr,
+                                       ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+                               printf(ACPI_COMMON_BUILD_TIME);
+                               return (1);
+
+                       default:
+
+                               printf("Unknown option: -v%s\n",
+                                      acpi_gbl_optarg);
+                               return (-1);
+                       }
+                       break;
 
                case 'z':       /* Verbose mode */
 
index c25a74a..2bb3eef 100644 (file)
@@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)
 
        dprintf("set %s as cpufreq governor\n", governor);
 
-       if (cpupower_is_cpu_online(cpu) != 0) {
+       if (cpupower_is_cpu_online(cpu) != 1) {
                perror("cpufreq_cpu_exists");
                fprintf(stderr, "error: cpu %u does not exist\n", cpu);
                return -1;
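The check above becomes "!= 1" because only a return value of exactly 1 indicates an online CPU; 0 is assumed to mean offline and a negative value a query failure (that three-way convention is an assumption here, not spelled out in the hunk). A small illustration with a hypothetical wrapper:

#include <stdio.h>

/* Only "== 1" counts as online; a plain "!= 0" test would wrongly treat
 * query failures as online CPUs. */
static int check_cpu(int online_ret, unsigned int cpu)
{
	if (online_ret != 1) {
		fprintf(stderr, "error: cpu %u is offline or could not be queried (rc=%d)\n",
			cpu, online_ret);
		return -1;
	}
	return 0;
}

int main(void)
{
	check_cpu(1, 0);	/* online        -> proceed */
	check_cpu(0, 1);	/* offline       -> error path */
	check_cpu(-2, 2);	/* query failure -> error path */
	return 0;
}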
index 3b005c3..60beaf5 100644 (file)
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #ifndef __CPUPOWER_CPUFREQ_H__
index 1b5da00..5b3205f 100644 (file)
@@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)
 {
        int num;
        char *tmp;
+       int this_cpu;
+
+       this_cpu = sched_getcpu();
 
        /* Assume idle state count is the same for all CPUs */
-       cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0);
+       cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
 
        if (cpuidle_sysfs_monitor.hw_states_num <= 0)
                return NULL;
 
        for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
-               tmp = cpuidle_state_name(0, num);
+               tmp = cpuidle_state_name(this_cpu, num);
                if (tmp == NULL)
                        continue;
 
@@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
                strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
                free(tmp);
 
-               tmp = cpuidle_state_desc(0, num);
+               tmp = cpuidle_state_desc(this_cpu, num);
                if (tmp == NULL)
                        continue;
                strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
index 0b24dd9..29f50d4 100755 (executable)
@@ -411,6 +411,16 @@ def set_trace_buffer_size():
         print('IO error setting trace buffer size ')
         quit()
 
+def free_trace_buffer():
+    """ Free the trace buffer memory """
+
+    try:
+        open('/sys/kernel/debug/tracing/buffer_size_kb',
+             'w').write("1")
+    except:
+        print('IO error freeing trace buffer ')
+        quit()
+
 def read_trace_data(filename):
     """ Read and parse trace data """
 
@@ -583,4 +593,9 @@ for root, dirs, files in os.walk('.'):
     for f in files:
         fix_ownership(f)
 
+clear_trace_file()
+# Free the memory
+if interval:
+    free_trace_buffer()
+
 os.chdir('../../')
index 333a486..9316e64 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+
 LIBDIR := ../../../lib
 BPFDIR := $(LIBDIR)/bpf
 APIDIR := ../../../include/uapi
@@ -10,7 +11,7 @@ ifneq ($(wildcard $(GENHDR)),)
 endif
 
 CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
-LDLIBS += -lcap -lelf
+LDLIBS += -lcap -lelf -lrt
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
        test_align test_verifier_log test_dev_cgroup
@@ -38,7 +39,7 @@ $(BPFOBJ): force
 CLANG ?= clang
 LLC   ?= llc
 
-PROBE := $(shell llc -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
+PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
 
 # Let newer LLVM versions transparently probe the kernel for availability
 # of full BPF instruction set.
index 8591c89..471bbbd 100644 (file)
@@ -474,27 +474,7 @@ static struct bpf_align_test tests[] = {
                .result = REJECT,
                .matches = {
                        {4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
-                       /* ptr & 0x40 == either 0 or 0x40 */
-                       {5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
-                       /* ptr << 2 == unknown, (4n) */
-                       {7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
-                       /* (4n) + 14 == (4n+2).  We blow our bounds, because
-                        * the add could overflow.
-                        */
-                       {8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
-                       /* Checked s>=0 */
-                       {10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
-                       /* packet pointer + nonnegative (4n+2) */
-                       {12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
-                       {14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
-                       /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
-                        * We checked the bounds, but it might have been able
-                        * to overflow if the packet pointer started in the
-                        * upper half of the address space.
-                        * So we did not get a 'range' on R6, and the access
-                        * attempt will fail.
-                        */
-                       {16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+                       /* R5 bitwise operator &= on pointer prohibited */
                }
        },
        {
index 6942753..6761be1 100644 (file)
@@ -351,7 +351,7 @@ static void test_bpf_obj_id(void)
                          info_len != sizeof(struct bpf_map_info) ||
                          strcmp((char *)map_infos[i].name, expected_map_name),
                          "get-map-info(fd)",
-                         "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
+                         "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
                          err, errno,
                          map_infos[i].type, BPF_MAP_TYPE_ARRAY,
                          info_len, sizeof(struct bpf_map_info),
@@ -395,7 +395,7 @@ static void test_bpf_obj_id(void)
                          *(int *)prog_infos[i].map_ids != map_infos[i].id ||
                          strcmp((char *)prog_infos[i].name, expected_prog_name),
                          "get-prog-info(fd)",
-                         "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
+                         "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
                          err, errno, i,
                          prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
                          info_len, sizeof(struct bpf_prog_info),
@@ -463,7 +463,7 @@ static void test_bpf_obj_id(void)
                      memcmp(&prog_info, &prog_infos[i], info_len) ||
                      *(int *)prog_info.map_ids != saved_map_id,
                      "get-prog-info(next_id->fd)",
-                     "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
+                     "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
                      err, errno, info_len, sizeof(struct bpf_prog_info),
                      memcmp(&prog_info, &prog_infos[i], info_len),
                      *(int *)prog_info.map_ids, saved_map_id);
@@ -509,7 +509,7 @@ static void test_bpf_obj_id(void)
                      memcmp(&map_info, &map_infos[i], info_len) ||
                      array_value != array_magic_value,
                      "check get-map-info(next_id->fd)",
-                     "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
+                     "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
                      err, errno, info_len, sizeof(struct bpf_map_info),
                      memcmp(&map_info, &map_infos[i], info_len),
                      array_value, array_magic_value);
index 3c64f30..5ed4175 100644 (file)
@@ -272,6 +272,46 @@ static struct bpf_test tests[] = {
                .errstr = "invalid bpf_ld_imm64 insn",
                .result = REJECT,
        },
+       {
+               "arsh32 on imm",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "BPF_ARSH not supported for 32 bit ALU",
+       },
+       {
+               "arsh32 on reg",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_MOV64_IMM(BPF_REG_1, 5),
+                       BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "BPF_ARSH not supported for 32 bit ALU",
+       },
+       {
+               "arsh64 on imm",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "arsh64 on reg",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_MOV64_IMM(BPF_REG_1, 5),
+                       BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
        {
                "no bpf_exit",
                .insns = {
@@ -422,9 +462,7 @@ static struct bpf_test tests[] = {
                        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "R1 subtraction from stack pointer",
-               .result_unpriv = REJECT,
-               .errstr = "R1 invalid mem access",
+               .errstr = "R1 subtraction from stack pointer",
                .result = REJECT,
        },
        {
@@ -606,7 +644,6 @@ static struct bpf_test tests[] = {
                },
                .errstr = "misaligned stack access",
                .result = REJECT,
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "invalid map_fd for function call",
@@ -1797,7 +1834,6 @@ static struct bpf_test tests[] = {
                },
                .result = REJECT,
                .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "PTR_TO_STACK store/load - bad alignment on reg",
@@ -1810,7 +1846,6 @@ static struct bpf_test tests[] = {
                },
                .result = REJECT,
                .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "PTR_TO_STACK store/load - out of bounds low",
@@ -1862,9 +1897,8 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .result_unpriv = REJECT,
-               .errstr_unpriv = "R1 pointer += pointer",
+               .result = REJECT,
+               .errstr = "R1 pointer += pointer",
        },
        {
                "unpriv: neg pointer",
@@ -2558,6 +2592,29 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
+       {
+               "context stores via ST",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "BPF_ST stores into R1 context is not allowed",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "context stores via XADD",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
+                                    BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "BPF_XADD stores into R1 context is not allowed",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
        {
                "direct packet access: test1",
                .insns = {
@@ -2592,7 +2649,8 @@ static struct bpf_test tests[] = {
                        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                                    offsetof(struct __sk_buff, data)),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
                        BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
                        BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2899,7 +2957,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "invalid access to packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
@@ -3885,9 +3943,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3, 11 },
-               .errstr_unpriv = "R0 pointer += pointer",
-               .errstr = "R0 invalid mem access 'inv'",
-               .result_unpriv = REJECT,
+               .errstr = "R0 pointer += pointer",
                .result = REJECT,
                .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
@@ -3928,7 +3984,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -3949,7 +4005,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -3970,7 +4026,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -4279,7 +4335,8 @@ static struct bpf_test tests[] = {
                .fixup_map1 = { 2 },
                .errstr_unpriv = "R2 leaks addr into mem",
                .result_unpriv = REJECT,
-               .result = ACCEPT,
+               .result = REJECT,
+               .errstr = "BPF_XADD stores into R1 context is not allowed",
        },
        {
                "leak pointer into ctx 2",
@@ -4293,7 +4350,8 @@ static struct bpf_test tests[] = {
                },
                .errstr_unpriv = "R10 leaks addr into mem",
                .result_unpriv = REJECT,
-               .result = ACCEPT,
+               .result = REJECT,
+               .errstr = "BPF_XADD stores into R1 context is not allowed",
        },
        {
                "leak pointer into ctx 3",
@@ -5195,10 +5253,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 bitwise operator &= on pointer",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 bitwise operator &= on pointer",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 2",
@@ -5214,10 +5270,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 32-bit pointer arithmetic prohibited",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 3",
@@ -5233,10 +5287,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 pointer arithmetic with /= operator",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 pointer arithmetic with /= operator",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 4",
@@ -6019,8 +6071,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map_in_map = { 3 },
-               .errstr = "R1 type=inv expected=map_ptr",
-               .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
+               .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
                .result = REJECT,
        },
        {
@@ -6116,6 +6167,30 @@ static struct bpf_test tests[] = {
                },
                .result = ACCEPT,
        },
+       {
+               "ld_abs: tests on r6 and skb data reload helper",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_6, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       BPF_MOV64_IMM(BPF_REG_3, 2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_vlan_push),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+       },
        {
                "ld_ind: check calling conv, r1",
                .insns = {
@@ -6300,7 +6375,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6324,7 +6399,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6350,7 +6425,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R8 invalid mem access 'inv'",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6375,7 +6450,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R8 invalid mem access 'inv'",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6423,7 +6498,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6494,7 +6569,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6545,7 +6620,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6572,7 +6647,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6598,7 +6673,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6627,7 +6702,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6657,7 +6732,7 @@ static struct bpf_test tests[] = {
                        BPF_JMP_IMM(BPF_JA, 0, 0, -7),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R0 min value is negative",
+               .errstr = "R0 invalid mem access 'inv'",
                .result = REJECT,
        },
        {
@@ -6685,8 +6760,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr_unpriv = "R0 pointer comparison prohibited",
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
                .result_unpriv = REJECT,
        },
@@ -6741,6 +6815,462 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result = REJECT,
        },
+       {
+               "bounds check based on zero-extended MOV",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0x0000'0000'ffff'ffff */
+                       BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0 */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
+       {
+               "bounds check based on sign-extended MOV. test1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0xffff'ffff'ffff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0xffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+                       /* r0 = <oob pointer> */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access to OOB pointer */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 4294967295",
+               .result = REJECT
+       },
+       {
+               "bounds check based on sign-extended MOV. test2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0xffff'ffff'ffff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0xfff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+                       /* r0 = <oob pointer> */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access to OOB pointer */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 min value is outside of the array range",
+               .result = REJECT
+       },
+       {
+               "bounds check based on reg_off + var_off + insn_off. test1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "value_size=8 off=1073741825",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "bounds check based on reg_off + var_off + insn_off. test2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "value 1073741823",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "bounds check after truncation of non-boundary-crossing range",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       /* r2 = 0x10'0000'0000 */
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+                       /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+                       /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0 */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
+       {
+               "bounds check after truncation of boundary-crossing range (1)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0xffff'ffff] or
+                        *      [0x0000'0000, 0x0000'007f]
+                        */
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0x00, 0xff] or
+                        *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = 0 or
+                        *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op or OOB pointer computation */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               /* not actually fully unbounded, but the bound is very high */
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
+       {
+               "bounds check after truncation of boundary-crossing range (2)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0xffff'ffff] or
+                        *      [0x0000'0000, 0x0000'007f]
+                        * difference from the previous test: truncation via
+                        * MOV32 instead of ALU32.
+                        */
+                       BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0x00, 0xff] or
+                        *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = 0 or
+                        *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op or OOB pointer computation */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               /* not actually fully unbounded, but the bound is very high */
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
+       {
+               "bounds check after wrapping 32-bit addition",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       /* r1 = 0x7fff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0xffff'fffe */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0 */
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
+       {
+               "bounds check after shift with oversized count operand",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_IMM(BPF_REG_2, 32),
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       /* r1 = (u32)1 << (u32)32 = ? */
+                       BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+                       /* r1 = [0x0000, 0xffff] */
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+                       /* computes unknown pointer, potentially OOB */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 max value is outside of the array range",
+               .result = REJECT
+       },
+       {
+               "bounds check after right shift of maybe-negative number",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       /* r1 = [-0x01, 0xfe] */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+                       /* r1 = 0 or 0xff'ffff'ffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* r1 = 0 or 0xffff'ffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* computes unknown pointer, potentially OOB */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 2147483646",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "pointer offset 1073741822",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test3",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "pointer offset -1073741822",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test4",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_1, 1000000),
+                       BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 1000000000000",
+               .result = REJECT
+       },
+       {
+               "pointer/scalar confusion in state equality check (way 1)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(1),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 leaks addr as return value"
+       },
+       {
+               "pointer/scalar confusion in state equality check (way 2)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+                       BPF_JMP_A(1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 leaks addr as return value"
+       },
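The bounds-check cases above all come down to plain two's-complement behaviour: a 32-bit ALU op truncates the 64-bit value, a wrapping add folds back to zero, and an unsigned subtraction below zero yields a huge 64-bit number that logical shifts only partially shrink. A minimal standalone sketch of that arithmetic in ordinary C, assuming nothing beyond the standard fixed-width types (this is not verifier code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* 32-bit truncation: only the low half of 0x10'0000'00ff survives. */
	uint64_t r1 = 0x1000000000ULL + 0xff;
	assert((uint32_t)r1 == 0xff);

	/* Wrapping 32-bit addition: 0x7fffffff + 0x7fffffff + 2 == 0 (mod 2^32). */
	uint32_t w = 0x7fffffffu + 0x7fffffffu + 2u;
	assert(w == 0);

	/* Unsigned underflow, then two logical right shifts by 8. */
	uint64_t u = (uint64_t)0 - 1;		/* 0xffff'ffff'ffff'ffff */
	assert(((u >> 8) >> 8) == 0x0000ffffffffffffULL);
	return 0;
}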
        {
                "variable-offset ctx access",
                .insns = {
@@ -6782,6 +7312,71 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_LWT_IN,
        },
+       {
+               "indirect variable-offset stack access",
+               .insns = {
+                       /* Fill the top 8 bytes of the stack */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       /* Get an unknown value */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+                       /* Make it small and 4-byte aligned */
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+                       /* add it to fp.  We now have either fp-4 or fp-8, but
+                        * we don't know which
+                        */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+                       /* dereference it indirectly */
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 5 },
+               .errstr = "variable stack read R2",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_LWT_IN,
+       },
+       {
+               "direct stack access with 32-bit wraparound. test1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer and 2147483647",
+               .result = REJECT
+       },
+       {
+               "direct stack access with 32-bit wraparound. test2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer and 1073741823",
+               .result = REJECT
+       },
+       {
+               "direct stack access with 32-bit wraparound. test3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer offset 1073741822",
+               .result = REJECT
+       },
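The decimal numbers in the error strings above are just the hex operands the tests feed in (or, for test3, their sum); a quick standalone check of that correspondence in plain C11, unrelated to the verifier itself:

#include <assert.h>

int main(void)
{
	_Static_assert(0x7fffffff == 2147483647, "operand of test1");
	_Static_assert(0x3fffffff == 1073741823, "operand of test2");
	_Static_assert(0x1fffffff + 0x1fffffff == 1073741822, "accumulated offset in test3");
	return 0;
}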
        {
                "liveness pruning and write screening",
                .insns = {
@@ -7103,6 +7698,19 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
+       {
+               "pkt_end - pkt_start is allowed",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
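The subtraction exercised here is what a tc classifier written in restricted C produces when it derives the packet length from the two pointers exposed by struct __sk_buff. A hedged sketch, meant to be built with clang -target bpf; the section and function names are made up, only the data/data_end fields come from the UAPI header:

#include <linux/bpf.h>

/* Illustrative classifier: length computed as pkt_end - pkt_start. */
__attribute__((section("classifier"), used))
int pkt_len_example(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	long len = data_end - data;

	return len > 64;	/* arbitrary threshold, stands in for a real verdict */
}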
        {
                "XDP pkt read, pkt_end mangling, bad access 1",
                .insns = {
@@ -7118,7 +7726,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "R1 offset is outside of the packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
        },
@@ -7137,7 +7745,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "R1 offset is outside of the packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
        },
@@ -8025,6 +8633,127 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_XDP,
                .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
+       {
+               "check deducing bounds from const, 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R0 tried to subtract pointer from scalar",
+       },
+       {
+               "check deducing bounds from const, 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "check deducing bounds from const, 3",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R0 tried to subtract pointer from scalar",
+       },
+       {
+               "check deducing bounds from const, 4",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "check deducing bounds from const, 5",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R0 tried to subtract pointer from scalar",
+       },
+       {
+               "check deducing bounds from const, 6",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R0 tried to subtract pointer from scalar",
+       },
+       {
+               "check deducing bounds from const, 7",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, ~0),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "dereference of modified ctx ptr",
+       },
+       {
+               "check deducing bounds from const, 8",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, ~0),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "dereference of modified ctx ptr",
+       },
+       {
+               "check deducing bounds from const, 9",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R0 tried to subtract pointer from scalar",
+       },
+       {
+               "check deducing bounds from const, 10",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+                       /* Marks reg as unknown. */
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+       },
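The deducing-bounds cases probe how the verifier narrows a register's signed range on the taken edge of JSGE/JSLE against an immediate. A toy version of that interval refinement in ordinary C, with invented names (not verifier code), showing that once both "reg s>= K" and "reg s<= K" hold the range collapses to the single value K, which is consistent with cases 2 and 4 being accepted:

#include <assert.h>

struct srange { long long smin, smax; };

/* Taken edge of "if (reg s>= k)": raise the lower bound. */
static void refine_jsge(struct srange *r, long long k)
{
	if (r->smin < k)
		r->smin = k;
}

/* Taken edge of "if (reg s<= k)": lower the upper bound. */
static void refine_jsle(struct srange *r, long long k)
{
	if (r->smax > k)
		r->smax = k;
}

int main(void)
{
	struct srange r0 = { .smin = -(1LL << 62), .smax = 1LL << 62 };

	refine_jsge(&r0, 1);
	refine_jsle(&r0, 1);
	assert(r0.smin == 1 && r0.smax == 1);	/* a known constant */
	return 0;
}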
        {
                "bpf_exit with invalid return code. test1",
                .insns = {
index 3cc0b56..e9626cf 100644 (file)
@@ -3,6 +3,8 @@
 #include <stdio.h>
 #include <string.h>
 #include <unistd.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 #include <linux/bpf.h>
 #include <linux/filter.h>
@@ -131,11 +133,16 @@ static void test_log_bad(char *log, size_t log_len, int log_level)
 
 int main(int argc, char **argv)
 {
+       struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
        char full_log[LOG_SIZE];
        char log[LOG_SIZE];
        size_t want_len;
        int i;
 
+       /* allow unlimited locked memory to have more consistent error code */
+       if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+               perror("Unable to lift memlock rlimit");
+
        memset(log, 1, LOG_SIZE);
 
        /* Test incorrect attr */
index e57b4ac..7177bea 100644 (file)
@@ -1,3 +1,4 @@
 CONFIG_USER_NS=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_TEST_BPF=m
+CONFIG_NUMA=y
index 939a337..5d4f10a 100644 (file)
@@ -7,7 +7,7 @@ include ../lib.mk
 
 TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
                        check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
-                       protection_keys test_vdso
+                       protection_keys test_vdso test_vsyscall
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
                        test_FCMOV test_FCOMI test_FISTTP \
                        vdso_restorer
index 66e5ce5..1aef72d 100644 (file)
@@ -122,8 +122,7 @@ static void check_valid_segment(uint16_t index, int ldt,
         * NB: Different Linux versions do different things with the
         * accessed bit in set_thread_area().
         */
-       if (ar != expected_ar &&
-           (ldt || ar != (expected_ar | AR_ACCESSED))) {
+       if (ar != expected_ar && ar != (expected_ar | AR_ACCESSED)) {
                printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n",
                       (ldt ? "LDT" : "GDT"), index, ar, expected_ar);
                nerrs++;
@@ -627,13 +626,10 @@ static void do_multicpu_tests(void)
 static int finish_exec_test(void)
 {
        /*
-        * In a sensible world, this would be check_invalid_segment(0, 1);
-        * For better or for worse, though, the LDT is inherited across exec.
-        * We can probably change this safely, but for now we test it.
+        * Older kernel versions did inherit the LDT on exec(), which is
+        * wrong because exec() starts from a clean state.
         */
-       check_valid_segment(0, 1,
-                           AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB,
-                           42, true);
+       check_invalid_segment(0, 1);
 
        return nerrs ? 1 : 0;
 }
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
new file mode 100644 (file)
index 0000000..7a744fa
--- /dev/null
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <time.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <dlfcn.h>
+#include <string.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <sys/ucontext.h>
+#include <errno.h>
+#include <err.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <setjmp.h>
+
+#ifdef __x86_64__
+# define VSYS(x) (x)
+#else
+# define VSYS(x) 0
+#endif
+
+#ifndef SYS_getcpu
+# ifdef __x86_64__
+#  define SYS_getcpu 309
+# else
+#  define SYS_getcpu 318
+# endif
+#endif
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+                      int flags)
+{
+       struct sigaction sa;
+       memset(&sa, 0, sizeof(sa));
+       sa.sa_sigaction = handler;
+       sa.sa_flags = SA_SIGINFO | flags;
+       sigemptyset(&sa.sa_mask);
+       if (sigaction(sig, &sa, 0))
+               err(1, "sigaction");
+}
+
+/* vsyscalls and vDSO */
+bool should_read_vsyscall = false;
+
+typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
+gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
+gtod_t vdso_gtod;
+
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+vgettime_t vdso_gettime;
+
+typedef long (*time_func_t)(time_t *t);
+time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
+time_func_t vdso_time;
+
+typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
+getcpu_t vdso_getcpu;
+
+static void init_vdso(void)
+{
+       void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+       if (!vdso)
+               vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+       if (!vdso) {
+               printf("[WARN]\tfailed to find vDSO\n");
+               return;
+       }
+
+       vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
+       if (!vdso_gtod)
+               printf("[WARN]\tfailed to find gettimeofday in vDSO\n");
+
+       vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+       if (!vdso_gettime)
+               printf("[WARN]\tfailed to find clock_gettime in vDSO\n");
+
+       vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
+       if (!vdso_time)
+               printf("[WARN]\tfailed to find time in vDSO\n");
+
+       vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
+       if (!vdso_getcpu) {
+               /* getcpu() was never wired up in the 32-bit vDSO. */
+               printf("[%s]\tfailed to find getcpu in vDSO\n",
+                      sizeof(long) == 8 ? "WARN" : "NOTE");
+       }
+}
+
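+/*
+ * On x86_64, parse /proc/self/maps to find the [vsyscall] mapping: record
+ * whether the page is readable, and drop the vsyscall function pointers if
+ * the page is not executable or not mapped at all.
+ */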
+static int init_vsys(void)
+{
+#ifdef __x86_64__
+       int nerrs = 0;
+       FILE *maps;
+       char line[128];
+       bool found = false;
+
+       maps = fopen("/proc/self/maps", "r");
+       if (!maps) {
+               printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n");
+               should_read_vsyscall = true;
+               return 0;
+       }
+
+       while (fgets(line, sizeof(line), maps)) {
+               char r, x;
+               void *start, *end;
+               char name[128];
+               if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
+                          &start, &end, &r, &x, name) != 5)
+                       continue;
+
+               if (strcmp(name, "[vsyscall]"))
+                       continue;
+
+               printf("\tvsyscall map: %s", line);
+
+               if (start != (void *)0xffffffffff600000 ||
+                   end != (void *)0xffffffffff601000) {
+                       printf("[FAIL]\taddress range is nonsense\n");
+                       nerrs++;
+               }
+
+               printf("\tvsyscall permissions are %c-%c\n", r, x);
+               should_read_vsyscall = (r == 'r');
+               if (x != 'x') {
+                       vgtod = NULL;
+                       vtime = NULL;
+                       vgetcpu = NULL;
+               }
+
+               found = true;
+               break;
+       }
+
+       fclose(maps);
+
+       if (!found) {
+               printf("\tno vsyscall map in /proc/self/maps\n");
+               should_read_vsyscall = false;
+               vgtod = NULL;
+               vtime = NULL;
+               vgetcpu = NULL;
+       }
+
+       return nerrs;
+#else
+       return 0;
+#endif
+}
+
+/* syscalls */
+static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
+{
+       return syscall(SYS_gettimeofday, tv, tz);
+}
+
+static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
+{
+       return syscall(SYS_clock_gettime, id, ts);
+}
+
+static inline long sys_time(time_t *t)
+{
+       return syscall(SYS_time, t);
+}
+
+static inline long sys_getcpu(unsigned *cpu, unsigned *node,
+                             void *cache)
+{
+       return syscall(SYS_getcpu, cpu, node, cache);
+}
+
+static jmp_buf jmpbuf;
+
+static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
+{
+       siglongjmp(jmpbuf, 1);
+}
+
+static double tv_diff(const struct timeval *a, const struct timeval *b)
+{
+       return (double)(a->tv_sec - b->tv_sec) +
+               (double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6;
+}
+
+static int check_gtod(const struct timeval *tv_sys1,
+                     const struct timeval *tv_sys2,
+                     const struct timezone *tz_sys,
+                     const char *which,
+                     const struct timeval *tv_other,
+                     const struct timezone *tz_other)
+{
+       int nerrs = 0;
+       double d1, d2;
+
+       if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) {
+               printf("[FAIL] %s tz mismatch\n", which);
+               nerrs++;
+       }
+
+       d1 = tv_diff(tv_other, tv_sys1);
+       d2 = tv_diff(tv_sys2, tv_other);
+       printf("\t%s time offsets: %lf %lf\n", which, d1, d2);
+
+       if (d1 < 0 || d2 < 0) {
+               printf("[FAIL]\t%s time was inconsistent with the syscall\n", which);
+               nerrs++;
+       } else {
+               printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which);
+       }
+
+       return nerrs;
+}
+
+static int test_gtod(void)
+{
+       struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
+       struct timezone tz_sys, tz_vdso, tz_vsys;
+       long ret_vdso = -1;
+       long ret_vsys = -1;
+       int nerrs = 0;
+
+       printf("[RUN]\ttest gettimeofday()\n");
+
+       if (sys_gtod(&tv_sys1, &tz_sys) != 0)
+               err(1, "syscall gettimeofday");
+       if (vdso_gtod)
+               ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
+       if (vgtod)
+               ret_vsys = vgtod(&tv_vsys, &tz_vsys);
+       if (sys_gtod(&tv_sys2, &tz_sys) != 0)
+               err(1, "syscall gettimeofday");
+
+       if (vdso_gtod) {
+               if (ret_vdso == 0) {
+                       nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
+               } else {
+                       printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso);
+                       nerrs++;
+               }
+       }
+
+       if (vgtod) {
+               if (ret_vsys == 0) {
+                       nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
+               } else {
+                       printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys);
+                       nerrs++;
+               }
+       }
+
+       return nerrs;
+}
+
+static int test_time(void) {
+       int nerrs = 0;
+
+       printf("[RUN]\ttest time()\n");
+       long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
+       long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;
+       t_sys1 = sys_time(&t2_sys1);
+       if (vdso_time)
+               t_vdso = vdso_time(&t2_vdso);
+       if (vtime)
+               t_vsys = vtime(&t2_vsys);
+       t_sys2 = sys_time(&t2_sys2);
+       if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
+               printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2);
+               nerrs++;
+               return nerrs;
+       }
+
+       if (vdso_time) {
+               if (t_vdso < 0 || t_vdso != t2_vdso) {
+                       printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso);
+                       nerrs++;
+               } else if (t_vdso < t_sys1 || t_vdso > t_sys2) {
+                       printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2);
+                       nerrs++;
+               } else {
+                       printf("[OK]\tvDSO time() is okay\n");
+               }
+       }
+
+       if (vtime) {
+               if (t_vsys < 0 || t_vsys != t2_vsys) {
+                       printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys);
+                       nerrs++;
+               } else if (t_vsys < t_sys1 || t_vsys > t_sys2) {
+                       printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2);
+                       nerrs++;
+               } else {
+                       printf("[OK]\tvsyscall time() is okay\n");
+               }
+       }
+
+       return nerrs;
+}
+
+static int test_getcpu(int cpu)
+{
+       int nerrs = 0;
+       long ret_sys, ret_vdso = -1, ret_vsys = -1;
+
+       printf("[RUN]\tgetcpu() on CPU %d\n", cpu);
+
+       cpu_set_t cpuset;
+       CPU_ZERO(&cpuset);
+       CPU_SET(cpu, &cpuset);
+       if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
+               printf("[SKIP]\tfailed to force CPU %d\n", cpu);
+               return nerrs;
+       }
+
+       unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
+       unsigned node = 0;
+       bool have_node = false;
+       ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
+       if (vdso_getcpu)
+               ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
+       if (vgetcpu)
+               ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
+
+       if (ret_sys == 0) {
+               if (cpu_sys != cpu) {
+                       printf("[FAIL]\tsyscall reported CPU %hu but should be %d\n", cpu_sys, cpu);
+                       nerrs++;
+               }
+
+               have_node = true;
+               node = node_sys;
+       }
+
+       if (vdso_getcpu) {
+               if (ret_vdso) {
+                       printf("[FAIL]\tvDSO getcpu() failed\n");
+                       nerrs++;
+               } else {
+                       if (!have_node) {
+                               have_node = true;
+                               node = node_vdso;
+                       }
+
+                       if (cpu_vdso != cpu) {
+                               printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu);
+                               nerrs++;
+                       } else {
+                               printf("[OK]\tvDSO reported correct CPU\n");
+                       }
+
+                       if (node_vdso != node) {
+                               printf("[FAIL]\tvDSO reported node %hu but should be %hu\n", node_vdso, node);
+                               nerrs++;
+                       } else {
+                               printf("[OK]\tvDSO reported correct node\n");
+                       }
+               }
+       }
+
+       if (vgetcpu) {
+               if (ret_vsys) {
+                       printf("[FAIL]\tvsyscall getcpu() failed\n");
+                       nerrs++;
+               } else {
+                       if (!have_node) {
+                               have_node = true;
+                               node = node_vsys;
+                       }
+
+                       if (cpu_vsys != cpu) {
+                               printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu);
+                               nerrs++;
+                       } else {
+                               printf("[OK]\tvsyscall reported correct CPU\n");
+                       }
+
+                       if (node_vsys != node) {
+                               printf("[FAIL]\tvsyscall reported node %hu but should be %hu\n", node_vsys, node);
+                               nerrs++;
+                       } else {
+                               printf("[OK]\tvsyscall reported correct node\n");
+                       }
+               }
+       }
+
+       return nerrs;
+}
+
+static int test_vsys_r(void)
+{
+#ifdef __x86_64__
+       printf("[RUN]\tChecking read access to the vsyscall page\n");
+       bool can_read;
+       if (sigsetjmp(jmpbuf, 1) == 0) {
+               *(volatile int *)0xffffffffff600000;
+               can_read = true;
+       } else {
+               can_read = false;
+       }
+
+       if (can_read && !should_read_vsyscall) {
+               printf("[FAIL]\tWe have read access, but we shouldn't\n");
+               return 1;
+       } else if (!can_read && should_read_vsyscall) {
+               printf("[FAIL]\tWe don't have read access, but we should\n");
+               return 1;
+       } else {
+               printf("[OK]\tgot expected result\n");
+       }
+#endif
+
+       return 0;
+}
+
+
+#ifdef __x86_64__
+#define X86_EFLAGS_TF (1UL << 8)
+static volatile sig_atomic_t num_vsyscall_traps;
+
+static unsigned long get_eflags(void)
+{
+       unsigned long eflags;
+       asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
+       return eflags;
+}
+
+static void set_eflags(unsigned long eflags)
+{
+       asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
+}
+
+static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
+{
+       ucontext_t *ctx = (ucontext_t *)ctx_void;
+       unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];
+
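+       /* Count only traps whose RIP falls inside the 4 KiB vsyscall page. */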
+       if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
+               num_vsyscall_traps++;
+}
+
+static int test_native_vsyscall(void)
+{
+       time_t tmp;
+       bool is_native;
+
+       if (!vtime)
+               return 0;
+
+       printf("[RUN]\tchecking for native vsyscall\n");
+       sethandler(SIGTRAP, sigtrap, 0);
+       set_eflags(get_eflags() | X86_EFLAGS_TF);
+       vtime(&tmp);
+       set_eflags(get_eflags() & ~X86_EFLAGS_TF);
+
+       /*
+        * If vsyscalls are emulated, we expect a single trap in the
+        * vsyscall page -- the call instruction will trap with RIP
+        * pointing to the entry point before emulation takes over.
+        * In native mode, we expect two traps, since whatever code
+        * the vsyscall page contains will be more than just a ret
+        * instruction.
+        */
+       is_native = (num_vsyscall_traps > 1);
+
+       printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n",
+              (is_native ? "native" : "emulated"),
+              (int)num_vsyscall_traps);
+
+       return 0;
+}
+#endif
+
+int main(int argc, char **argv)
+{
+       int nerrs = 0;
+
+       init_vdso();
+       nerrs += init_vsys();
+
+       nerrs += test_gtod();
+       nerrs += test_time();
+       nerrs += test_getcpu(0);
+       nerrs += test_getcpu(1);
+
+       sethandler(SIGSEGV, sigsegv, 0);
+       nerrs += test_vsys_r();
+
+#ifdef __x86_64__
+       nerrs += test_native_vsyscall();
+#endif
+
+       return nerrs ? 1 : 0;
+}
index 5727dfb..c9c8161 100644 (file)
@@ -50,14 +50,14 @@ static int parse_status(const char *value)
 
        while (*c != '\0') {
                int port, status, speed, devid;
-               unsigned long socket;
+               int sockfd;
                char lbusid[SYSFS_BUS_ID_SIZE];
                struct usbip_imported_device *idev;
                char hub[3];
 
-               ret = sscanf(c, "%2s  %d %d %d %x %lx %31s\n",
+               ret = sscanf(c, "%2s  %d %d %d %x %u %31s\n",
                                hub, &port, &status, &speed,
-                               &devid, &socket, lbusid);
+                               &devid, &sockfd, lbusid);
 
                if (ret < 5) {
                        dbg("sscanf failed: %d", ret);
@@ -66,7 +66,7 @@ static int parse_status(const char *value)
 
                dbg("hub %s port %d status %d speed %d devid %x",
                                hub, port, status, speed, devid);
-               dbg("socket %lx lbusid %s", socket, lbusid);
+               dbg("sockfd %u lbusid %s", sockfd, lbusid);
 
                /* if a device is connected, look at it */
                idev = &vhci_driver->idev[port];
@@ -106,7 +106,7 @@ static int parse_status(const char *value)
        return 0;
 }
 
-#define MAX_STATUS_NAME 16
+#define MAX_STATUS_NAME 18
 
 static int refresh_imported_device_list(void)
 {
@@ -329,9 +329,17 @@ err:
 int usbip_vhci_get_free_port(uint32_t speed)
 {
        for (int i = 0; i < vhci_driver->nports; i++) {
-               if (speed == USB_SPEED_SUPER &&
-                   vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
-                       continue;
+
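+               /* SuperSpeed devices need a SuperSpeed hub port; every
+                * other speed is matched to a high-speed port.
+                */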
+               switch (speed) {
+               case USB_SPEED_SUPER:
+                       if (vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
+                               continue;
+                       break;
+               default:
+                       if (vhci_driver->idev[i].hub != HUB_SPEED_HIGH)
+                               continue;
+                       break;
+               }
 
                if (vhci_driver->idev[i].status == VDEV_ST_NULL)
                        return vhci_driver->idev[i].port;
index 2b3d6d2..3d7b42e 100644 (file)
@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
        char command[SYSFS_BUS_ID_SIZE + 4];
        char match_busid_attr_path[SYSFS_PATH_MAX];
        int rc;
+       int cmd_size;
 
        snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
                 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
                 attr_name);
 
        if (add)
-               snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
+               cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
+                                   busid);
        else
-               snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
+               cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
+                                   busid);
 
        rc = write_sysfs_attribute(match_busid_attr_path, command,
-                                  sizeof(command));
+                                  cmd_size);
        if (rc < 0) {
                dbg("failed to write match_busid: %s", strerror(errno));
                return -1;
index 38bb171..e6e8130 100644 (file)
 #define unlikely(x)    (__builtin_expect(!!(x), 0))
 #define likely(x)    (__builtin_expect(!!(x), 1))
 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
+#define SIZE_MAX        (~(size_t)0)
+
 typedef pthread_spinlock_t  spinlock_t;
 
 typedef int gfp_t;
-static void *kmalloc(unsigned size, gfp_t gfp)
-{
-       return memalign(64, size);
-}
+#define __GFP_ZERO 0x1
 
-static void *kzalloc(unsigned size, gfp_t gfp)
+static void *kmalloc(unsigned size, gfp_t gfp)
 {
        void *p = memalign(64, size);
        if (!p)
                return p;
-       memset(p, 0, size);
 
+       if (gfp & __GFP_ZERO)
+               memset(p, 0, size);
        return p;
 }
 
+static inline void *kzalloc(unsigned size, gfp_t flags)
+{
+       return kmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+{
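+       /* Fail instead of allocating a truncated buffer when n * size
+        * would overflow size_t.
+        */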
+       if (size != 0 && n > SIZE_MAX / size)
+               return NULL;
+       return kmalloc(n * size, flags);
+}
+
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+       return kmalloc_array(n, size, flags | __GFP_ZERO);
+}
+
 static void kfree(void *p)
 {
        if (p)
index 35b0398..0cf28aa 100644 (file)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 # Sergey Senozhatsky, 2015
 # sergey.senozhatsky.work@gmail.com
index 4151250..cc29a81 100644 (file)
@@ -92,16 +92,23 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
        struct arch_timer_context *vtimer;
+       u32 cnt_ctl;
 
-       if (!vcpu) {
-               pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n");
-               return IRQ_NONE;
-       }
-       vtimer = vcpu_vtimer(vcpu);
+       /*
+        * We may see a timer interrupt after vcpu_put() has been called which
+        * sets the CPU's vcpu pointer to NULL, because even though the timer
+        * has been disabled in vtimer_save_state(), the hardware interrupt
+        * signal may not have been retired from the interrupt controller yet.
+        */
+       if (!vcpu)
+               return IRQ_HANDLED;
 
+       vtimer = vcpu_vtimer(vcpu);
        if (!vtimer->irq.level) {
-               vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-               if (kvm_timer_irq_can_fire(vtimer))
+               cnt_ctl = read_sysreg_el0(cntv_ctl);
+               cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
+                          ARCH_TIMER_CTRL_IT_MASK;
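+               /* The timer has really fired only if it is enabled, its
+                * status bit is set and the interrupt is not masked.
+                */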
+               if (cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
                        kvm_timer_update_irq(vcpu, true, vtimer);
        }
 
@@ -355,6 +362,7 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 
        /* Disable the virtual timer */
        write_sysreg_el0(0, cntv_ctl);
+       isb();
 
        vtimer->loaded = false;
 out:
@@ -479,9 +487,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 
        vtimer_restore_state(vcpu);
 
-       if (has_vhe())
-               disable_el1_phys_timer_access();
-
        /* Set the background timer for the physical timer emulation. */
        phys_timer_emulate(vcpu);
 }
@@ -510,9 +515,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
        if (unlikely(!timer->enabled))
                return;
 
-       if (has_vhe())
-               enable_el1_phys_timer_access();
-
        vtimer_save_state(vcpu);
 
        /*
@@ -726,7 +728,7 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
        return 0;
 }
 
-int kvm_timer_hyp_init(void)
+int kvm_timer_hyp_init(bool has_gic)
 {
        struct arch_timer_kvm_info *info;
        int err;
@@ -762,10 +764,13 @@ int kvm_timer_hyp_init(void)
                return err;
        }
 
-       err = irq_set_vcpu_affinity(host_vtimer_irq, kvm_get_running_vcpus());
-       if (err) {
-               kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
-               goto out_free_irq;
+       if (has_gic) {
+               err = irq_set_vcpu_affinity(host_vtimer_irq,
+                                           kvm_get_running_vcpus());
+               if (err) {
+                       kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
+                       goto out_free_irq;
+               }
        }
 
        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
@@ -841,7 +846,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 no_vgic:
        preempt_disable();
        timer->enabled = 1;
-       kvm_timer_vcpu_load_vgic(vcpu);
+       kvm_timer_vcpu_load(vcpu);
        preempt_enable();
 
        return 0;
index a6524ff..2e43f9d 100644 (file)
@@ -188,6 +188,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                        kvm->vcpus[i] = NULL;
                }
        }
+       atomic_set(&kvm->online_vcpus, 0);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -296,7 +297,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
        kvm_mmu_free_memory_caches(vcpu);
        kvm_timer_vcpu_terminate(vcpu);
-       kvm_vgic_vcpu_destroy(vcpu);
        kvm_pmu_vcpu_destroy(vcpu);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -615,7 +615,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int ret;
-       sigset_t sigsaved;
 
        if (unlikely(!kvm_vcpu_initialized(vcpu)))
                return -ENOEXEC;
@@ -628,13 +627,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                ret = kvm_handle_mmio_return(vcpu, vcpu->run);
                if (ret)
                        return ret;
+               if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
+                       return 0;
+
        }
 
        if (run->immediate_exit)
                return -EINTR;
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        ret = 1;
        run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -769,8 +770,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                kvm_pmu_update_run(vcpu);
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
+
        return ret;
 }
 
@@ -1325,7 +1326,7 @@ static int init_subsystems(void)
        /*
         * Init HYP architected timer support
         */
-       err = kvm_timer_hyp_init();
+       err = kvm_timer_hyp_init(vgic_present);
        if (err)
                goto out;
 
@@ -1504,7 +1505,7 @@ int kvm_arch_init(void *opaque)
        bool in_hyp_mode;
 
        if (!is_hyp_mode_available()) {
-               kvm_err("HYP mode not available\n");
+               kvm_info("HYP mode not available\n");
                return -ENODEV;
        }
 
index f398616..f24404b 100644 (file)
@@ -27,42 +27,34 @@ void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
        write_sysreg(cntvoff, cntvoff_el2);
 }
 
-void __hyp_text enable_el1_phys_timer_access(void)
-{
-       u64 val;
-
-       /* Allow physical timer/counter access for the host */
-       val = read_sysreg(cnthctl_el2);
-       val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-       write_sysreg(val, cnthctl_el2);
-}
-
-void __hyp_text disable_el1_phys_timer_access(void)
-{
-       u64 val;
-
-       /*
-        * Disallow physical timer access for the guest
-        * Physical counter access is allowed
-        */
-       val = read_sysreg(cnthctl_el2);
-       val &= ~CNTHCTL_EL1PCEN;
-       val |= CNTHCTL_EL1PCTEN;
-       write_sysreg(val, cnthctl_el2);
-}
-
 void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
 {
        /*
         * We don't need to do this for VHE since the host kernel runs in EL2
         * with HCR_EL2.TGE ==1, which makes those bits have no impact.
         */
-       if (!has_vhe())
-               enable_el1_phys_timer_access();
+       if (!has_vhe()) {
+               u64 val;
+
+               /* Allow physical timer/counter access for the host */
+               val = read_sysreg(cnthctl_el2);
+               val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+               write_sysreg(val, cnthctl_el2);
+       }
 }
 
 void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
 {
-       if (!has_vhe())
-               disable_el1_phys_timer_access();
+       if (!has_vhe()) {
+               u64 val;
+
+               /*
+                * Disallow physical timer access for the guest
+                * Physical counter access is allowed
+                */
+               val = read_sysreg(cnthctl_el2);
+               val &= ~CNTHCTL_EL1PCEN;
+               val |= CNTHCTL_EL1PCTEN;
+               write_sysreg(val, cnthctl_el2);
+       }
 }
index a3f18d3..d7fd46f 100644 (file)
@@ -34,11 +34,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
        else
                elrsr1 = 0;
 
-#ifdef CONFIG_CPU_BIG_ENDIAN
-       cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
        cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
 }
 
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
index b6e715f..dac7ceb 100644 (file)
@@ -112,7 +112,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
                }
 
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-                              data);
+                              &data);
                data = vcpu_data_host_to_guest(vcpu, data, len);
                vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
        }
@@ -182,14 +182,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
                                               len);
 
-               trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+               trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
                kvm_mmio_write_buf(data_buf, len, data);
 
                ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                       data_buf);
        } else {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
-                              fault_ipa, 0);
+                              fault_ipa, NULL);
 
                ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                      data_buf);
index b36945d..9dea963 100644 (file)
@@ -509,8 +509,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
  */
 void free_hyp_pgds(void)
 {
-       unsigned long addr;
-
        mutex_lock(&kvm_hyp_pgd_mutex);
 
        if (boot_hyp_pgd) {
@@ -521,10 +519,10 @@ void free_hyp_pgds(void)
 
        if (hyp_pgd) {
                unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-               for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
-               for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
+               unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
+                               (uintptr_t)high_memory - PAGE_OFFSET);
+               unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START),
+                               VMALLOC_END - VMALLOC_START);
 
                free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
                hyp_pgd = NULL;
@@ -1312,7 +1310,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return -EFAULT;
        }
 
-       if (is_vm_hugetlb_page(vma) && !logging_active) {
+       if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
                hugetlb = true;
                gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
        } else {
index 6231012..743ca5c 100644 (file)
@@ -285,9 +285,11 @@ int vgic_init(struct kvm *kvm)
        if (ret)
                goto out;
 
-       ret = vgic_v4_init(kvm);
-       if (ret)
-               goto out;
+       if (vgic_has_its(kvm)) {
+               ret = vgic_v4_init(kvm);
+               if (ret)
+                       goto out;
+       }
 
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vgic_vcpu_enable(vcpu);
index b7baf58..99e026d 100644 (file)
@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
        u32 nr = dist->nr_spis;
        int i, ret;
 
-       entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
-                         GFP_KERNEL);
+       entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
        if (!entries)
                return -ENOMEM;
 
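The hunk above replaces sizeof(struct kvm_kernel_irq_routing_entry) with sizeof(*entries), so the allocation size automatically tracks the pointer's type if it ever changes. The same idiom works with plain calloc(); a minimal sketch with a placeholder struct follows.

/* Sketch of the sizeof(*ptr) allocation idiom using user-space calloc();
 * struct routing_entry is a placeholder type, not the kernel structure.
 */
#include <stdio.h>
#include <stdlib.h>

struct routing_entry {
	unsigned int gsi;
	unsigned int type;
};

int main(void)
{
	unsigned int nr = 32;
	struct routing_entry *entries;

	/* Size is tied to the pointee type, not to a hand-written type name. */
	entries = calloc(nr, sizeof(*entries));
	if (!entries)
		return 1;

	printf("allocated %u entries of %zu bytes each\n", nr, sizeof(*entries));
	free(entries);
	return 0;
}
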
index 1f761a9..8e633bd 100644 (file)
@@ -421,6 +421,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
        u32 *intids;
        int nr_irqs, i;
        unsigned long flags;
+       u8 pendmask;
 
        nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
        if (nr_irqs < 0)
@@ -428,7 +429,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 
        for (i = 0; i < nr_irqs; i++) {
                int byte_offset, bit_nr;
-               u8 pendmask;
 
                byte_offset = intids[i] / BITS_PER_BYTE;
                bit_nr = intids[i] % BITS_PER_BYTE;
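
Moving pendmask out of the loop lets the byte read from the pending table persist across iterations, so it is only re-fetched when byte_offset changes rather than being reloaded (or left uninitialized) for every LPI in the same byte. A small sketch of that caching pattern with an in-memory table; the table contents and interrupt IDs are illustrative.

/* Sketch of the hoisting above: the cached byte must live outside the loop so
 * it survives iterations that land in the same byte of the table.
 * The "pending table" is just an in-memory array here.
 */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
	uint8_t table[4] = { 0x05, 0x00, 0x80, 0x00 };
	int intids[] = { 0, 2, 22, 23 };        /* sorted interrupt IDs */
	int last_byte_offset = -1;
	uint8_t pendmask = 0;                   /* persists across iterations */

	for (unsigned int i = 0; i < sizeof(intids) / sizeof(intids[0]); i++) {
		int byte_offset = intids[i] / BITS_PER_BYTE;
		int bit_nr = intids[i] % BITS_PER_BYTE;

		/* Only re-read the table when we move to a new byte. */
		if (byte_offset != last_byte_offset) {
			pendmask = table[byte_offset];
			last_byte_offset = byte_offset;
		}

		printf("intid %d pending=%d\n", intids[i],
		       (pendmask >> bit_nr) & 1);
	}
	return 0;
}
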
@@ -821,6 +821,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
                return E_ITS_MAPC_COLLECTION_OOR;
 
        collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+       if (!collection)
+               return -ENOMEM;
 
        collection->collection_id = coll_id;
        collection->target_addr = COLLECTION_NOT_MAPPED;
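
The hunk above adds the missing NULL check after kzalloc(), returning -ENOMEM instead of dereferencing a failed allocation. The same defensive pattern in user-space C; struct collection and the helper are placeholders for the kernel types.

/* Sketch of checking an allocation before use; the type and error value are
 * stand-ins for the kernel code above.
 */
#include <errno.h>
#include <stdlib.h>

struct collection {
	unsigned int collection_id;
	unsigned int target_addr;
};

static int alloc_collection(struct collection **out, unsigned int coll_id)
{
	struct collection *c = calloc(1, sizeof(*c));

	if (!c)
		return -ENOMEM;   /* bail out instead of dereferencing NULL */

	c->collection_id = coll_id;
	*out = c;
	return 0;
}

int main(void)
{
	struct collection *c;

	if (alloc_collection(&c, 7) == 0)
		free(c);
	return 0;
}
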
index 2f05f73..f47e848 100644 (file)
@@ -327,13 +327,13 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
        int last_byte_offset = -1;
        struct vgic_irq *irq;
        int ret;
+       u8 val;
 
        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                int byte_offset, bit_nr;
                struct kvm_vcpu *vcpu;
                gpa_t pendbase, ptr;
                bool stored;
-               u8 val;
 
                vcpu = irq->target_vcpu;
                if (!vcpu)
index 53c324a..bc42651 100644 (file)
@@ -118,7 +118,7 @@ int vgic_v4_init(struct kvm *kvm)
        struct kvm_vcpu *vcpu;
        int i, nr_vcpus, ret;
 
-       if (!vgic_supports_direct_msis(kvm))
+       if (!kvm_vgic_global_state.has_gicv4)
                return 0; /* Nothing to see here... move along. */
 
        if (dist->its_vm.vpes)
@@ -337,8 +337,10 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
                goto out;
 
        WARN_ON(!(irq->hw && irq->host_irq == virq));
-       irq->hw = false;
-       ret = its_unmap_vlpi(virq);
+       if (irq->hw) {
+               irq->hw = false;
+               ret = its_unmap_vlpi(virq);
+       }
 
 out:
        mutex_unlock(&its->its_lock);
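
The hunk above only clears irq->hw and calls its_unmap_vlpi() when the mapping actually exists, so unsetting forwarding twice (or racing with teardown) no longer tries to unmap a VLPI that is already gone. A small sketch of state-guarded teardown; the types and helper names are illustrative.

/* Sketch of teardown guarded by its own state flag: only undo the mapping if
 * the flag says it is still in place. Names are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

struct fwd_irq {
	bool hw;          /* true while the forwarding mapping exists */
	int host_irq;
};

static int unmap_vlpi(int virq)
{
	printf("unmapping virq %d\n", virq);
	return 0;
}

static int unset_forwarding(struct fwd_irq *irq, int virq)
{
	int ret = 0;

	if (irq->hw) {            /* skip the unmap if already torn down */
		irq->hw = false;
		ret = unmap_vlpi(virq);
	}
	return ret;
}

int main(void)
{
	struct fwd_irq irq = { .hw = true, .host_irq = 42 };

	unset_forwarding(&irq, 42);
	unset_forwarding(&irq, 42);   /* second call is now a harmless no-op */
	return 0;
}
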
index b168a32..ecb8e25 100644 (file)
@@ -492,6 +492,7 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 {
        struct vgic_irq *irq;
+       unsigned long flags;
        int ret = 0;
 
        if (!vgic_initialized(vcpu->kvm))
@@ -502,12 +503,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
                return -EINVAL;
 
        irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-       spin_lock(&irq->irq_lock);
+       spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->owner && irq->owner != owner)
                ret = -EEXIST;
        else
                irq->owner = owner;
-       spin_unlock(&irq->irq_lock);
+       spin_unlock_irqrestore(&irq->irq_lock, flags);
 
        return ret;
 }
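
The hunk above switches kvm_vgic_set_owner() to spin_lock_irqsave()/spin_unlock_irqrestore(), matching the other irq_lock users: the lock may be taken from interrupt context, and the save/restore variant puts back whatever interrupt state the caller already had instead of unconditionally re-enabling interrupts. A loose user-space analogy only, using signal masks in place of interrupt flags to show the "save old state, block, restore exactly that state" discipline.

/* Loose analogy: sigprocmask() with a saved old mask plays the role of the
 * flags argument of spin_lock_irqsave(), restoring whatever blocking state
 * the caller already had rather than unconditionally unblocking.
 */
#include <signal.h>
#include <stdio.h>

static void critical_section(void)
{
	sigset_t block, saved;

	sigemptyset(&block);
	sigaddset(&block, SIGALRM);

	/* "irqsave": remember the previous mask while blocking SIGALRM. */
	sigprocmask(SIG_BLOCK, &block, &saved);

	puts("inside critical section with SIGALRM blocked");

	/* "irqrestore": put back exactly the mask we found. */
	sigprocmask(SIG_SETMASK, &saved, NULL);
}

int main(void)
{
	critical_section();
	return 0;
}
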
@@ -823,13 +824,14 @@ void vgic_kick_vcpus(struct kvm *kvm)
 
 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
-       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+       struct vgic_irq *irq;
        bool map_is_active;
        unsigned long flags;
 
        if (!vgic_initialized(vcpu->kvm))
                return false;
 
+       irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        spin_lock_irqsave(&irq->irq_lock, flags);
        map_is_active = irq->hw && irq->active;
        spin_unlock_irqrestore(&irq->irq_lock, flags);
index f169ecc..210bf82 100644 (file)
@@ -135,6 +135,11 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+               unsigned long start, unsigned long end)
+{
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
        if (pfn_valid(pfn))
@@ -360,6 +365,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                kvm_flush_remote_tlbs(kvm);
 
        spin_unlock(&kvm->mmu_lock);
+
+       kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+
        srcu_read_unlock(&kvm->srcu, idx);
 }
 
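The first hunk above adds a __weak, empty default for kvm_arch_mmu_notifier_invalidate_range(), and the second calls it from the MMU-notifier path; an architecture that needs the hook supplies a strong definition that overrides the stub. A minimal sketch of the weak-default pattern with GCC/Clang attributes; the hook name here is a placeholder.

/* Sketch of a weak default overridden by a strong definition, as used for the
 * kvm_arch_mmu_notifier_invalidate_range() hook above. Build with GCC or
 * Clang; arch_invalidate_range() is a made-up name.
 */
#include <stdio.h>

/* Weak default: does nothing unless a strong definition is provided. */
__attribute__((weak)) void arch_invalidate_range(unsigned long start,
						 unsigned long end)
{
}

/* Uncommenting this strong definition elsewhere in the program would
 * silently replace the stub:
 *
 * void arch_invalidate_range(unsigned long start, unsigned long end)
 * {
 *         printf("arch hook: %#lx-%#lx\n", start, end);
 * }
 */

int main(void)
{
	arch_invalidate_range(0x1000, 0x2000);   /* no-op with the weak stub */
	puts("done");
	return 0;
}
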
@@ -2065,6 +2073,29 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
 
+void kvm_sigset_activate(struct kvm_vcpu *vcpu)
+{
+       if (!vcpu->sigset_active)
+               return;
+
+       /*
+        * This does a lockless modification of ->real_blocked, which is fine
+        * because only current can change ->real_blocked, and all readers of
+        * ->real_blocked don't care as long as ->real_blocked is always a
+        * subset of ->blocked.
+        */
+       sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
+}
+
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
+{
+       if (!vcpu->sigset_active)
+               return;
+
+       sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
+       sigemptyset(&current->real_blocked);
+}
+
 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
        unsigned int old, val, grow;
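
The new kvm_sigset_activate()/kvm_sigset_deactivate() helpers above install the vcpu's signal mask around the blocking run loop and restore (and clear) the saved mask afterwards, so a KVM_RUN caller is woken by exactly the signals userspace asked for. Since sigprocmask() is an ordinary user-space API, the same install/block/restore sequence can be sketched directly; the "run loop" here is just pause().

/* Sketch of installing a caller-supplied signal mask around a blocking
 * section and restoring the original mask afterwards, in the spirit of
 * kvm_sigset_activate()/kvm_sigset_deactivate() above.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig)
{
	(void)sig;
}

int main(void)
{
	sigset_t run_mask, saved;

	/* The "vcpu sigset": block everything except SIGUSR1. */
	sigfillset(&run_mask);
	sigdelset(&run_mask, SIGUSR1);
	signal(SIGUSR1, handler);

	sigprocmask(SIG_SETMASK, &run_mask, &saved);   /* activate      */

	printf("pid %d blocked in the run loop; send SIGUSR1 to wake it\n",
	       (int)getpid());
	pause();                                       /* blocking work */

	sigprocmask(SIG_SETMASK, &saved, NULL);        /* deactivate    */
	puts("woken up, original signal mask restored");
	return 0;
}
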